diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..b75540645c241439fe838196ea57bb6464b43fbc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "80bfe291b16071c70f141e90e67e7032d966826b" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/.gitignore b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..e9e21997b1aca0707f8749ea13c09aec66c899d2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/.gitignore @@ -0,0 +1,2 @@ +/target/ +/Cargo.lock diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..fa41c1c690dd8a87118f3457bb7bfe390910ca84 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/Cargo.lock @@ -0,0 +1,424 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addr2line" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "anyhow" +version = "1.0.101" +dependencies = [ + "backtrace", + "futures", + "rustversion", + "syn", + "thiserror", + "trybuild", +] + +[[package]] +name = "backtrace" +version = "0.3.76" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-link", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "dissimilar" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8975ffdaa0ef3661bfe02dbdcc06c9f829dfafe6a3c474de366a8d5e44276921" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-sink", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "gimli" +version = "0.32.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "indexmap" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "itoa" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "libc" +version = "0.2.180" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", +] + +[[package]] +name = "object" +version = "0.37.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" +dependencies = [ + "memchr", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serde_spanned" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" +dependencies = [ + "serde_core", +] + +[[package]] +name = "syn" +version = "2.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" +dependencies = [ + 
"proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "target-triple" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b" + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "toml" +version = "0.9.11+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" +dependencies = [ + "indexmap", + "serde_core", + "serde_spanned", + "toml_datetime", + "toml_parser", + "toml_writer", + "winnow", +] + +[[package]] +name = "toml_datetime" +version = "0.7.5+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_parser" +version = "1.0.6+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +dependencies = [ + "winnow", +] + +[[package]] +name = "toml_writer" +version = "1.0.6+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" + +[[package]] +name = "trybuild" +version = "1.0.115" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f614c21bd3a61bad9501d75cbb7686f00386c806d7f456778432c25cf86948a" +dependencies = [ + "dissimilar", + "glob", + "serde", + "serde_derive", + "serde_json", + "target-triple", + "termcolor", + "toml", +] + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "winnow" +version = "0.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" + +[[package]] +name = "zmij" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff05f8caa9038894637571ae6b9e29466c1f4f829d26c9b28f869a29cbe3445" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..95a8417434643e75431eef7009c6f4a7b254b42f --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/Cargo.toml @@ -0,0 +1,132 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.68" +name = "anyhow" +version = "1.0.101" +authors = ["David Tolnay "] +build = "build.rs" +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Flexible concrete Error type built on std::error::Error" +documentation = "https://docs.rs/anyhow" +readme = "README.md" +keywords = [ + "error", + "error-handling", +] +categories = [ + "rust-patterns", + "no-std", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/anyhow" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = [ + "--generate-link-to-definition", + "--generate-macro-expansion", + "--extern-html-root-url=core=https://doc.rust-lang.org", + "--extern-html-root-url=alloc=https://doc.rust-lang.org", + "--extern-html-root-url=std=https://doc.rust-lang.org", +] + +[features] +default = ["std"] +std = [] + +[lib] +name = "anyhow" +path = "src/lib.rs" + +[[test]] +name = "compiletest" +path = "tests/compiletest.rs" + +[[test]] +name = "test_autotrait" +path = "tests/test_autotrait.rs" + +[[test]] +name = "test_backtrace" +path = "tests/test_backtrace.rs" + +[[test]] +name = "test_boxed" +path = "tests/test_boxed.rs" + +[[test]] +name = "test_chain" +path = "tests/test_chain.rs" + +[[test]] +name = "test_context" +path = "tests/test_context.rs" + +[[test]] +name = "test_convert" +path = 
"tests/test_convert.rs" + +[[test]] +name = "test_downcast" +path = "tests/test_downcast.rs" + +[[test]] +name = "test_ensure" +path = "tests/test_ensure.rs" + +[[test]] +name = "test_ffi" +path = "tests/test_ffi.rs" + +[[test]] +name = "test_fmt" +path = "tests/test_fmt.rs" + +[[test]] +name = "test_macros" +path = "tests/test_macros.rs" + +[[test]] +name = "test_repr" +path = "tests/test_repr.rs" + +[[test]] +name = "test_source" +path = "tests/test_source.rs" + +[dependencies.backtrace] +version = "0.3.51" +optional = true + +[dev-dependencies.futures] +version = "0.3" +default-features = false + +[dev-dependencies.rustversion] +version = "1.0.6" + +[dev-dependencies.syn] +version = "2.0" +features = ["full"] + +[dev-dependencies.thiserror] +version = "2" + +[dev-dependencies.trybuild] +version = "1.0.108" +features = ["diff"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..ddf007aa86c49f77e31edd7f8adb3ecaaf111248 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/Cargo.toml.orig @@ -0,0 +1,40 @@ +[package] +name = "anyhow" +version = "1.0.101" +authors = ["David Tolnay "] +categories = ["rust-patterns", "no-std"] +description = "Flexible concrete Error type built on std::error::Error" +documentation = "https://docs.rs/anyhow" +edition = "2021" +keywords = ["error", "error-handling"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/anyhow" +rust-version = "1.68" + +[features] +default = ["std"] +std = [] + +[dependencies] +# On compilers older than 1.65, features=["backtrace"] may be used to enable +# backtraces via the `backtrace` crate. This feature has no effect on 1.65+ +# besides bringing in an unused dependency, as `std::backtrace` is always +# preferred. 
+backtrace = { version = "0.3.51", optional = true } + +[dev-dependencies] +futures = { version = "0.3", default-features = false } +rustversion = "1.0.6" +syn = { version = "2.0", features = ["full"] } +thiserror = "2" +trybuild = { version = "1.0.108", features = ["diff"] } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = [ + "--generate-link-to-definition", + "--generate-macro-expansion", + "--extern-html-root-url=core=https://doc.rust-lang.org", + "--extern-html-root-url=alloc=https://doc.rust-lang.org", + "--extern-html-root-url=std=https://doc.rust-lang.org", +] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..1b5ec8b78e237b5c3b3d812a7c0a6589d0f7161d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..31aa79387f27e730e33d871925e152e35e428031 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/README.md new file mode 100644 index 0000000000000000000000000000000000000000..78225bb6603f14ecde59db68a0f4dd19048a5b58 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/README.md @@ -0,0 +1,179 @@ +Anyhow ¯\\\_(°ペ)\_/¯ +========================== + +[github](https://github.com/dtolnay/anyhow) +[crates.io](https://crates.io/crates/anyhow) +[docs.rs](https://docs.rs/anyhow) +[build status](https://github.com/dtolnay/anyhow/actions?query=branch%3Amaster) + +This library provides [`anyhow::Error`][Error], a trait object based error type +for easy idiomatic error handling in Rust applications. + +[Error]: https://docs.rs/anyhow/1.0/anyhow/struct.Error.html + +```toml +[dependencies] +anyhow = "1.0" +``` + +
+ +## Details + +- Use `Result`, or equivalently `anyhow::Result`, as the + return type of any fallible function. + + Within the function, use `?` to easily propagate any error that implements the + [`std::error::Error`] trait. + + ```rust + use anyhow::Result; + + fn get_cluster_info() -> Result { + let config = std::fs::read_to_string("cluster.json")?; + let map: ClusterMap = serde_json::from_str(&config)?; + Ok(map) + } + ``` + + [`std::error::Error`]: https://doc.rust-lang.org/std/error/trait.Error.html + +- Attach context to help the person troubleshooting the error understand where + things went wrong. A low-level error like "No such file or directory" can be + annoying to debug without more context about what higher level step the + application was in the middle of. + + ```rust + use anyhow::{Context, Result}; + + fn main() -> Result<()> { + ... + it.detach().context("Failed to detach the important thing")?; + + let content = std::fs::read(path) + .with_context(|| format!("Failed to read instrs from {}", path))?; + ... + } + ``` + + ```console + Error: Failed to read instrs from ./path/to/instrs.json + + Caused by: + No such file or directory (os error 2) + ``` + +- Downcasting is supported and can be by value, by shared reference, or by + mutable reference as needed. + + ```rust + // If the error was caused by redaction, then return a + // tombstone instead of the content. + match root_cause.downcast_ref::() { + Some(DataStoreError::Censored(_)) => Ok(Poll::Ready(REDACTED_CONTENT)), + None => Err(error), + } + ``` + +- If using Rust ≥ 1.65, a backtrace is captured and printed with the error if + the underlying error type does not already provide its own. 
In order to see + backtraces, they must be enabled through the environment variables described + in [`std::backtrace`]: + + - If you want panics and errors to both have backtraces, set + `RUST_BACKTRACE=1`; + - If you want only errors to have backtraces, set `RUST_LIB_BACKTRACE=1`; + - If you want only panics to have backtraces, set `RUST_BACKTRACE=1` and + `RUST_LIB_BACKTRACE=0`. + + [`std::backtrace`]: https://doc.rust-lang.org/std/backtrace/index.html#environment-variables + +- Anyhow works with any error type that has an impl of `std::error::Error`, + including ones defined in your crate. We do not bundle a `derive(Error)` macro + but you can write the impls yourself or use a standalone macro like + [thiserror]. + + ```rust + use thiserror::Error; + + #[derive(Error, Debug)] + pub enum FormatError { + #[error("Invalid header (expected {expected:?}, got {found:?})")] + InvalidHeader { + expected: String, + found: String, + }, + #[error("Missing attribute: {0}")] + MissingAttribute(String), + } + ``` + +- One-off error messages can be constructed using the `anyhow!` macro, which + supports string interpolation and produces an `anyhow::Error`. + + ```rust + return Err(anyhow!("Missing attribute: {}", missing)); + ``` + + A `bail!` macro is provided as a shorthand for the same early return. + + ```rust + bail!("Missing attribute: {}", missing); + ``` + +
+ +## No-std support + +In no_std mode, almost all of the same API is available and works the same way. +To depend on Anyhow in no_std mode, disable our default enabled "std" feature in +Cargo.toml. A global allocator is required. + +```toml +[dependencies] +anyhow = { version = "1.0", default-features = false } +``` + +With versions of Rust older than 1.81, no_std mode may require an additional +`.map_err(Error::msg)` when working with a non-Anyhow error type inside a +function that returns Anyhow's error type, as the trait that `?`-based error +conversions are defined by is only available in std in those old versions. + +
+ +## Comparison to failure + +The `anyhow::Error` type works something like `failure::Error`, but unlike +failure ours is built around the standard library's `std::error::Error` trait +rather than a separate trait `failure::Fail`. The standard library has adopted +the necessary improvements for this to be possible as part of [RFC 2504]. + +[RFC 2504]: https://github.com/rust-lang/rfcs/blob/master/text/2504-fix-error.md + +
+ +## Comparison to thiserror + +Use Anyhow if you don't care what error type your functions return, you just +want it to be easy. This is common in application code. Use [thiserror] if you +are a library that wants to design your own dedicated error type(s) so that on +failures the caller gets exactly the information that you choose. + +[thiserror]: https://github.com/dtolnay/thiserror + +
+ +#### License + + +Licensed under either of Apache License, Version +2.0 or MIT license at your option. + + +
+ + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. + diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/build.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..2086d2971c35b96febf765d9bbacbb04fd202ab0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/build.rs @@ -0,0 +1,207 @@ +#![allow(clippy::uninlined_format_args)] + +use std::env; +use std::ffi::OsString; +use std::fs; +use std::io::ErrorKind; +use std::iter; +use std::path::Path; +use std::process::{self, Command, Stdio}; +use std::str; + +#[cfg(all(feature = "backtrace", not(feature = "std")))] +compile_error! { + "`backtrace` feature without `std` feature is not supported" +} + +fn main() { + let mut error_generic_member_access = false; + if cfg!(feature = "std") { + println!("cargo:rerun-if-changed=src/nightly.rs"); + + let consider_rustc_bootstrap; + if compile_probe(false) { + // This is a nightly or dev compiler, so it supports unstable + // features regardless of RUSTC_BOOTSTRAP. No need to rerun build + // script if RUSTC_BOOTSTRAP is changed. + error_generic_member_access = true; + consider_rustc_bootstrap = false; + } else if let Some(rustc_bootstrap) = env::var_os("RUSTC_BOOTSTRAP") { + if compile_probe(true) { + // This is a stable or beta compiler for which the user has set + // RUSTC_BOOTSTRAP to turn on unstable features. Rerun build + // script if they change it. + error_generic_member_access = true; + consider_rustc_bootstrap = true; + } else if rustc_bootstrap == "1" { + // This compiler does not support the generic member access API + // in the form that anyhow expects. No need to pay attention to + // RUSTC_BOOTSTRAP. 
+ error_generic_member_access = false; + consider_rustc_bootstrap = false; + } else { + // This is a stable or beta compiler for which RUSTC_BOOTSTRAP + // is set to restrict the use of unstable features by this + // crate. + error_generic_member_access = false; + consider_rustc_bootstrap = true; + } + } else { + // Without RUSTC_BOOTSTRAP, this compiler does not support the + // generic member access API in the form that anyhow expects, but + // try again if the user turns on unstable features. + error_generic_member_access = false; + consider_rustc_bootstrap = true; + } + + if error_generic_member_access { + println!("cargo:rustc-cfg=std_backtrace"); + println!("cargo:rustc-cfg=error_generic_member_access"); + } + + if consider_rustc_bootstrap { + println!("cargo:rerun-if-env-changed=RUSTC_BOOTSTRAP"); + } + } + + let Some(rustc) = rustc_minor_version() else { + return; + }; + + if rustc >= 80 { + println!("cargo:rustc-check-cfg=cfg(anyhow_build_probe)"); + println!("cargo:rustc-check-cfg=cfg(anyhow_nightly_testing)"); + println!("cargo:rustc-check-cfg=cfg(anyhow_no_clippy_format_args)"); + println!("cargo:rustc-check-cfg=cfg(anyhow_no_core_error)"); + println!("cargo:rustc-check-cfg=cfg(error_generic_member_access)"); + println!("cargo:rustc-check-cfg=cfg(std_backtrace)"); + } + + if !error_generic_member_access && cfg!(feature = "std") { + // std::backtrace::Backtrace + // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#stabilized-apis + println!("cargo:rustc-cfg=std_backtrace"); + } + + if rustc < 81 { + // core::error::Error + // https://blog.rust-lang.org/2024/09/05/Rust-1.81.0.html#coreerrorerror + println!("cargo:rustc-cfg=anyhow_no_core_error"); + } + + if rustc < 85 { + // #[clippy::format_args] + // https://doc.rust-lang.org/1.85.1/clippy/attribs.html#clippyformat_args + println!("cargo:rustc-cfg=anyhow_no_clippy_format_args"); + } +} + +fn compile_probe(rustc_bootstrap: bool) -> bool { + if env::var_os("RUSTC_STAGE").is_some() { + // We are 
running inside rustc bootstrap. This is a highly non-standard + // environment with issues such as: + // + // https://github.com/rust-lang/cargo/issues/11138 + // https://github.com/rust-lang/rust/issues/114839 + // + // Let's just not use nightly features here. + return false; + } + + let rustc = cargo_env_var("RUSTC"); + let out_dir = cargo_env_var("OUT_DIR"); + let out_subdir = Path::new(&out_dir).join("probe"); + let probefile = Path::new("src").join("nightly.rs"); + + if let Err(err) = fs::create_dir(&out_subdir) { + if err.kind() != ErrorKind::AlreadyExists { + eprintln!("Failed to create {}: {}", out_subdir.display(), err); + process::exit(1); + } + } + + let rustc_wrapper = env::var_os("RUSTC_WRAPPER").filter(|wrapper| !wrapper.is_empty()); + let rustc_workspace_wrapper = + env::var_os("RUSTC_WORKSPACE_WRAPPER").filter(|wrapper| !wrapper.is_empty()); + let mut rustc = rustc_wrapper + .into_iter() + .chain(rustc_workspace_wrapper) + .chain(iter::once(rustc)); + let mut cmd = Command::new(rustc.next().unwrap()); + cmd.args(rustc); + + if !rustc_bootstrap { + cmd.env_remove("RUSTC_BOOTSTRAP"); + } + + cmd.stderr(Stdio::null()) + .arg("--cfg=anyhow_build_probe") + .arg("--edition=2018") + .arg("--crate-name=anyhow") + .arg("--crate-type=lib") + .arg("--cap-lints=allow") + .arg("--emit=dep-info,metadata") + .arg("--out-dir") + .arg(&out_subdir) + .arg(probefile); + + if let Some(target) = env::var_os("TARGET") { + cmd.arg("--target").arg(target); + } + + // If Cargo wants to set RUSTFLAGS, use that. + if let Ok(rustflags) = env::var("CARGO_ENCODED_RUSTFLAGS") { + if !rustflags.is_empty() { + for arg in rustflags.split('\x1f') { + cmd.arg(arg); + } + } + } + + let success = match cmd.status() { + Ok(status) => status.success(), + Err(_) => false, + }; + + // Clean up to avoid leaving nondeterministic absolute paths in the dep-info + // file in OUT_DIR, which causes nonreproducible builds in build systems + // that treat the entire OUT_DIR as an artifact. 
+ if let Err(err) = fs::remove_dir_all(&out_subdir) { + // libc::ENOTEMPTY + // Some filesystems (NFSv3) have timing issues under load where '.nfs*' + // dummy files can continue to get created for a short period after the + // probe command completes, breaking remove_dir_all. + // To be replaced with ErrorKind::DirectoryNotEmpty (Rust 1.83+). + const ENOTEMPTY: i32 = 39; + + if !(err.kind() == ErrorKind::NotFound + || (cfg!(target_os = "linux") && err.raw_os_error() == Some(ENOTEMPTY))) + { + eprintln!("Failed to clean up {}: {}", out_subdir.display(), err); + process::exit(1); + } + } + + success +} + +fn rustc_minor_version() -> Option { + let rustc = cargo_env_var("RUSTC"); + let output = Command::new(rustc).arg("--version").output().ok()?; + let version = str::from_utf8(&output.stdout).ok()?; + let mut pieces = version.split('.'); + if pieces.next() != Some("rustc 1") { + return None; + } + pieces.next()?.parse().ok() +} + +fn cargo_env_var(key: &str) -> OsString { + env::var_os(key).unwrap_or_else(|| { + eprintln!( + "Environment variable ${} is not set during execution of build script", + key, + ); + process::exit(1); + }) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/rust-toolchain.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/rust-toolchain.toml new file mode 100644 index 0000000000000000000000000000000000000000..20fe888c30ab44fa877a58de0304f4b5e2a5a5cf --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/anyhow-1.0.101/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +components = ["rust-src"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/.cargo-ok @@ -0,0 +1 @@ 
+{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..e195260b4ceefe59ad6dc7fe38d3a463f862fdd0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "20aac9d46e0852292bd43d845b6d9cb69c598c9e" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..fe71e356a65de00f56eb45a07a14ae19c0613706 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/Cargo.lock @@ -0,0 +1,917 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addr2line" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "anstream" +version = "0.6.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" + +[[package]] +name = "anstyle-lossy" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "934ff8719effd2023a48cf63e69536c1c3ced9d3895068f6f5cc9a4ff845e59b" +dependencies = [ + "anstyle", +] + +[[package]] +name = "anstyle-parse" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-svg" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3607949e9f6de49ea4bafe12f5e4fd73613ebf24795e48587302a8cc0e4bb35" +dependencies = [ + "anstream", + "anstyle", + "anstyle-lossy", + "html-escape", + 
"unicode-width", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", +] + +[[package]] +name = "automod" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebb4bd301db2e2ca1f5be131c24eb8ebf2d9559bc3744419e93baf8ddea7e670" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "backtrace" +version = "0.3.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "bitflags" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" + +[[package]] +name = "cc" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaff6f8ce506b9773fa786672d63fc7a191ffea1be33f72bbd4aeacefca9ffc8" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "4.5.60" +dependencies = [ + "automod", + "clap-cargo", + "clap_builder", + "clap_derive", + "jiff", + "rustversion", + "semver", + "shlex", + "snapbox", + "trybuild", + "trycmd", +] + +[[package]] +name = "clap-cargo" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d546f0e84ff2bfa4da1ce9b54be42285767ba39c688572ca32412a09a73851e5" +dependencies = [ + "anstyle", +] + +[[package]] +name = "clap_builder" +version = "4.5.60" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" +dependencies = [ + "anstream", + "anstyle", + "backtrace", + "clap_lex", + "strsim", + "terminal_size", + "unicase", + "unicode-width", +] + +[[package]] +name = "clap_derive" +version = "4.5.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" +dependencies = [ + "anstyle", + "heck", + "proc-macro2", + "pulldown-cmark", + "quote", + "syn", +] + +[[package]] +name = "clap_lex" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" + +[[package]] +name = "colorchoice" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" + +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "equivalent" +version = "1.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "escargot" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05a3ac187a16b5382fef8c69fd1bad123c67b7cf3932240a2d43dcdd32cded88" +dependencies = [ + "log", + "once_cell", + "serde", + "serde_json", +] + +[[package]] +name = "gimli" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" + +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "html-escape" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d1ad449764d627e22bfd7cd5e8868264fc9236e07c752972b4080cd351cb476" +dependencies = [ + "utf8-width", +] + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "jiff" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49" +dependencies = [ + "jiff-static", + "jiff-tzdb-platform", + "log", + "portable-atomic", + "portable-atomic-util", + "serde", + "windows-sys 0.59.0", +] + +[[package]] +name = "jiff-static" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "jiff-tzdb" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1283705eb0a21404d2bfd6eef2a7593d240bc42a0bdb39db0ad6fa2ec026524" + +[[package]] +name = "jiff-tzdb-platform" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "875a5a69ac2bab1a891711cf5eccbec1ce0341ea805560dcd90b7a2e925132e8" +dependencies = [ + "jiff-tzdb", +] + +[[package]] +name = "libc" +version = "0.2.155" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" + +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "miniz_oxide" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +dependencies = [ + "adler", +] + +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + +[[package]] +name = "object" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "os_pipe" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29d73ba8daf8fac13b0501d1abeddcfe21ba7401ada61a819144b6c2a4f32209" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "portable-atomic" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "proc-macro2" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "pulldown-cmark" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" +dependencies = [ + "bitflags", + "memchr", + "unicase", +] + +[[package]] +name = "quote" +version = "1.0.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustix" +version = "0.38.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +dependencies = [ + 
"bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustversion" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "semver" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" + +[[package]] +name = "serde" +version = "1.0.204" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.204" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_spanned" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40734c41988f7306bb04f0ecf60ec0f3f1caa34290e4e8ea471dcd3346483b83" +dependencies = [ + "serde", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "similar" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa42c91313f1d05da9b26f267f931cf178d4aba455b4c4622dd7355eb80c6640" + +[[package]] +name = "snapbox" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96dcfc4581e3355d70ac2ee14cfdf81dce3d85c85f1ed9e2c1d3013f53b3436b" +dependencies = [ + "anstream", + "anstyle", + "anstyle-svg", + "escargot", + "libc", + "normalize-line-endings", + "os_pipe", + "serde_json", + "similar", + "snapbox-macros", + "wait-timeout", + "windows-sys 0.59.0", +] + +[[package]] +name = "snapbox-macros" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16569f53ca23a41bb6f62e0a5084aa1661f4814a67fa33696a79073e03a664af" +dependencies = [ + "anstream", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "syn" +version = "2.0.99" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "target-triple" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42a4d50cdb458045afc8131fd91b64904da29548bcb63c7236e0844936c13078" + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "terminal_size" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" +dependencies = [ + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "toml" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f271e09bde39ab52250160a67e88577e0559ad77e9085de6e9051a2c4353f8f8" +dependencies = [ + "indexmap", + "serde", + "serde_spanned 1.0.0", + "toml_datetime 0.7.0", + "toml_parser", + "toml_writer", + "winnow 0.7.11", +] + +[[package]] +name = "toml_datetime" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_datetime" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bade1c3e902f58d73d3f294cd7f20391c1cb2fbcb643b73566bc773971df91e3" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d59a3a72298453f564e2b111fa896f8d07fabb36f51f06d7e875fc5e0b5a3ef1" +dependencies = [ + "indexmap", + "serde", + "serde_spanned 0.6.6", + "toml_datetime 0.6.6", + "winnow 0.6.13", +] + +[[package]] +name = "toml_parser" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5c1c469eda89749d2230d8156a5969a69ffe0d6d01200581cdc6110674d293e" +dependencies = [ + "winnow 0.7.11", +] + +[[package]] +name = "toml_writer" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b679217f2848de74cabd3e8fc5e6d66f40b7da40f8e1954d92054d9010690fd5" + +[[package]] +name = "trybuild" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65af40ad689f2527aebbd37a0a816aea88ff5f774ceabe99de5be02f2f91dae2" +dependencies = [ + "glob", + 
"serde", + "serde_derive", + "serde_json", + "target-triple", + "termcolor", + "toml", +] + +[[package]] +name = "trycmd" +version = "0.15.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8b5cf29388862aac065d6597ac9c8e842d1cc827cb50f7c32f11d29442eaae4" +dependencies = [ + "anstream", + "automod", + "glob", + "humantime", + "humantime-serde", + "rayon", + "serde", + "shlex", + "snapbox", + "toml_edit", +] + +[[package]] +name = "unicase" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + +[[package]] +name = "utf8-width" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86bd8d4e895da8537e5315b8254664e6b769c4ff3db18321b297a1e7004392e3" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + +[[package]] +name = "winapi-util" +version = "0.1.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = 
"0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winnow" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" +dependencies = [ + "memchr", +] + +[[package]] +name = "winnow" +version = "0.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..500a515455930e159feee43de8eb95b30500fe1d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/Cargo.toml @@ -0,0 +1,635 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. 
+# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.74" +name = "clap" +version = "4.5.60" +build = false +include = [ + "build.rs", + "src/**/*", + "Cargo.toml", + "LICENSE*", + "README.md", + "examples/**/*", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "A simple to use, efficient, and full-featured Command Line Argument Parser" +readme = "README.md" +keywords = [ + "argument", + "cli", + "arg", + "parser", + "parse", +] +categories = ["command-line-interface"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/clap-rs/clap" + +[package.metadata.docs.rs] +features = ["unstable-doc"] +rustdoc-args = ["--generate-link-to-definition"] + +[package.metadata.playground] +features = ["unstable-doc"] + +[package.metadata.release] +shared-version = true +tag-name = "v{{version}}" + +[[package.metadata.release.pre-release-replacements]] +file = "CHANGELOG.md" +search = "Unreleased" +replace = "{{version}}" +min = 1 + +[[package.metadata.release.pre-release-replacements]] +file = "CHANGELOG.md" +search = '\.\.\.HEAD' +replace = "...{{tag_name}}" +exactly = 1 + +[[package.metadata.release.pre-release-replacements]] +file = "CHANGELOG.md" +search = "ReleaseDate" +replace = "{{date}}" +min = 1 + +[[package.metadata.release.pre-release-replacements]] +file = "CHANGELOG.md" +search = "" +replace = """ + +## [Unreleased] - ReleaseDate +""" +exactly = 1 + +[[package.metadata.release.pre-release-replacements]] +file = "CHANGELOG.md" +search = "" +replace = """ + +[Unreleased]: https://github.com/clap-rs/clap/compare/{{tag_name}}...HEAD""" +exactly = 1 + +[[package.metadata.release.pre-release-replacements]] +file = "CITATION.cff" +search = "^date-released: ....-..-.." 
+replace = "date-released: {{date}}" + +[[package.metadata.release.pre-release-replacements]] +file = "CITATION.cff" +search = '^version: .+\..+\..+' +replace = "version: {{version}}" + +[[package.metadata.release.pre-release-replacements]] +file = "src/lib.rs" +search = 'blob/v.+\..+\..+/CHANGELOG.md' +replace = "blob/v{{version}}/CHANGELOG.md" +exactly = 1 + +[features] +cargo = ["clap_builder/cargo"] +color = ["clap_builder/color"] +debug = [ + "clap_builder/debug", + "clap_derive?/debug", +] +default = [ + "std", + "color", + "help", + "usage", + "error-context", + "suggestions", +] +deprecated = [ + "clap_builder/deprecated", + "clap_derive?/deprecated", +] +derive = ["dep:clap_derive"] +env = ["clap_builder/env"] +error-context = ["clap_builder/error-context"] +help = ["clap_builder/help"] +std = ["clap_builder/std"] +string = ["clap_builder/string"] +suggestions = ["clap_builder/suggestions"] +unicode = ["clap_builder/unicode"] +unstable-derive-ui-tests = [] +unstable-doc = [ + "clap_builder/unstable-doc", + "derive", +] +unstable-ext = ["clap_builder/unstable-ext"] +unstable-markdown = ["clap_derive/unstable-markdown"] +unstable-styles = ["clap_builder/unstable-styles"] +unstable-v5 = [ + "clap_builder/unstable-v5", + "clap_derive?/unstable-v5", + "deprecated", +] +usage = ["clap_builder/usage"] +wrap_help = ["clap_builder/wrap_help"] + +[lib] +name = "clap" +path = "src/lib.rs" +bench = false + +[[bin]] +name = "stdio-fixture" +path = "src/bin/stdio-fixture.rs" + +[[example]] +name = "01_quick" +path = "examples/tutorial_builder/01_quick.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "01_quick_derive" +path = "examples/tutorial_derive/01_quick.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "02_app_settings" +path = "examples/tutorial_builder/02_app_settings.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "02_app_settings_derive" +path = 
"examples/tutorial_derive/02_app_settings.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "02_apps" +path = "examples/tutorial_builder/02_apps.rs" +doc-scrape-examples = true + +[[example]] +name = "02_apps_derive" +path = "examples/tutorial_derive/02_apps.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "02_crate" +path = "examples/tutorial_builder/02_crate.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "02_crate_derive" +path = "examples/tutorial_derive/02_crate.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "03_01_flag_bool" +path = "examples/tutorial_builder/03_01_flag_bool.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "03_01_flag_bool_derive" +path = "examples/tutorial_derive/03_01_flag_bool.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "03_01_flag_count" +path = "examples/tutorial_builder/03_01_flag_count.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "03_01_flag_count_derive" +path = "examples/tutorial_derive/03_01_flag_count.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "03_02_option" +path = "examples/tutorial_builder/03_02_option.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "03_02_option_derive" +path = "examples/tutorial_derive/03_02_option.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "03_02_option_mult" +path = "examples/tutorial_builder/03_02_option_mult.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "03_02_option_mult_derive" +path = "examples/tutorial_derive/03_02_option_mult.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "03_03_positional" +path = 
"examples/tutorial_builder/03_03_positional.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "03_03_positional_derive" +path = "examples/tutorial_derive/03_03_positional.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "03_03_positional_mult" +path = "examples/tutorial_builder/03_03_positional_mult.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "03_03_positional_mult_derive" +path = "examples/tutorial_derive/03_03_positional_mult.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "03_04_subcommands" +path = "examples/tutorial_builder/03_04_subcommands.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "03_04_subcommands_alt_derive" +path = "examples/tutorial_derive/03_04_subcommands_alt.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "03_04_subcommands_derive" +path = "examples/tutorial_derive/03_04_subcommands.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "03_05_default_values" +path = "examples/tutorial_builder/03_05_default_values.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "03_05_default_values_derive" +path = "examples/tutorial_derive/03_05_default_values.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "03_06_optional_derive" +path = "examples/tutorial_derive/03_06_optional.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "03_06_required" +path = "examples/tutorial_builder/03_06_required.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "04_01_enum" +path = "examples/tutorial_builder/04_01_enum.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "04_01_enum_derive" +path = "examples/tutorial_derive/04_01_enum.rs" 
+doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "04_01_possible" +path = "examples/tutorial_builder/04_01_possible.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "04_02_parse" +path = "examples/tutorial_builder/04_02_parse.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "04_02_parse_derive" +path = "examples/tutorial_derive/04_02_parse.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "04_02_validate" +path = "examples/tutorial_builder/04_02_validate.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "04_02_validate_derive" +path = "examples/tutorial_derive/04_02_validate.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "04_03_relations" +path = "examples/tutorial_builder/04_03_relations.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "04_03_relations_derive" +path = "examples/tutorial_derive/04_03_relations.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "04_04_custom" +path = "examples/tutorial_builder/04_04_custom.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "04_04_custom_derive" +path = "examples/tutorial_derive/04_04_custom.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "05_01_assert" +path = "examples/tutorial_builder/05_01_assert.rs" +test = true +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "05_01_assert_derive" +path = "examples/tutorial_derive/05_01_assert.rs" +test = true +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "busybox" +path = "examples/multicall-busybox.rs" +doc-scrape-examples = true + +[[example]] +name = "cargo-example" +path = "examples/cargo-example.rs" +doc-scrape-examples = true 
+required-features = [ + "cargo", + "color", +] + +[[example]] +name = "cargo-example-derive" +path = "examples/cargo-example-derive.rs" +doc-scrape-examples = true +required-features = [ + "derive", + "color", +] + +[[example]] +name = "demo" +path = "examples/demo.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "escaped-positional" +path = "examples/escaped-positional.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "escaped-positional-derive" +path = "examples/escaped-positional-derive.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "find" +path = "examples/find.rs" +doc-scrape-examples = true +required-features = ["cargo"] + +[[example]] +name = "git" +path = "examples/git.rs" + +[[example]] +name = "git-derive" +path = "examples/git-derive.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "hostname" +path = "examples/multicall-hostname.rs" +doc-scrape-examples = true + +[[example]] +name = "interop_augment_args" +path = "examples/derive_ref/augment_args.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "interop_augment_subcommands" +path = "examples/derive_ref/augment_subcommands.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "interop_flatten_hand_args" +path = "examples/derive_ref/flatten_hand_args.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "interop_hand_subcommand" +path = "examples/derive_ref/hand_subcommand.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] +name = "pacman" +path = "examples/pacman.rs" + +[[example]] +name = "repl" +path = "examples/repl.rs" +doc-scrape-examples = true +required-features = ["help"] + +[[example]] +name = "repl-derive" +path = "examples/repl-derive.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[[example]] 
+name = "typed-derive" +path = "examples/typed-derive/main.rs" +doc-scrape-examples = true +required-features = ["derive"] + +[dependencies.clap_builder] +version = "=4.5.60" +default-features = false + +[dependencies.clap_derive] +version = "=4.5.55" +optional = true + +[dev-dependencies.automod] +version = "1.0.14" + +[dev-dependencies.clap-cargo] +version = "0.15.0" +default-features = false + +[dev-dependencies.jiff] +version = "0.2.3" + +[dev-dependencies.rustversion] +version = "1.0.15" + +[dev-dependencies.semver] +version = "1.0.26" + +[dev-dependencies.shlex] +version = "1.3.0" + +[dev-dependencies.snapbox] +version = "0.6.16" +features = ["term-svg"] + +[dev-dependencies.trybuild] +version = "1.0.91" + +[dev-dependencies.trycmd] +version = "0.15.3" +features = [ + "color-auto", + "diff", + "examples", +] +default-features = false + +[lints.clippy] +assigning_clones = "allow" +blocks_in_conditions = "allow" +bool_assert_comparison = "allow" +branches_sharing_code = "allow" +checked_conversions = "warn" +collapsible_else_if = "allow" +create_dir = "warn" +dbg_macro = "warn" +debug_assert_with_mut_call = "warn" +doc_markdown = "warn" +empty_enums = "warn" +enum_glob_use = "warn" +expl_impl_clone_on_copy = "warn" +explicit_deref_methods = "warn" +explicit_into_iter_loop = "warn" +fallible_impl_from = "warn" +filter_map_next = "warn" +flat_map_option = "warn" +float_cmp_const = "warn" +fn_params_excessive_bools = "warn" +from_iter_instead_of_collect = "warn" +if_same_then_else = "allow" +implicit_clone = "warn" +imprecise_flops = "warn" +inconsistent_struct_constructor = "warn" +inefficient_to_string = "warn" +infinite_loop = "warn" +invalid_upcast_comparisons = "warn" +large_digit_groups = "warn" +large_stack_arrays = "warn" +large_types_passed_by_value = "warn" +let_and_return = "allow" +linkedlist = "warn" +lossy_float_literal = "warn" +macro_use_imports = "warn" +mem_forget = "warn" +multiple_bound_locations = "allow" +mutex_integer = "warn" 
+needless_continue = "allow" +needless_for_each = "warn" +negative_feature_names = "warn" +path_buf_push_overwrite = "warn" +ptr_as_ptr = "warn" +rc_mutex = "warn" +redundant_feature_names = "warn" +ref_option_ref = "warn" +rest_pat_in_fully_bound_structs = "warn" +result_large_err = "allow" +same_functions_in_if_condition = "warn" +self_named_module_files = "warn" +semicolon_if_nothing_returned = "warn" +string_add_assign = "warn" +string_lit_as_bytes = "warn" +todo = "warn" +trait_duplication_in_bounds = "warn" +uninlined_format_args = "warn" +verbose_file_reads = "warn" +zero_sized_map_values = "warn" + +[lints.rust] +unnameable_types = "allow" +unreachable_pub = "warn" +unsafe_op_in_unsafe_fn = "warn" +unused_lifetimes = "warn" +unused_macro_rules = "warn" +unused_qualifications = "warn" + +[lints.rust.rust_2018_idioms] +level = "warn" +priority = -1 + +[profile.bench] +lto = true +codegen-units = 1 + +[profile.dev] +panic = "abort" + +[profile.release] +lto = true +codegen-units = 1 +panic = "abort" + +[profile.test] +opt-level = 1 diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..2d23b6879aba4aba00cdc93075b42377d2b3908e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/Cargo.toml.orig @@ -0,0 +1,538 @@ +[workspace] +resolver = "2" +members = [ + "clap_bench", + "clap_builder", + "clap_derive", + "clap_lex", + "clap_complete", + "clap_complete_nushell", + "clap_mangen", +] + +[workspace.package] +repository = "https://github.com/clap-rs/clap" +license = "MIT OR Apache-2.0" +edition = "2021" +rust-version = "1.74" # MSRV +include = [ + "build.rs", + "src/**/*", + "Cargo.toml", + "LICENSE*", + "README.md", + "examples/**/*" +] + +[workspace.lints.rust] +rust_2018_idioms = { level = "warn", priority = -1 } +unnameable_types = "allow" 
+unreachable_pub = "warn" +unsafe_op_in_unsafe_fn = "warn" +unused_lifetimes = "warn" +unused_macro_rules = "warn" +unused_qualifications = "warn" + +[workspace.lints.clippy] +bool_assert_comparison = "allow" +branches_sharing_code = "allow" +checked_conversions = "warn" +collapsible_else_if = "allow" +create_dir = "warn" +dbg_macro = "warn" +debug_assert_with_mut_call = "warn" +doc_markdown = "warn" +empty_enums = "warn" +enum_glob_use = "warn" +expl_impl_clone_on_copy = "warn" +explicit_deref_methods = "warn" +explicit_into_iter_loop = "warn" +fallible_impl_from = "warn" +filter_map_next = "warn" +flat_map_option = "warn" +float_cmp_const = "warn" +fn_params_excessive_bools = "warn" +from_iter_instead_of_collect = "warn" +if_same_then_else = "allow" +implicit_clone = "warn" +imprecise_flops = "warn" +inconsistent_struct_constructor = "warn" +inefficient_to_string = "warn" +infinite_loop = "warn" +invalid_upcast_comparisons = "warn" +large_digit_groups = "warn" +large_stack_arrays = "warn" +large_types_passed_by_value = "warn" +let_and_return = "allow" # sometimes good to name what you are returning +linkedlist = "warn" +lossy_float_literal = "warn" +macro_use_imports = "warn" +mem_forget = "warn" +mutex_integer = "warn" +needless_continue = "allow" +needless_for_each = "warn" +negative_feature_names = "warn" +path_buf_push_overwrite = "warn" +ptr_as_ptr = "warn" +rc_mutex = "warn" +redundant_feature_names = "warn" +ref_option_ref = "warn" +rest_pat_in_fully_bound_structs = "warn" +result_large_err = "allow" +same_functions_in_if_condition = "warn" +self_named_module_files = "warn" +semicolon_if_nothing_returned = "warn" +# str_to_string = "warn" +# string_add = "warn" +string_add_assign = "warn" +string_lit_as_bytes = "warn" +todo = "warn" +trait_duplication_in_bounds = "warn" +uninlined_format_args = "warn" +verbose_file_reads = "warn" +# wildcard_imports = "warn" +zero_sized_map_values = "warn" +# Fix later: +multiple_bound_locations = "allow" +assigning_clones 
= "allow" +blocks_in_conditions = "allow" + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +codegen-units = 1 +lto = true +# debug = "line-tables-only" # requires Cargo 1.71 + +[package] +name = "clap" +version = "4.5.60" +description = "A simple to use, efficient, and full-featured Command Line Argument Parser" +categories = ["command-line-interface"] +keywords = [ + "argument", + "cli", + "arg", + "parser", + "parse" +] +repository.workspace = true +license.workspace = true +edition.workspace = true +rust-version.workspace = true +include.workspace = true + +[package.metadata.docs.rs] +features = ["unstable-doc"] +rustdoc-args = ["--generate-link-to-definition"] + +[package.metadata.playground] +features = ["unstable-doc"] + +[package.metadata.release] +shared-version = true +tag-name = "v{{version}}" +pre-release-replacements = [ + {file="CHANGELOG.md", search="Unreleased", replace="{{version}}", min=1}, + {file="CHANGELOG.md", search="\\.\\.\\.HEAD", replace="...{{tag_name}}", exactly=1}, + {file="CHANGELOG.md", search="ReleaseDate", replace="{{date}}", min=1}, + {file="CHANGELOG.md", search="", replace="\n## [Unreleased] - ReleaseDate\n", exactly=1}, + {file="CHANGELOG.md", search="", replace="\n[Unreleased]: https://github.com/clap-rs/clap/compare/{{tag_name}}...HEAD", exactly=1}, + {file="CITATION.cff", search="^date-released: ....-..-..", replace="date-released: {{date}}"}, + {file="CITATION.cff", search="^version: .+\\..+\\..+", replace="version: {{version}}"}, + {file="src/lib.rs", search="blob/v.+\\..+\\..+/CHANGELOG.md", replace="blob/v{{version}}/CHANGELOG.md", exactly=1}, +] + +[features] +default = [ + "std", + "color", + "help", + "usage", + "error-context", + "suggestions", +] +debug = ["clap_builder/debug", "clap_derive?/debug"] # Enables debug messages +unstable-doc = ["clap_builder/unstable-doc", "derive"] # for docs.rs + +# Used in default +std = ["clap_builder/std"] # support for no_std in a backwards-compatible way 
+color = ["clap_builder/color"] +help = ["clap_builder/help"] +usage = ["clap_builder/usage"] +error-context = ["clap_builder/error-context"] +suggestions = ["clap_builder/suggestions"] + +# Optional +deprecated = ["clap_builder/deprecated", "clap_derive?/deprecated"] # Guided experience to prepare for next breaking release (at different stages of development, this may become default) +derive = ["dep:clap_derive"] +cargo = ["clap_builder/cargo"] # Disable if you're not using Cargo, enables Cargo-env-var-dependent macros +wrap_help = ["clap_builder/wrap_help"] +env = ["clap_builder/env"] # Use environment variables during arg parsing +unicode = ["clap_builder/unicode"] # Support for unicode characters in arguments and help messages +string = ["clap_builder/string"] # Allow runtime generated strings + +# In-work features +unstable-v5 = ["clap_builder/unstable-v5", "clap_derive?/unstable-v5", "deprecated"] +unstable-ext = ["clap_builder/unstable-ext"] +unstable-styles = ["clap_builder/unstable-styles"] # deprecated +unstable-derive-ui-tests = [] +unstable-markdown = ["clap_derive/unstable-markdown"] + +[lib] +bench = false + +[dependencies] +clap_builder = { path = "./clap_builder", version = "=4.5.60", default-features = false } +clap_derive = { path = "./clap_derive", version = "=4.5.55", optional = true } + +[dev-dependencies] +trybuild = "1.0.91" +rustversion = "1.0.15" +# Cutting out `filesystem` feature +trycmd = { version = "0.15.3", default-features = false, features = ["color-auto", "diff", "examples"] } +jiff = "0.2.3" +snapbox = { version = "0.6.16", features = ["term-svg"] } +shlex = "1.3.0" +automod = "1.0.14" +clap-cargo = { version = "0.15.0", default-features = false } +semver = "1.0.26" + +[[example]] +name = "demo" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "cargo-example" +required-features = ["cargo", "color"] +doc-scrape-examples = true + +[[example]] +name = "cargo-example-derive" +required-features = 
["derive", "color"] +doc-scrape-examples = true + +[[example]] +name = "escaped-positional" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "escaped-positional-derive" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "find" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "git-derive" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "typed-derive" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "busybox" +path = "examples/multicall-busybox.rs" +doc-scrape-examples = true + +[[example]] +name = "hostname" +path = "examples/multicall-hostname.rs" +doc-scrape-examples = true + +[[example]] +name = "repl" +path = "examples/repl.rs" +required-features = ["help"] +doc-scrape-examples = true + +[[example]] +name = "repl-derive" +path = "examples/repl-derive.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "01_quick" +path = "examples/tutorial_builder/01_quick.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "02_apps" +path = "examples/tutorial_builder/02_apps.rs" +doc-scrape-examples = true + +[[example]] +name = "02_crate" +path = "examples/tutorial_builder/02_crate.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "02_app_settings" +path = "examples/tutorial_builder/02_app_settings.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "03_01_flag_bool" +path = "examples/tutorial_builder/03_01_flag_bool.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "03_01_flag_count" +path = "examples/tutorial_builder/03_01_flag_count.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "03_02_option" +path = "examples/tutorial_builder/03_02_option.rs" +required-features = ["cargo"] +doc-scrape-examples = 
true + +[[example]] +name = "03_02_option_mult" +path = "examples/tutorial_builder/03_02_option_mult.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "03_03_positional" +path = "examples/tutorial_builder/03_03_positional.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "03_03_positional_mult" +path = "examples/tutorial_builder/03_03_positional_mult.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "03_04_subcommands" +path = "examples/tutorial_builder/03_04_subcommands.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "03_05_default_values" +path = "examples/tutorial_builder/03_05_default_values.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "03_06_required" +path = "examples/tutorial_builder/03_06_required.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "04_01_possible" +path = "examples/tutorial_builder/04_01_possible.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "04_01_enum" +path = "examples/tutorial_builder/04_01_enum.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "04_02_parse" +path = "examples/tutorial_builder/04_02_parse.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "04_02_validate" +path = "examples/tutorial_builder/04_02_validate.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "04_03_relations" +path = "examples/tutorial_builder/04_03_relations.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "04_04_custom" +path = "examples/tutorial_builder/04_04_custom.rs" +required-features = ["cargo"] +doc-scrape-examples = true + +[[example]] +name = "05_01_assert" +path = "examples/tutorial_builder/05_01_assert.rs" +required-features = ["cargo"] +test = 
true +doc-scrape-examples = true + +[[example]] +name = "01_quick_derive" +path = "examples/tutorial_derive/01_quick.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "02_apps_derive" +path = "examples/tutorial_derive/02_apps.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "02_crate_derive" +path = "examples/tutorial_derive/02_crate.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "02_app_settings_derive" +path = "examples/tutorial_derive/02_app_settings.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "03_01_flag_bool_derive" +path = "examples/tutorial_derive/03_01_flag_bool.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "03_01_flag_count_derive" +path = "examples/tutorial_derive/03_01_flag_count.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "03_02_option_derive" +path = "examples/tutorial_derive/03_02_option.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "03_02_option_mult_derive" +path = "examples/tutorial_derive/03_02_option_mult.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "03_03_positional_derive" +path = "examples/tutorial_derive/03_03_positional.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "03_03_positional_mult_derive" +path = "examples/tutorial_derive/03_03_positional_mult.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "03_04_subcommands_derive" +path = "examples/tutorial_derive/03_04_subcommands.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "03_04_subcommands_alt_derive" +path = "examples/tutorial_derive/03_04_subcommands_alt.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = 
"03_05_default_values_derive" +path = "examples/tutorial_derive/03_05_default_values.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "03_06_optional_derive" +path = "examples/tutorial_derive/03_06_optional.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "04_01_enum_derive" +path = "examples/tutorial_derive/04_01_enum.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "04_02_parse_derive" +path = "examples/tutorial_derive/04_02_parse.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "04_02_validate_derive" +path = "examples/tutorial_derive/04_02_validate.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "04_03_relations_derive" +path = "examples/tutorial_derive/04_03_relations.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "04_04_custom_derive" +path = "examples/tutorial_derive/04_04_custom.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "05_01_assert_derive" +path = "examples/tutorial_derive/05_01_assert.rs" +required-features = ["derive"] +test = true +doc-scrape-examples = true + +[[example]] +name = "interop_augment_args" +path = "examples/derive_ref/augment_args.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "interop_augment_subcommands" +path = "examples/derive_ref/augment_subcommands.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "interop_hand_subcommand" +path = "examples/derive_ref/hand_subcommand.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[[example]] +name = "interop_flatten_hand_args" +path = "examples/derive_ref/flatten_hand_args.rs" +required-features = ["derive"] +doc-scrape-examples = true + +[profile.test] +opt-level = 1 + +[profile.bench] +lto = true +codegen-units = 1 + +[lints] 
+workspace = true diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..8f71f43fee3f78649d238238cbde51e6d7055c82 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/LICENSE-APACHE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..a2d01088b6ce55e837a6d193943580f978fb2d2e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright (c) Individual contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b957926c2eb2290df6e0a41a72bc03e78cfff2f7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/README.md @@ -0,0 +1,49 @@ +# clap + +> **Command Line Argument Parser for Rust** + +[![Crates.io](https://img.shields.io/crates/v/clap?style=flat-square)](https://crates.io/crates/clap) +[![Crates.io](https://img.shields.io/crates/d/clap?style=flat-square)](https://crates.io/crates/clap) +[![License](https://img.shields.io/badge/license-Apache%202.0-blue?style=flat-square)](LICENSE-APACHE) +[![License](https://img.shields.io/badge/license-MIT-blue?style=flat-square)](LICENSE-MIT) +[![Build Status](https://img.shields.io/github/actions/workflow/status/clap-rs/clap/ci.yml?branch=master&style=flat-square)](https://github.com/clap-rs/clap/actions/workflows/ci.yml?query=branch%3Amaster) +[![Coverage Status](https://img.shields.io/coveralls/github/clap-rs/clap/master?style=flat-square)](https://coveralls.io/github/clap-rs/clap?branch=master) +[![Contributors](https://img.shields.io/github/contributors/clap-rs/clap?style=flat-square)](https://github.com/clap-rs/clap/graphs/contributors) + +Dual-licensed under [Apache 2.0](LICENSE-APACHE) or [MIT](LICENSE-MIT). + +## Get Started + +```console +$ cargo add clap +``` + +## About + +Create your command-line parser, with all of the bells and whistles, declaratively or procedurally. 
+ +For more details, see: +- [docs.rs](https://docs.rs/clap/latest/clap/) +- [examples](examples/) + +## Sponsors + + +### Gold + +[![](https://opencollective.com/clap/tiers/gold.svg?width=890)](https://opencollective.com/clap) + + +### Silver + +[![](https://opencollective.com/clap/tiers/silver.svg?width=890)](https://opencollective.com/clap) + + +### Bronze + +[![](https://opencollective.com/clap/tiers/bronze.svg?width=890)](https://opencollective.com/clap) + + +### Backer + +[![](https://opencollective.com/clap/tiers/backer.svg?width=890)](https://opencollective.com/clap) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f98c370c820e62cd1b3ff2a8552963bd5fca6e1f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/README.md @@ -0,0 +1,16 @@ +# Examples + +We try to focus our documentation on the [four types of +documentation](https://documentation.divio.com/). Examples fit into this by +providing: +- [Cookbook / How-To Guides](https://docs.rs/clap/latest/clap/_cookbook/index.html) +- Tutorials ([derive](https://docs.rs/clap/latest/clap/_derive/_tutorial/index.html), [builder](https://docs.rs/clap/latest/clap/_tutorial/index.html)) + +This directory contains the source for the above. 
+ +## Contributing + +New examples should fit within the above structure and support their narrative +- Add the example to [Cargo.toml](../Cargo.toml) for any `required-features` +- Document how the example works with a `.md` file which will be verified using [trycmd](https://docs.rs/trycmd) +- Pull the `.rs` and `.md` file into the appropriate module doc comment to be accessible on docs.rs diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example-derive.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example-derive.md new file mode 100644 index 0000000000000000000000000000000000000000..a3dcef386f80eb96130d6afe690391c41913228b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example-derive.md @@ -0,0 +1,38 @@ +For more on creating a custom subcommand, see [the cargo +book](https://doc.rust-lang.org/cargo/reference/external-tools.html#custom-subcommands). +The crate [`clap-cargo`](https://github.com/crate-ci/clap-cargo) can help in +mimicking cargo's interface. 
+ +The help looks like: +```console +$ cargo-example-derive --help +Usage: cargo + +Commands: + example-derive A simple to use, efficient, and full-featured Command Line Argument Parser + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help Print help + +$ cargo-example-derive example-derive --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: cargo example-derive [OPTIONS] + +Options: + --manifest-path + -h, --help Print help + -V, --version Print version + +``` + +Then to directly invoke the command, run: +```console +$ cargo-example-derive example-derive +None + +$ cargo-example-derive example-derive --manifest-path Cargo.toml +Some("Cargo.toml") + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example-derive.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example-derive.rs new file mode 100644 index 0000000000000000000000000000000000000000..80ad93fac8fa1adab16ae0c53df2a0d3c18cfc1f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example-derive.rs @@ -0,0 +1,31 @@ +use clap::Parser; + +#[derive(Parser)] // requires `derive` feature +#[command(name = "cargo")] +#[command(bin_name = "cargo")] +#[command(styles = CLAP_STYLING)] +enum CargoCli { + ExampleDerive(ExampleDeriveArgs), +} + +// See also `clap_cargo::style::CLAP_STYLING` +pub const CLAP_STYLING: clap::builder::styling::Styles = clap::builder::styling::Styles::styled() + .header(clap_cargo::style::HEADER) + .usage(clap_cargo::style::USAGE) + .literal(clap_cargo::style::LITERAL) + .placeholder(clap_cargo::style::PLACEHOLDER) + .error(clap_cargo::style::ERROR) + .valid(clap_cargo::style::VALID) + .invalid(clap_cargo::style::INVALID); + +#[derive(clap::Args)] +#[command(version, about, long_about = None)] +struct ExampleDeriveArgs { + #[arg(long)] + manifest_path: Option, +} + +fn main() { + let 
CargoCli::ExampleDerive(args) = CargoCli::parse(); + println!("{:?}", args.manifest_path); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example.md new file mode 100644 index 0000000000000000000000000000000000000000..edf8ee18a31b290ec9a0e9450efc0c7e80c276ff --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example.md @@ -0,0 +1,38 @@ +For more on creating a custom subcommand, see [the cargo +book](https://doc.rust-lang.org/cargo/reference/external-tools.html#custom-subcommands). +The crate [`clap-cargo`](https://github.com/crate-ci/clap-cargo) can help in +mimicking cargo's interface. + +The help looks like: +```console +$ cargo-example --help +Usage: cargo + +Commands: + example A simple to use, efficient, and full-featured Command Line Argument Parser + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help Print help + +$ cargo-example example --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: cargo example [OPTIONS] + +Options: + --manifest-path + -h, --help Print help + -V, --version Print version + +``` + +Then to directly invoke the command, run: +```console +$ cargo-example example +None + +$ cargo-example example --manifest-path Cargo.toml +Some("Cargo.toml") + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example.rs new file mode 100644 index 0000000000000000000000000000000000000000..ecf74e5848099438acf5f39d7e80ef300f317200 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/cargo-example.rs @@ -0,0 +1,29 @@ +fn main() { + let cmd = clap::Command::new("cargo") + .bin_name("cargo") + .styles(CLAP_STYLING) + 
.subcommand_required(true) + .subcommand( + clap::command!("example").arg( + clap::arg!(--"manifest-path" ) + .value_parser(clap::value_parser!(std::path::PathBuf)), + ), + ); + let matches = cmd.get_matches(); + let matches = match matches.subcommand() { + Some(("example", matches)) => matches, + _ => unreachable!("clap should ensure we don't get here"), + }; + let manifest_path = matches.get_one::("manifest-path"); + println!("{manifest_path:?}"); +} + +// See also `clap_cargo::style::CLAP_STYLING` +pub const CLAP_STYLING: clap::builder::styling::Styles = clap::builder::styling::Styles::styled() + .header(clap_cargo::style::HEADER) + .usage(clap_cargo::style::USAGE) + .literal(clap_cargo::style::LITERAL) + .placeholder(clap_cargo::style::PLACEHOLDER) + .error(clap_cargo::style::ERROR) + .valid(clap_cargo::style::VALID) + .invalid(clap_cargo::style::INVALID); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/demo.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/demo.md new file mode 100644 index 0000000000000000000000000000000000000000..772d98dca07d2ae12ab9d3a7ea0be704d6b00974 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/demo.md @@ -0,0 +1,17 @@ +```console +$ demo --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: demo[EXE] [OPTIONS] --name + +Options: + -n, --name Name of the person to greet + -c, --count Number of times to greet [default: 1] + -h, --help Print help + -V, --version Print version + +$ demo --name Me +Hello Me! 
+ +``` +*(version number and `.exe` extension on windows replaced by placeholders)* diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/demo.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/demo.rs new file mode 100644 index 0000000000000000000000000000000000000000..d107972aa4de7be08c01a5e6e5280b6f14053b52 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/demo.rs @@ -0,0 +1,22 @@ +use clap::Parser; + +/// Simple program to greet a person +#[derive(Parser, Debug)] +#[command(version, about, long_about = None)] +struct Args { + /// Name of the person to greet + #[arg(short, long)] + name: String, + + /// Number of times to greet + #[arg(short, long, default_value_t = 1)] + count: u8, +} + +fn main() { + let args = Args::parse(); + + for _ in 0..args.count { + println!("Hello {}!", args.name); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/augment_args.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/augment_args.rs new file mode 100644 index 0000000000000000000000000000000000000000..39d837cfaf56708271f494be7bad2f80589aa51a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/augment_args.rs @@ -0,0 +1,27 @@ +use clap::{arg, Args, Command, FromArgMatches as _}; + +#[derive(Args, Debug)] +struct DerivedArgs { + #[arg(short, long)] + derived: bool, +} + +fn main() { + let cli = Command::new("CLI").arg(arg!(-b - -built).action(clap::ArgAction::SetTrue)); + // Augment built args with derived args + let cli = DerivedArgs::augment_args(cli); + + let matches = cli.get_matches(); + println!("Value of built: {:?}", matches.get_flag("built")); + println!( + "Value of derived via ArgMatches: {:?}", + matches.get_flag("derived") + ); + + // Since DerivedArgs implements FromArgMatches, we can extract it from the unstructured 
ArgMatches. + // This is the main benefit of using derived arguments. + let derived_matches = DerivedArgs::from_arg_matches(&matches) + .map_err(|err| err.exit()) + .unwrap(); + println!("Value of derived: {derived_matches:#?}"); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/augment_subcommands.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/augment_subcommands.rs new file mode 100644 index 0000000000000000000000000000000000000000..51cbe75d2a286f615e8df17e667322df26a137b9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/augment_subcommands.rs @@ -0,0 +1,21 @@ +use clap::{Command, FromArgMatches as _, Parser, Subcommand as _}; + +#[derive(Parser, Debug)] +enum Subcommands { + Derived { + #[arg(short, long)] + derived_flag: bool, + }, +} + +fn main() { + let cli = Command::new("Built CLI"); + // Augment with derived subcommands + let cli = Subcommands::augment_subcommands(cli); + + let matches = cli.get_matches(); + let derived_subcommands = Subcommands::from_arg_matches(&matches) + .map_err(|err| err.exit()) + .unwrap(); + println!("Derived subcommands: {derived_subcommands:#?}"); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/flatten_hand_args.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/flatten_hand_args.rs new file mode 100644 index 0000000000000000000000000000000000000000..36aac09323aef0a4499041e5f5c46c337e9234b7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/flatten_hand_args.rs @@ -0,0 +1,91 @@ +use clap::error::Error; +use clap::{Arg, ArgAction, ArgMatches, Args, Command, FromArgMatches, Parser}; + +#[derive(Debug)] +struct CliArgs { + foo: bool, + bar: bool, + quuz: Option, +} + +impl FromArgMatches for CliArgs { + fn from_arg_matches(matches: 
&ArgMatches) -> Result { + let mut matches = matches.clone(); + Self::from_arg_matches_mut(&mut matches) + } + fn from_arg_matches_mut(matches: &mut ArgMatches) -> Result { + Ok(Self { + foo: matches.get_flag("foo"), + bar: matches.get_flag("bar"), + quuz: matches.remove_one::("quuz"), + }) + } + fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> { + let mut matches = matches.clone(); + self.update_from_arg_matches_mut(&mut matches) + } + fn update_from_arg_matches_mut(&mut self, matches: &mut ArgMatches) -> Result<(), Error> { + self.foo |= matches.get_flag("foo"); + self.bar |= matches.get_flag("bar"); + if let Some(quuz) = matches.remove_one::("quuz") { + self.quuz = Some(quuz); + } + Ok(()) + } +} + +impl Args for CliArgs { + fn augment_args(cmd: Command) -> Command { + cmd.arg( + Arg::new("foo") + .short('f') + .long("foo") + .action(ArgAction::SetTrue), + ) + .arg( + Arg::new("bar") + .short('b') + .long("bar") + .action(ArgAction::SetTrue), + ) + .arg( + Arg::new("quuz") + .short('q') + .long("quuz") + .action(ArgAction::Set), + ) + } + fn augment_args_for_update(cmd: Command) -> Command { + cmd.arg( + Arg::new("foo") + .short('f') + .long("foo") + .action(ArgAction::SetTrue), + ) + .arg( + Arg::new("bar") + .short('b') + .long("bar") + .action(ArgAction::SetTrue), + ) + .arg( + Arg::new("quuz") + .short('q') + .long("quuz") + .action(ArgAction::Set), + ) + } +} + +#[derive(Parser, Debug)] +struct Cli { + #[arg(short, long)] + top_level: bool, + #[command(flatten)] + more_args: CliArgs, +} + +fn main() { + let args = Cli::parse(); + println!("{args:#?}"); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/hand_subcommand.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/hand_subcommand.rs new file mode 100644 index 0000000000000000000000000000000000000000..059a535602a43a78846f8ad556454a715ff2f8fa --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/hand_subcommand.rs @@ -0,0 +1,80 @@ +#![allow(dead_code)] +use clap::error::{Error, ErrorKind}; +use clap::{ArgMatches, Args as _, Command, FromArgMatches, Parser, Subcommand}; + +#[derive(Parser, Debug)] +struct AddArgs { + name: Vec, +} +#[derive(Parser, Debug)] +struct RemoveArgs { + #[arg(short, long)] + force: bool, + name: Vec, +} + +#[derive(Debug)] +enum CliSub { + Add(AddArgs), + Remove(RemoveArgs), +} + +impl FromArgMatches for CliSub { + fn from_arg_matches(matches: &ArgMatches) -> Result { + match matches.subcommand() { + Some(("add", args)) => Ok(Self::Add(AddArgs::from_arg_matches(args)?)), + Some(("remove", args)) => Ok(Self::Remove(RemoveArgs::from_arg_matches(args)?)), + Some((_, _)) => Err(Error::raw( + ErrorKind::InvalidSubcommand, + "Valid subcommands are `add` and `remove`", + )), + None => Err(Error::raw( + ErrorKind::MissingSubcommand, + "Valid subcommands are `add` and `remove`", + )), + } + } + fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> { + match matches.subcommand() { + Some(("add", args)) => *self = Self::Add(AddArgs::from_arg_matches(args)?), + Some(("remove", args)) => *self = Self::Remove(RemoveArgs::from_arg_matches(args)?), + Some((_, _)) => { + return Err(Error::raw( + ErrorKind::InvalidSubcommand, + "Valid subcommands are `add` and `remove`", + )) + } + None => (), + }; + Ok(()) + } +} + +impl Subcommand for CliSub { + fn augment_subcommands(cmd: Command) -> Command { + cmd.subcommand(AddArgs::augment_args(Command::new("add"))) + .subcommand(RemoveArgs::augment_args(Command::new("remove"))) + .subcommand_required(true) + } + fn augment_subcommands_for_update(cmd: Command) -> Command { + cmd.subcommand(AddArgs::augment_args(Command::new("add"))) + .subcommand(RemoveArgs::augment_args(Command::new("remove"))) + .subcommand_required(true) + } + fn has_subcommand(name: &str) -> bool { + matches!(name, "add" | 
"remove") + } +} + +#[derive(Parser, Debug)] +struct Cli { + #[arg(short, long)] + top_level: bool, + #[command(subcommand)] + subcommand: CliSub, +} + +fn main() { + let args = Cli::parse(); + println!("{args:#?}"); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/interop_tests.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/interop_tests.md new file mode 100644 index 0000000000000000000000000000000000000000..b2f56466becefb32211676c51f70300c50f94927 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/derive_ref/interop_tests.md @@ -0,0 +1,248 @@ +Following are tests for the interop examples in this directory. + +## Augment Args + +```console +$ interop_augment_args +Value of built: false +Value of derived via ArgMatches: false +Value of derived: DerivedArgs { + derived: false, +} + +``` + +```console +$ interop_augment_args -b --derived +Value of built: true +Value of derived via ArgMatches: true +Value of derived: DerivedArgs { + derived: true, +} + +``` + +```console +$ interop_augment_args -d --built +Value of built: true +Value of derived via ArgMatches: true +Value of derived: DerivedArgs { + derived: true, +} + +``` + +```console +$ interop_augment_args --unknown +? failed +error: unexpected argument '--unknown' found + +Usage: interop_augment_args[EXE] [OPTIONS] + +For more information, try '--help'. + +``` + +## Augment Subcommands + +```console +$ interop_augment_subcommands +? failed +error: a subcommand is required but one was not provided +``` + +```console +$ interop_augment_subcommands derived +Derived subcommands: Derived { + derived_flag: false, +} + +``` + +```console +$ interop_augment_subcommands derived --derived-flag +Derived subcommands: Derived { + derived_flag: true, +} + +``` + +```console +$ interop_augment_subcommands derived --unknown +? 
failed +error: unexpected argument '--unknown' found + +Usage: interop_augment_subcommands[EXE] derived [OPTIONS] + +For more information, try '--help'. + +``` + +```console +$ interop_augment_subcommands unknown +? failed +error: unrecognized subcommand 'unknown' + +Usage: interop_augment_subcommands[EXE] [COMMAND] + +For more information, try '--help'. + +``` + +## Hand-Implemented Subcommand + +```console +$ interop_hand_subcommand +? failed +Usage: interop_hand_subcommand[EXE] [OPTIONS] + +Commands: + add + remove + help Print this message or the help of the given subcommand(s) + +Options: + -t, --top-level + -h, --help Print help + +``` + +```console +$ interop_hand_subcommand add +Cli { + top_level: false, + subcommand: Add( + AddArgs { + name: [], + }, + ), +} + +``` + +```console +$ interop_hand_subcommand add a b c +Cli { + top_level: false, + subcommand: Add( + AddArgs { + name: [ + "a", + "b", + "c", + ], + }, + ), +} + +``` + +```console +$ interop_hand_subcommand add --unknown +? failed +error: unexpected argument '--unknown' found + + tip: to pass '--unknown' as a value, use '-- --unknown' + +Usage: interop_hand_subcommand[EXE] add [NAME]... + +For more information, try '--help'. + +``` + +```console +$ interop_hand_subcommand remove +Cli { + top_level: false, + subcommand: Remove( + RemoveArgs { + force: false, + name: [], + }, + ), +} + +``` + +```console +$ interop_hand_subcommand remove --force a b c +Cli { + top_level: false, + subcommand: Remove( + RemoveArgs { + force: true, + name: [ + "a", + "b", + "c", + ], + }, + ), +} + +``` + +```console +$ interop_hand_subcommand unknown +? failed +error: unrecognized subcommand 'unknown' + +Usage: interop_hand_subcommand[EXE] [OPTIONS] + +For more information, try '--help'. 
+ +``` + +## Flatten Hand-Implemented Args + +```console +$ interop_flatten_hand_args +Cli { + top_level: false, + more_args: CliArgs { + foo: false, + bar: false, + quuz: None, + }, +} + +``` + +```console +$ interop_flatten_hand_args -f --bar +Cli { + top_level: false, + more_args: CliArgs { + foo: true, + bar: true, + quuz: None, + }, +} + +``` + +```console +$ interop_flatten_hand_args --quuz abc +Cli { + top_level: false, + more_args: CliArgs { + foo: false, + bar: false, + quuz: Some( + "abc", + ), + }, +} + +``` + +```console +$ interop_flatten_hand_args --unknown +? failed +error: unexpected argument '--unknown' found + +Usage: interop_flatten_hand_args[EXE] [OPTIONS] + +For more information, try '--help'. + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional-derive.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional-derive.md new file mode 100644 index 0000000000000000000000000000000000000000..82990b59f1364b4a02724ffae038d108230a289f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional-derive.md @@ -0,0 +1,60 @@ +**This requires enabling the [`derive` feature flag][crate::_features].** + +You can use `--` to escape further arguments. + +Let's see what this looks like in the help: +```console +$ escaped-positional-derive --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: escaped-positional-derive[EXE] [OPTIONS] [-- ...] + +Arguments: + [SLOP]... + +Options: + -f + -p + -h, --help Print help + -V, --version Print version + +``` + +Here is a baseline without any arguments: +```console +$ escaped-positional-derive +-f used: false +-p's value: None +'slops' values: [] + +``` + +Notice that we can't pass positional arguments before `--`: +```console +$ escaped-positional-derive foo bar +? 
failed +error: unexpected argument 'foo' found + +Usage: escaped-positional-derive[EXE] [OPTIONS] [-- ...] + +For more information, try '--help'. + +``` + +But you can after: +```console +$ escaped-positional-derive -f -p=bob -- sloppy slop slop +-f used: true +-p's value: Some("bob") +'slops' values: ["sloppy", "slop", "slop"] + +``` + +As mentioned, the parser will directly pass everything through: +```console +$ escaped-positional-derive -- -f -p=bob sloppy slop slop +-f used: false +-p's value: None +'slops' values: ["-f", "-p=bob", "sloppy", "slop", "slop"] + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional-derive.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional-derive.rs new file mode 100644 index 0000000000000000000000000000000000000000..decffab9c8e382edc0549fbd18ba94c637b3084d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional-derive.rs @@ -0,0 +1,25 @@ +use clap::Parser; + +#[derive(Parser)] // requires `derive` feature +#[command(version, about, long_about = None)] +struct Cli { + #[arg(short = 'f')] + eff: bool, + + #[arg(short = 'p', value_name = "PEAR")] + pea: Option, + + #[arg(last = true)] + slop: Vec, +} + +fn main() { + let args = Cli::parse(); + + // This is what will happen with `myprog -f -p=bob -- sloppy slop slop`... + println!("-f used: {:?}", args.eff); // -f used: true + println!("-p's value: {:?}", args.pea); // -p's value: Some("bob") + println!("'slops' values: {:?}", args.slop); // 'slops' values: Some(["sloppy", "slop", "slop"]) + + // Continued program logic goes here... 
+} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional.md new file mode 100644 index 0000000000000000000000000000000000000000..d94e3993c8b1d4596e37a6c826d57f467bf877ee --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional.md @@ -0,0 +1,60 @@ +**This requires enabling the [`cargo` feature flag][crate::_features].** + +You can use `--` to escape further arguments. + +Let's see what this looks like in the help: +```console +$ escaped-positional --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: escaped-positional[EXE] [OPTIONS] [-- ...] + +Arguments: + [SLOP]... + +Options: + -f + -p + -h, --help Print help + -V, --version Print version + +``` + +Here is a baseline without any arguments: +```console +$ escaped-positional +-f used: false +-p's value: None +'slops' values: [] + +``` + +Notice that we can't pass positional arguments before `--`: +```console +$ escaped-positional foo bar +? failed +error: unexpected argument 'foo' found + +Usage: escaped-positional[EXE] [OPTIONS] [-- ...] + +For more information, try '--help'. 
+ +``` + +But you can after: +```console +$ escaped-positional -f -p=bob -- sloppy slop slop +-f used: true +-p's value: Some("bob") +'slops' values: ["sloppy", "slop", "slop"] + +``` + +As mentioned, the parser will directly pass everything through: +```console +$ escaped-positional -- -f -p=bob sloppy slop slop +-f used: false +-p's value: None +'slops' values: ["-f", "-p=bob", "sloppy", "slop", "slop"] + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional.rs new file mode 100644 index 0000000000000000000000000000000000000000..d107e47db4b4af180d18615586a714f7e699e20f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/escaped-positional.rs @@ -0,0 +1,32 @@ +use clap::{arg, command, value_parser, ArgAction}; + +fn main() { + let matches = command!() // requires `cargo` feature + .arg(arg!(eff: -f).action(ArgAction::SetTrue)) + .arg(arg!(pea: -p ).value_parser(value_parser!(String))) + .arg( + // Indicates that `slop` is only accessible after `--`. + arg!(slop: [SLOP]) + .num_args(1..) + .last(true) + .value_parser(value_parser!(String)), + ) + .get_matches(); + + // This is what will happen with `myprog -f -p=bob -- sloppy slop slop`... + + // -f used: true + println!("-f used: {:?}", matches.get_flag("eff")); + // -p's value: Some("bob") + println!("-p's value: {:?}", matches.get_one::("pea")); + // 'slops' values: Some(["sloppy", "slop", "slop"]) + println!( + "'slops' values: {:?}", + matches + .get_many::("slop") + .map(|vals| vals.collect::>()) + .unwrap_or_default() + ); + + // Continued program logic goes here... 
+} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/find.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/find.md new file mode 100644 index 0000000000000000000000000000000000000000..c52cbd391ce005d391e934299938b4e2e2c2fcad --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/find.md @@ -0,0 +1,79 @@ +`find` is an example of position-sensitive flags + +```console +$ find --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: find[EXE] [OPTIONS] + +Options: + -h, --help Print help + -V, --version Print version + +TESTS: + --empty File is empty and is either a regular file or a directory + --name Base of file name (the path with the leading directories removed) matches shell + pattern pattern + +OPERATORS: + -o, --or expr2 is not evaluate if exp1 is true + -a, --and Same as `expr1 expr1` + +$ find --empty -o --name .keep +[ + ( + "empty", + Bool( + true, + ), + ), + ( + "or", + Bool( + true, + ), + ), + ( + "name", + String( + ".keep", + ), + ), +] + +$ find --empty -o --name .keep -o --name foo +[ + ( + "empty", + Bool( + true, + ), + ), + ( + "or", + Bool( + true, + ), + ), + ( + "name", + String( + ".keep", + ), + ), + ( + "or", + Bool( + true, + ), + ), + ( + "name", + String( + "foo", + ), + ), +] + +``` + diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/find.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/find.rs new file mode 100644 index 0000000000000000000000000000000000000000..61ce066f4dc74e35133d531d2e436f2cb52cde78 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/find.rs @@ -0,0 +1,126 @@ +use std::collections::BTreeMap; + +use clap::{command, value_parser, Arg, ArgAction, ArgGroup, ArgMatches, Command}; + +fn main() { + let matches = cli().get_matches(); + let values = 
Value::from_matches(&matches); + println!("{values:#?}"); +} + +fn cli() -> Command { + command!() + .group(ArgGroup::new("tests").multiple(true)) + .next_help_heading("TESTS") + .args([ + position_sensitive_flag(Arg::new("empty")) + .long("empty") + .action(ArgAction::Append) + .help("File is empty and is either a regular file or a directory") + .group("tests"), + Arg::new("name") + .long("name") + .action(ArgAction::Append) + .help("Base of file name (the path with the leading directories removed) matches shell pattern pattern") + .group("tests") + ]) + .group(ArgGroup::new("operators").multiple(true)) + .next_help_heading("OPERATORS") + .args([ + position_sensitive_flag(Arg::new("or")) + .short('o') + .long("or") + .action(ArgAction::Append) + .help("expr2 is not evaluate if exp1 is true") + .group("operators"), + position_sensitive_flag(Arg::new("and")) + .short('a') + .long("and") + .action(ArgAction::Append) + .help("Same as `expr1 expr1`") + .group("operators"), + ]) +} + +fn position_sensitive_flag(arg: Arg) -> Arg { + // Flags don't track the position of each occurrence, so we need to emulate flags with + // value-less options to get the same result + arg.num_args(0) + .value_parser(value_parser!(bool)) + .default_missing_value("true") + .default_value("false") +} + +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub enum Value { + Bool(bool), + String(String), +} + +impl Value { + pub fn from_matches(matches: &ArgMatches) -> Vec<(clap::Id, Self)> { + let mut values = BTreeMap::new(); + for id in matches.ids() { + if matches.try_get_many::(id.as_str()).is_ok() { + // ignore groups + continue; + } + let value_source = matches + .value_source(id.as_str()) + .expect("id came from matches"); + if value_source != clap::parser::ValueSource::CommandLine { + // Any other source just gets tacked on at the end (like default values) + continue; + } + if Self::extract::(matches, id, &mut values) { + continue; + } + if Self::extract::(matches, id, &mut values) { + 
continue; + } + unimplemented!("unknown type for {id}: {matches:?}"); + } + values.into_values().collect::>() + } + + fn extract + Send + Sync + 'static>( + matches: &ArgMatches, + id: &clap::Id, + output: &mut BTreeMap, + ) -> bool { + match matches.try_get_many::(id.as_str()) { + Ok(Some(values)) => { + for (value, index) in values.zip( + matches + .indices_of(id.as_str()) + .expect("id came from matches"), + ) { + output.insert(index, (id.clone(), value.clone().into())); + } + true + } + Ok(None) => { + unreachable!("`ids` only reports what is present") + } + Err(clap::parser::MatchesError::UnknownArgument { .. }) => { + unreachable!("id came from matches") + } + Err(clap::parser::MatchesError::Downcast { .. }) => false, + Err(_) => { + unreachable!("id came from matches") + } + } + } +} + +impl From for Value { + fn from(other: String) -> Self { + Self::String(other) + } +} + +impl From for Value { + fn from(other: bool) -> Self { + Self::Bool(other) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git-derive.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git-derive.md new file mode 100644 index 0000000000000000000000000000000000000000..490f4e2e52a3e9f654f87909354bf97e905165b2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git-derive.md @@ -0,0 +1,172 @@ +**This requires enabling the [`derive` feature flag][crate::_features].** + +Git is an example of several common subcommand patterns. + +Help: +```console +$ git-derive +? 
failed +A fictional versioning CLI + +Usage: git-derive[EXE] + +Commands: + clone Clones repos + diff Compare two commits + push pushes things + add adds things + stash + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help Print help + +$ git-derive help +A fictional versioning CLI + +Usage: git-derive[EXE] + +Commands: + clone Clones repos + diff Compare two commits + push pushes things + add adds things + stash + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help Print help + +$ git-derive help add +adds things + +Usage: git-derive[EXE] add ... + +Arguments: + ... Stuff to add + +Options: + -h, --help Print help + +``` + +A basic argument: +```console +$ git-derive add +? failed +adds things + +Usage: git-derive[EXE] add ... + +Arguments: + ... Stuff to add + +Options: + -h, --help Print help + +$ git-derive add Cargo.toml Cargo.lock +Adding ["Cargo.toml", "Cargo.lock"] + +``` + +Default subcommand: +```console +$ git-derive stash -h +Usage: git-derive[EXE] stash [OPTIONS] + git-derive[EXE] stash push [OPTIONS] + git-derive[EXE] stash pop [STASH] + git-derive[EXE] stash apply [STASH] + git-derive[EXE] stash help [COMMAND]... + +Options: + -m, --message + -h, --help Print help + +git-derive[EXE] stash push: + -m, --message + -h, --help Print help + +git-derive[EXE] stash pop: + -h, --help Print help + [STASH] + +git-derive[EXE] stash apply: + -h, --help Print help + [STASH] + +git-derive[EXE] stash help: +Print this message or the help of the given subcommand(s) + [COMMAND]... 
Print help for the subcommand(s) + +$ git-derive stash push -h +Usage: git-derive[EXE] stash push [OPTIONS] + +Options: + -m, --message + -h, --help Print help + +$ git-derive stash pop -h +Usage: git-derive[EXE] stash pop [STASH] + +Arguments: + [STASH] + +Options: + -h, --help Print help + +$ git-derive stash -m "Prototype" +Pushing StashPushArgs { message: Some("Prototype") } + +$ git-derive stash pop +Popping None + +$ git-derive stash push -m "Prototype" +Pushing StashPushArgs { message: Some("Prototype") } + +$ git-derive stash pop +Popping None + +``` + +External subcommands: +```console +$ git-derive custom-tool arg1 --foo bar +Calling out to "custom-tool" with ["arg1", "--foo", "bar"] + +``` + +Last argument: +```console +$ git-derive diff --help +Compare two commits + +Usage: git-derive[EXE] diff [OPTIONS] [COMMIT] [COMMIT] [-- ] + +Arguments: + [COMMIT] + [COMMIT] + [PATH] + +Options: + --color[=] [default: auto] [possible values: always, auto, never] + -h, --help Print help + +$ git-derive diff +Diffing stage..worktree (color=auto) + +$ git-derive diff ./src +Diffing stage..worktree ./src (color=auto) + +$ git-derive diff HEAD ./src +Diffing HEAD..worktree ./src (color=auto) + +$ git-derive diff HEAD~~ -- HEAD +Diffing HEAD~~..worktree HEAD (color=auto) + +$ git-derive diff --color +Diffing stage..worktree (color=always) + +$ git-derive diff --color=never +Diffing stage..worktree (color=never) + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git-derive.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git-derive.rs new file mode 100644 index 0000000000000000000000000000000000000000..9d171373d9f163a80568f3cf42fe0f6dd0d2b324 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git-derive.rs @@ -0,0 +1,165 @@ +use std::ffi::OsStr; +use std::ffi::OsString; +use std::path::PathBuf; + +use clap::{Args, Parser, Subcommand, ValueEnum}; + +/// A 
fictional versioning CLI +#[derive(Debug, Parser)] // requires `derive` feature +#[command(name = "git")] +#[command(about = "A fictional versioning CLI", long_about = None)] +struct Cli { + #[command(subcommand)] + command: Commands, +} + +#[derive(Debug, Subcommand)] +enum Commands { + /// Clones repos + #[command(arg_required_else_help = true)] + Clone { + /// The remote to clone + remote: String, + }, + /// Compare two commits + Diff { + #[arg(value_name = "COMMIT")] + base: Option, + #[arg(value_name = "COMMIT")] + head: Option, + #[arg(last = true)] + path: Option, + #[arg( + long, + require_equals = true, + value_name = "WHEN", + num_args = 0..=1, + default_value_t = ColorWhen::Auto, + default_missing_value = "always", + value_enum + )] + color: ColorWhen, + }, + /// pushes things + #[command(arg_required_else_help = true)] + Push { + /// The remote to target + remote: String, + }, + /// adds things + #[command(arg_required_else_help = true)] + Add { + /// Stuff to add + #[arg(required = true)] + path: Vec, + }, + Stash(StashArgs), + #[command(external_subcommand)] + External(Vec), +} + +#[derive(ValueEnum, Copy, Clone, Debug, PartialEq, Eq)] +enum ColorWhen { + Always, + Auto, + Never, +} + +impl std::fmt::Display for ColorWhen { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.to_possible_value() + .expect("no values are skipped") + .get_name() + .fmt(f) + } +} + +#[derive(Debug, Args)] +#[command(args_conflicts_with_subcommands = true)] +#[command(flatten_help = true)] +struct StashArgs { + #[command(subcommand)] + command: Option, + + #[command(flatten)] + push: StashPushArgs, +} + +#[derive(Debug, Subcommand)] +enum StashCommands { + Push(StashPushArgs), + Pop { stash: Option }, + Apply { stash: Option }, +} + +#[derive(Debug, Args)] +struct StashPushArgs { + #[arg(short, long)] + message: Option, +} + +fn main() { + let args = Cli::parse(); + + match args.command { + Commands::Clone { remote } => { + println!("Cloning 
{remote}"); + } + Commands::Diff { + mut base, + mut head, + mut path, + color, + } => { + if path.is_none() { + path = head; + head = None; + if path.is_none() { + path = base; + base = None; + } + } + let base = base + .as_deref() + .map(|s| s.to_str().unwrap()) + .unwrap_or("stage"); + let head = head + .as_deref() + .map(|s| s.to_str().unwrap()) + .unwrap_or("worktree"); + let path = path.as_deref().unwrap_or_else(|| OsStr::new("")); + println!( + "Diffing {}..{} {} (color={})", + base, + head, + path.to_string_lossy(), + color + ); + } + Commands::Push { remote } => { + println!("Pushing to {remote}"); + } + Commands::Add { path } => { + println!("Adding {path:?}"); + } + Commands::Stash(stash) => { + let stash_cmd = stash.command.unwrap_or(StashCommands::Push(stash.push)); + match stash_cmd { + StashCommands::Push(push) => { + println!("Pushing {push:?}"); + } + StashCommands::Pop { stash } => { + println!("Popping {stash:?}"); + } + StashCommands::Apply { stash } => { + println!("Applying {stash:?}"); + } + } + } + Commands::External(args) => { + println!("Calling out to {:?} with {:?}", &args[0], &args[1..]); + } + } + + // Continued program logic goes here... +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git.md new file mode 100644 index 0000000000000000000000000000000000000000..f389c8cbe58196fe836aa3f6f766efb1d7f170b5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git.md @@ -0,0 +1,170 @@ +Git is an example of several common subcommand patterns. + +Help: +```console +$ git +? 
failed +A fictional versioning CLI + +Usage: git[EXE] + +Commands: + clone Clones repos + diff Compare two commits + push pushes things + add adds things + stash + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help Print help + +$ git help +A fictional versioning CLI + +Usage: git[EXE] + +Commands: + clone Clones repos + diff Compare two commits + push pushes things + add adds things + stash + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help Print help + +$ git help add +adds things + +Usage: git[EXE] add ... + +Arguments: + ... Stuff to add + +Options: + -h, --help Print help + +``` + +A basic argument: +```console +$ git add +? failed +adds things + +Usage: git[EXE] add ... + +Arguments: + ... Stuff to add + +Options: + -h, --help Print help + +$ git add Cargo.toml Cargo.lock +Adding ["Cargo.toml", "Cargo.lock"] + +``` + +Default subcommand: +```console +$ git stash -h +Usage: git[EXE] stash [OPTIONS] + git[EXE] stash push [OPTIONS] + git[EXE] stash pop [STASH] + git[EXE] stash apply [STASH] + git[EXE] stash help [COMMAND]... + +Options: + -m, --message + -h, --help Print help + +git[EXE] stash push: + -m, --message + -h, --help Print help + +git[EXE] stash pop: + -h, --help Print help + [STASH] + +git[EXE] stash apply: + -h, --help Print help + [STASH] + +git[EXE] stash help: +Print this message or the help of the given subcommand(s) + [COMMAND]... 
Print help for the subcommand(s) + +$ git stash push -h +Usage: git[EXE] stash push [OPTIONS] + +Options: + -m, --message + -h, --help Print help + +$ git stash pop -h +Usage: git[EXE] stash pop [STASH] + +Arguments: + [STASH] + +Options: + -h, --help Print help + +$ git stash -m "Prototype" +Pushing Some("Prototype") + +$ git stash pop +Popping None + +$ git stash push -m "Prototype" +Pushing Some("Prototype") + +$ git stash pop +Popping None + +``` + +External subcommands: +```console +$ git custom-tool arg1 --foo bar +Calling out to "custom-tool" with ["arg1", "--foo", "bar"] + +``` + +Last argument: +```console +$ git diff --help +Compare two commits + +Usage: git[EXE] diff [OPTIONS] [COMMIT] [COMMIT] [-- ] + +Arguments: + [COMMIT] + [COMMIT] + [PATH] + +Options: + --color[=] [default: auto] [possible values: always, auto, never] + -h, --help Print help + +$ git diff +Diffing stage..worktree (color=auto) + +$ git diff ./src +Diffing stage..worktree ./src (color=auto) + +$ git diff HEAD ./src +Diffing HEAD..worktree ./src (color=auto) + +$ git diff HEAD~~ -- HEAD +Diffing HEAD~~..worktree HEAD (color=auto) + +$ git diff --color +Diffing stage..worktree (color=always) + +$ git diff --color=never +Diffing stage..worktree (color=never) + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git.rs new file mode 100644 index 0000000000000000000000000000000000000000..fc8fd01f79e315274a6199ed36b1f21ca9a8b08e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/git.rs @@ -0,0 +1,138 @@ +use std::ffi::OsString; +use std::path::PathBuf; + +use clap::{arg, Command}; + +fn cli() -> Command { + Command::new("git") + .about("A fictional versioning CLI") + .subcommand_required(true) + .arg_required_else_help(true) + .allow_external_subcommands(true) + .subcommand( + Command::new("clone") + .about("Clones repos") + 
.arg(arg!( "The remote to clone")) + .arg_required_else_help(true), + ) + .subcommand( + Command::new("diff") + .about("Compare two commits") + .arg(arg!(base: [COMMIT])) + .arg(arg!(head: [COMMIT])) + .arg(arg!(path: [PATH]).last(true)) + .arg( + arg!(--color ) + .value_parser(["always", "auto", "never"]) + .num_args(0..=1) + .require_equals(true) + .default_value("auto") + .default_missing_value("always"), + ), + ) + .subcommand( + Command::new("push") + .about("pushes things") + .arg(arg!( "The remote to target")) + .arg_required_else_help(true), + ) + .subcommand( + Command::new("add") + .about("adds things") + .arg_required_else_help(true) + .arg(arg!( ... "Stuff to add").value_parser(clap::value_parser!(PathBuf))), + ) + .subcommand( + Command::new("stash") + .args_conflicts_with_subcommands(true) + .flatten_help(true) + .args(push_args()) + .subcommand(Command::new("push").args(push_args())) + .subcommand(Command::new("pop").arg(arg!([STASH]))) + .subcommand(Command::new("apply").arg(arg!([STASH]))), + ) +} + +fn push_args() -> Vec { + vec![arg!(-m --message )] +} + +fn main() { + let matches = cli().get_matches(); + + match matches.subcommand() { + Some(("clone", sub_matches)) => { + println!( + "Cloning {}", + sub_matches.get_one::("REMOTE").expect("required") + ); + } + Some(("diff", sub_matches)) => { + let color = sub_matches + .get_one::("color") + .map(|s| s.as_str()) + .expect("defaulted in clap"); + + let mut base = sub_matches.get_one::("base").map(|s| s.as_str()); + let mut head = sub_matches.get_one::("head").map(|s| s.as_str()); + let mut path = sub_matches.get_one::("path").map(|s| s.as_str()); + if path.is_none() { + path = head; + head = None; + if path.is_none() { + path = base; + base = None; + } + } + let base = base.unwrap_or("stage"); + let head = head.unwrap_or("worktree"); + let path = path.unwrap_or(""); + println!("Diffing {base}..{head} {path} (color={color})"); + } + Some(("push", sub_matches)) => { + println!( + "Pushing to {}", + 
sub_matches.get_one::("REMOTE").expect("required") + ); + } + Some(("add", sub_matches)) => { + let paths = sub_matches + .get_many::("PATH") + .into_iter() + .flatten() + .collect::>(); + println!("Adding {paths:?}"); + } + Some(("stash", sub_matches)) => { + let stash_command = sub_matches.subcommand().unwrap_or(("push", sub_matches)); + match stash_command { + ("apply", sub_matches) => { + let stash = sub_matches.get_one::("STASH"); + println!("Applying {stash:?}"); + } + ("pop", sub_matches) => { + let stash = sub_matches.get_one::("STASH"); + println!("Popping {stash:?}"); + } + ("push", sub_matches) => { + let message = sub_matches.get_one::("message"); + println!("Pushing {message:?}"); + } + (name, _) => { + unreachable!("Unsupported subcommand `{name}`") + } + } + } + Some((ext, sub_matches)) => { + let args = sub_matches + .get_many::("") + .into_iter() + .flatten() + .collect::>(); + println!("Calling out to {ext:?} with {args:?}"); + } + _ => unreachable!(), // If all subcommands are defined above, anything else is unreachable!() + } + + // Continued program logic goes here... +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-busybox.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-busybox.md new file mode 100644 index 0000000000000000000000000000000000000000..e84b263514d1760d88fd002d0fbefac7c82263f3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-busybox.md @@ -0,0 +1,39 @@ +See the documentation for [`Command::multicall`][crate::Command::multicall] for rationale. + +This example omits every command except true and false, +which are the most trivial to implement, +```console +$ busybox true +? 0 + +$ busybox false +? 
1 + +``` +*Note: without the links setup, we can't demonstrate the multicall behavior* + +But includes the `--install` option as an example of why it can be useful +for the main program to take arguments that aren't applet subcommands. +```console +$ busybox --install +? failed +... + +``` + +Though users must pass something: +```console +$ busybox +? failed +Usage: busybox [OPTIONS] [APPLET] + +APPLETS: + true does nothing successfully + false does nothing unsuccessfully + help Print this message or the help of the given subcommand(s) + +Options: + --install Install hardlinks for all subcommands in path + -h, --help Print help + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-busybox.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-busybox.rs new file mode 100644 index 0000000000000000000000000000000000000000..9260e1d70e81b0b7a2435602d45c429b9046c14c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-busybox.rs @@ -0,0 +1,47 @@ +use std::path::PathBuf; +use std::process::exit; + +use clap::{value_parser, Arg, ArgAction, Command}; + +fn applet_commands() -> [Command; 2] { + [ + Command::new("true").about("does nothing successfully"), + Command::new("false").about("does nothing unsuccessfully"), + ] +} + +fn main() { + let cmd = Command::new(env!("CARGO_CRATE_NAME")) + .multicall(true) + .subcommand( + Command::new("busybox") + .arg_required_else_help(true) + .subcommand_value_name("APPLET") + .subcommand_help_heading("APPLETS") + .arg( + Arg::new("install") + .long("install") + .help("Install hardlinks for all subcommands in path") + .exclusive(true) + .action(ArgAction::Set) + .default_missing_value("/usr/local/bin") + .value_parser(value_parser!(PathBuf)), + ) + .subcommands(applet_commands()), + ) + .subcommands(applet_commands()); + + let matches = cmd.get_matches(); + let mut subcommand = matches.subcommand(); + if 
let Some(("busybox", cmd)) = subcommand { + if cmd.contains_id("install") { + unimplemented!("Make hardlinks to the executable here"); + } + subcommand = cmd.subcommand(); + } + match subcommand { + Some(("false", _)) => exit(1), + Some(("true", _)) => exit(0), + _ => unreachable!("parser should ensure only valid subcommand names are used"), + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-hostname.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-hostname.md new file mode 100644 index 0000000000000000000000000000000000000000..0df1274e152149cf2a87baca9d6a108dfb1eca78 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-hostname.md @@ -0,0 +1,10 @@ +See the documentation for [`Command::multicall`][crate::Command::multicall] for rationale. + +This example omits the implementation of displaying address config + +```console +$ hostname +www + +``` +*Note: without the links setup, we can't demonstrate the multicall behavior* diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-hostname.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-hostname.rs new file mode 100644 index 0000000000000000000000000000000000000000..b57680a5c11b4222f35687aaef7f99f5eb0dcaf0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/multicall-hostname.rs @@ -0,0 +1,17 @@ +use clap::Command; + +fn main() { + let cmd = Command::new(env!("CARGO_CRATE_NAME")) + .multicall(true) + .arg_required_else_help(true) + .subcommand_value_name("APPLET") + .subcommand_help_heading("APPLETS") + .subcommand(Command::new("hostname").about("show hostname part of FQDN")) + .subcommand(Command::new("dnsdomainname").about("show domain name part of FQDN")); + + match cmd.get_matches().subcommand_name() { + Some("hostname") => println!("www"), + 
Some("dnsdomainname") => println!("example.com"), + _ => unreachable!("parser should ensure only valid subcommand names are used"), + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/pacman.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/pacman.md new file mode 100644 index 0000000000000000000000000000000000000000..1b8e58d978d0d725ee1ad5d32c5cf6812238bc6d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/pacman.md @@ -0,0 +1,83 @@ +[`pacman`](https://wiki.archlinux.org/index.php/pacman) defines subcommands via flags. + +Here, `-S` is a short flag subcommand: +```console +$ pacman -S package +Installing package... + +``` + +Here `--sync` is a long flag subcommand: +```console +$ pacman --sync package +Installing package... + +``` + +Now the short flag subcommand (`-S`) with a long flag: +```console +$ pacman -S --search name +Searching for name... + +``` + +And the various forms of short flags that work: +```console +$ pacman -S -s name +Searching for name... + +$ pacman -Ss name +Searching for name... + +``` +*(users can "stack" short subcommands with short flags or with other short flag subcommands)* + +In the help, this looks like: +```console +$ pacman -h +package manager utility + +Usage: pacman[EXE] + +Commands: + query, -Q, --query Query the package database. + sync, -S, --sync Synchronize packages. + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help Print help + -V, --version Print version + +$ pacman -S -h +Synchronize packages. + +Usage: pacman[EXE] {sync|--sync|-S} [OPTIONS] [package]... + +Arguments: + [package]... packages + +Options: + -s, --search ... search remote repositories for matching strings + -i, --info view package information + -h, --help Print help + +``` + +And errors: +```console +$ pacman -S -s foo -i bar +? failed +error: the argument '--search ...' 
cannot be used with '--info' + +Usage: pacman[EXE] {sync|--sync|-S} --search ... ... + +For more information, try '--help'. + +``` + +
+ +**NOTE:** Keep in mind that subcommands, flags, and long flags are *case sensitive*: `-Q` and `-q` are different flags/subcommands. For example, you can have both `-Q` subcommand and `-q` flag, and they will be properly disambiguated. +Let's make a quick program to illustrate. + +
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/pacman.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/pacman.rs new file mode 100644 index 0000000000000000000000000000000000000000..07c14013b02163c4400af6f0cf1a36d7a323d2e2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/pacman.rs @@ -0,0 +1,110 @@ +use clap::{Arg, ArgAction, Command}; + +fn main() { + let matches = Command::new("pacman") + .about("package manager utility") + .version("5.2.1") + .subcommand_required(true) + .arg_required_else_help(true) + // Query subcommand + // + // Only a few of its arguments are implemented below. + .subcommand( + Command::new("query") + .short_flag('Q') + .long_flag("query") + .about("Query the package database.") + .arg( + Arg::new("search") + .short('s') + .long("search") + .help("search locally installed packages for matching strings") + .conflicts_with("info") + .action(ArgAction::Set) + .num_args(1..), + ) + .arg( + Arg::new("info") + .long("info") + .short('i') + .conflicts_with("search") + .help("view package information") + .action(ArgAction::Set) + .num_args(1..), + ), + ) + // Sync subcommand + // + // Only a few of its arguments are implemented below. + .subcommand( + Command::new("sync") + .short_flag('S') + .long_flag("sync") + .about("Synchronize packages.") + .arg( + Arg::new("search") + .short('s') + .long("search") + .conflicts_with("info") + .action(ArgAction::Set) + .num_args(1..) 
+ .help("search remote repositories for matching strings"), + ) + .arg( + Arg::new("info") + .long("info") + .conflicts_with("search") + .short('i') + .action(ArgAction::SetTrue) + .help("view package information"), + ) + .arg( + Arg::new("package") + .help("packages") + .required_unless_present("search") + .action(ArgAction::Set) + .num_args(1..), + ), + ) + .get_matches(); + + match matches.subcommand() { + Some(("sync", sync_matches)) => { + if sync_matches.contains_id("search") { + let packages: Vec<_> = sync_matches + .get_many::("search") + .expect("contains_id") + .map(|s| s.as_str()) + .collect(); + let values = packages.join(", "); + println!("Searching for {values}..."); + return; + } + + let packages: Vec<_> = sync_matches + .get_many::("package") + .expect("is present") + .map(|s| s.as_str()) + .collect(); + let values = packages.join(", "); + + if sync_matches.get_flag("info") { + println!("Retrieving info for {values}..."); + } else { + println!("Installing {values}..."); + } + } + Some(("query", query_matches)) => { + if let Some(packages) = query_matches.get_many::("info") { + let comma_sep = packages.map(|s| s.as_str()).collect::>().join(", "); + println!("Retrieving info for {comma_sep}..."); + } else if let Some(queries) = query_matches.get_many::("search") { + let comma_sep = queries.map(|s| s.as_str()).collect::>().join(", "); + println!("Searching Locally for {comma_sep}..."); + } else { + println!("Displaying all locally installed packages..."); + } + } + _ => unreachable!(), // If all subcommands are defined above, anything else is unreachable + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/repl-derive.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/repl-derive.rs new file mode 100644 index 0000000000000000000000000000000000000000..8de9b33e4ed92f88611709766802f3386bf56ee8 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/repl-derive.rs @@ -0,0 +1,67 @@ +use std::io::Write; + +use clap::{Parser, Subcommand}; + +fn main() -> Result<(), String> { + loop { + let line = readline()?; + let line = line.trim(); + if line.is_empty() { + continue; + } + + match respond(line) { + Ok(quit) => { + if quit { + break; + } + } + Err(err) => { + write!(std::io::stdout(), "{err}").map_err(|e| e.to_string())?; + std::io::stdout().flush().map_err(|e| e.to_string())?; + } + } + } + + Ok(()) +} + +fn respond(line: &str) -> Result { + let args = shlex::split(line).ok_or("error: Invalid quoting")?; + let cli = Cli::try_parse_from(args).map_err(|e| e.to_string())?; + match cli.command { + Commands::Ping => { + write!(std::io::stdout(), "Pong").map_err(|e| e.to_string())?; + std::io::stdout().flush().map_err(|e| e.to_string())?; + } + Commands::Exit => { + write!(std::io::stdout(), "Exiting ...").map_err(|e| e.to_string())?; + std::io::stdout().flush().map_err(|e| e.to_string())?; + return Ok(true); + } + } + Ok(false) +} + +#[derive(Debug, Parser)] +#[command(multicall = true)] +struct Cli { + #[command(subcommand)] + command: Commands, +} + +#[derive(Debug, Subcommand)] +enum Commands { + Ping, + Exit, +} + +fn readline() -> Result { + write!(std::io::stdout(), "$ ").map_err(|e| e.to_string())?; + std::io::stdout().flush().map_err(|e| e.to_string())?; + let mut buffer = String::new(); + std::io::stdin() + .read_line(&mut buffer) + .map_err(|e| e.to_string())?; + Ok(buffer) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/repl.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/repl.rs new file mode 100644 index 0000000000000000000000000000000000000000..d0baaed8d2017b21fa1793a171f41b045c4a3b9a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/repl.rs @@ -0,0 +1,92 @@ +use std::io::Write; + +use clap::Command; + +fn main() 
-> Result<(), String> { + loop { + let line = readline()?; + let line = line.trim(); + if line.is_empty() { + continue; + } + + match respond(line) { + Ok(quit) => { + if quit { + break; + } + } + Err(err) => { + write!(std::io::stdout(), "{err}").map_err(|e| e.to_string())?; + std::io::stdout().flush().map_err(|e| e.to_string())?; + } + } + } + + Ok(()) +} + +fn respond(line: &str) -> Result { + let args = shlex::split(line).ok_or("error: Invalid quoting")?; + let matches = cli() + .try_get_matches_from(args) + .map_err(|e| e.to_string())?; + match matches.subcommand() { + Some(("ping", _matches)) => { + write!(std::io::stdout(), "Pong").map_err(|e| e.to_string())?; + std::io::stdout().flush().map_err(|e| e.to_string())?; + } + Some(("quit", _matches)) => { + write!(std::io::stdout(), "Exiting ...").map_err(|e| e.to_string())?; + std::io::stdout().flush().map_err(|e| e.to_string())?; + return Ok(true); + } + Some((name, _matches)) => unimplemented!("{name}"), + None => unreachable!("subcommand required"), + } + + Ok(false) +} + +fn cli() -> Command { + // strip out usage + const PARSER_TEMPLATE: &str = "\ + {all-args} + "; + // strip out name/version + const APPLET_TEMPLATE: &str = "\ + {about-with-newline}\n\ + {usage-heading}\n {usage}\n\ + \n\ + {all-args}{after-help}\ + "; + + Command::new("repl") + .multicall(true) + .arg_required_else_help(true) + .subcommand_required(true) + .subcommand_value_name("APPLET") + .subcommand_help_heading("APPLETS") + .help_template(PARSER_TEMPLATE) + .subcommand( + Command::new("ping") + .about("Get a response") + .help_template(APPLET_TEMPLATE), + ) + .subcommand( + Command::new("quit") + .alias("exit") + .about("Quit the REPL") + .help_template(APPLET_TEMPLATE), + ) +} + +fn readline() -> Result { + write!(std::io::stdout(), "$ ").map_err(|e| e.to_string())?; + std::io::stdout().flush().map_err(|e| e.to_string())?; + let mut buffer = String::new(); + std::io::stdin() + .read_line(&mut buffer) + .map_err(|e| e.to_string())?; + 
Ok(buffer) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/01_quick.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/01_quick.md new file mode 100644 index 0000000000000000000000000000000000000000..bdba15cd2c8f205a4e0f4941fd088dd7e2eac427 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/01_quick.md @@ -0,0 +1,35 @@ +```console +$ 01_quick --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 01_quick[EXE] [OPTIONS] [name] [COMMAND] + +Commands: + test does testing things + help Print this message or the help of the given subcommand(s) + +Arguments: + [name] Optional name to operate on + +Options: + -c, --config Sets a custom config file + -d, --debug... Turn debugging information on + -h, --help Print help + -V, --version Print version + +``` + +By default, the program does nothing: +```console +$ 01_quick +Debug mode is off + +``` + +But you can mix and match the various features +```console +$ 01_quick -dd test +Debug mode is on +Not printing testing lists... 
+ +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/01_quick.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/01_quick.rs new file mode 100644 index 0000000000000000000000000000000000000000..dc1beb789fe13fc894cf0a95926191a46a4353c2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/01_quick.rs @@ -0,0 +1,60 @@ +use std::path::PathBuf; + +use clap::{arg, command, value_parser, ArgAction, Command}; + +fn main() { + let matches = command!() // requires `cargo` feature + .arg(arg!([name] "Optional name to operate on")) + .arg( + arg!( + -c --config "Sets a custom config file" + ) + // We don't have syntax yet for optional options, so manually calling `required` + .required(false) + .value_parser(value_parser!(PathBuf)), + ) + .arg(arg!( + -d --debug ... "Turn debugging information on" + )) + .subcommand( + Command::new("test") + .about("does testing things") + .arg(arg!(-l --list "lists test values").action(ArgAction::SetTrue)), + ) + .get_matches(); + + // You can check the value provided by positional arguments, or option arguments + if let Some(name) = matches.get_one::("name") { + println!("Value for name: {name}"); + } + + if let Some(config_path) = matches.get_one::("config") { + println!("Value for config: {}", config_path.display()); + } + + // You can see how many times a particular flag or argument occurred + // Note, only flags can have multiple occurrences + match matches + .get_one::("debug") + .expect("Counts are defaulted") + { + 0 => println!("Debug mode is off"), + 1 => println!("Debug mode is kind of on"), + 2 => println!("Debug mode is on"), + _ => println!("Don't be crazy"), + } + + // You can check for the existence of subcommands, and if found use their + // matches just as you would the top level cmd + if let Some(matches) = matches.subcommand_matches("test") { + // "$ myapp test" 
was run + if matches.get_flag("list") { + // "$ myapp test -l" was run + println!("Printing testing lists..."); + } else { + println!("Not printing testing lists..."); + } + } + + // Continued program logic goes here... +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_app_settings.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_app_settings.md new file mode 100644 index 0000000000000000000000000000000000000000..3ac9a95e7847c0f489e45adc880a7ba844d81b52 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_app_settings.md @@ -0,0 +1,17 @@ +```console +$ 02_app_settings --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 02_app_settings[EXE] --two --one + +Options: + --two + + --one + + -h, --help + Print help + -V, --version + Print version + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_app_settings.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_app_settings.rs new file mode 100644 index 0000000000000000000000000000000000000000..4e30ec9a523623e48e19e01e5fc58d08cbee29dd --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_app_settings.rs @@ -0,0 +1,18 @@ +use clap::{arg, command, ArgAction}; + +fn main() { + let matches = command!() // requires `cargo` feature + .next_line_help(true) + .arg(arg!(--two ).required(true).action(ArgAction::Set)) + .arg(arg!(--one ).required(true).action(ArgAction::Set)) + .get_matches(); + + println!( + "two: {:?}", + matches.get_one::("two").expect("required") + ); + println!( + "one: {:?}", + matches.get_one::("one").expect("required") + ); +} diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_apps.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_apps.md new file mode 100644 index 0000000000000000000000000000000000000000..1f8b071b194fb99d8facea2c804eb1480e9dacdc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_apps.md @@ -0,0 +1,16 @@ +```console +$ 02_apps --help +Does awesome things + +Usage: 02_apps[EXE] --two --one + +Options: + --two + --one + -h, --help Print help + -V, --version Print version + +$ 02_apps --version +MyApp 1.0 + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_apps.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_apps.rs new file mode 100644 index 0000000000000000000000000000000000000000..b085e0edc21cd2d8bdd3e27570418ab3f81f7925 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_apps.rs @@ -0,0 +1,19 @@ +use clap::{arg, Command}; + +fn main() { + let matches = Command::new("MyApp") + .version("1.0") + .about("Does awesome things") + .arg(arg!(--two ).required(true)) + .arg(arg!(--one ).required(true)) + .get_matches(); + + println!( + "two: {:?}", + matches.get_one::("two").expect("required") + ); + println!( + "one: {:?}", + matches.get_one::("one").expect("required") + ); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_crate.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_crate.md new file mode 100644 index 0000000000000000000000000000000000000000..a0d21b3ece8d6b515da191de9131c8ce904dedec --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_crate.md @@ -0,0 +1,16 @@ +```console 
+$ 02_crate --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 02_crate[EXE] --two --one + +Options: + --two + --one + -h, --help Print help + -V, --version Print version + +$ 02_crate --version +clap [..] + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_crate.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_crate.rs new file mode 100644 index 0000000000000000000000000000000000000000..ad6bb4713bcab6ce397dc57a8b3ac66c7854bfb0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/02_crate.rs @@ -0,0 +1,18 @@ +use clap::{arg, command}; + +fn main() { + // requires `cargo` feature, reading name, version, author, and description from `Cargo.toml` + let matches = command!() + .arg(arg!(--two ).required(true)) + .arg(arg!(--one ).required(true)) + .get_matches(); + + println!( + "two: {:?}", + matches.get_one::("two").expect("required") + ); + println!( + "one: {:?}", + matches.get_one::("one").expect("required") + ); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_bool.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_bool.md new file mode 100644 index 0000000000000000000000000000000000000000..feec8e0a7a3db0da3100464a3069455ece106977 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_bool.md @@ -0,0 +1,26 @@ +```console +$ 03_01_flag_bool --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 03_01_flag_bool[EXE] [OPTIONS] + +Options: + -v, --verbose + -h, --help Print help + -V, --version Print version + +$ 03_01_flag_bool +verbose: false + +$ 03_01_flag_bool --verbose +verbose: true + +$ 03_01_flag_bool --verbose --verbose +? 
failed +error: the argument '--verbose' cannot be used multiple times + +Usage: 03_01_flag_bool[EXE] [OPTIONS] + +For more information, try '--help'. + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_bool.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_bool.rs new file mode 100644 index 0000000000000000000000000000000000000000..03f2f1756f6bed6f52d41ce28df689986e942eeb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_bool.rs @@ -0,0 +1,14 @@ +use clap::{command, Arg, ArgAction}; + +fn main() { + let matches = command!() // requires `cargo` feature + .arg( + Arg::new("verbose") + .short('v') + .long("verbose") + .action(ArgAction::SetTrue), + ) + .get_matches(); + + println!("verbose: {:?}", matches.get_flag("verbose")); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_count.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_count.md new file mode 100644 index 0000000000000000000000000000000000000000..5e964b67a70aa6473e73dd9a3e0a9f1f2638ca7f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_count.md @@ -0,0 +1,21 @@ +```console +$ 03_01_flag_count --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 03_01_flag_count[EXE] [OPTIONS] + +Options: + -v, --verbose... 
+ -h, --help Print help + -V, --version Print version + +$ 03_01_flag_count +verbose: 0 + +$ 03_01_flag_count --verbose +verbose: 1 + +$ 03_01_flag_count --verbose --verbose +verbose: 2 + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_count.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_count.rs new file mode 100644 index 0000000000000000000000000000000000000000..492f7d4c1fa586b7dde629e8d8f7227772599ed9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_01_flag_count.rs @@ -0,0 +1,14 @@ +use clap::{command, Arg, ArgAction}; + +fn main() { + let matches = command!() // requires `cargo` feature + .arg( + Arg::new("verbose") + .short('v') + .long("verbose") + .action(ArgAction::Count), + ) + .get_matches(); + + println!("verbose: {:?}", matches.get_count("verbose")); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option.md new file mode 100644 index 0000000000000000000000000000000000000000..42b81fd42664af1d99724980fef9c16aaf8ac36a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option.md @@ -0,0 +1,30 @@ +```console +$ 03_02_option --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 03_02_option[EXE] [OPTIONS] + +Options: + -n, --name + -h, --help Print help + -V, --version Print version + +$ 03_02_option +name: None + +$ 03_02_option --name bob +name: Some("bob") + +$ 03_02_option --name=bob +name: Some("bob") + +$ 03_02_option -n bob +name: Some("bob") + +$ 03_02_option -n=bob +name: Some("bob") + +$ 03_02_option -nbob +name: Some("bob") + +``` diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option.rs new file mode 100644 index 0000000000000000000000000000000000000000..e9ba3e41a9325b0ae89dc4d704a6d6430b7baa9b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option.rs @@ -0,0 +1,9 @@ +use clap::{command, Arg}; + +fn main() { + let matches = command!() // requires `cargo` feature + .arg(Arg::new("name").short('n').long("name")) + .get_matches(); + + println!("name: {:?}", matches.get_one::("name")); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option_mult.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option_mult.md new file mode 100644 index 0000000000000000000000000000000000000000..1658f779c1e78c19b8728d96ba179f0390996bd2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option_mult.md @@ -0,0 +1,24 @@ +```console +$ 03_02_option_mult --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 03_02_option_mult[EXE] [OPTIONS] + +Options: + -n, --name + -h, --help Print help + -V, --version Print version + +$ 03_02_option_mult +names: [] + +$ 03_02_option_mult --name bob +names: ["bob"] + +$ 03_02_option_mult --name bob --name john +names: ["bob", "john"] + +$ 03_02_option_mult_derive --name bob --name=john -n tom -n=chris -nsteve +name: ["bob", "john", "tom", "chris", "steve"] + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option_mult.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option_mult.rs new file mode 100644 index 
0000000000000000000000000000000000000000..0145661040a7edb806afba2f30fda1281c6d9f05 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_02_option_mult.rs @@ -0,0 +1,20 @@ +use clap::{command, Arg, ArgAction}; + +fn main() { + let matches = command!() // requires `cargo` feature + .arg( + Arg::new("name") + .short('n') + .long("name") + .action(ArgAction::Append), + ) + .get_matches(); + + let args = matches + .get_many::("name") + .unwrap_or_default() + .map(|v| v.as_str()) + .collect::>(); + + println!("names: {:?}", &args); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional.md new file mode 100644 index 0000000000000000000000000000000000000000..d8d49f39b6ce9b15b160f5b8048d3f81825635d3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional.md @@ -0,0 +1,20 @@ +```console +$ 03_03_positional --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 03_03_positional[EXE] [name] + +Arguments: + [name] + +Options: + -h, --help Print help + -V, --version Print version + +$ 03_03_positional +name: None + +$ 03_03_positional bob +name: Some("bob") + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional.rs new file mode 100644 index 0000000000000000000000000000000000000000..f652d27ca5472b1108def5d87e3643a7521d96bd --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional.rs @@ -0,0 +1,9 @@ +use clap::{command, Arg}; + +fn main() { + let matches = command!() // requires 
`cargo` feature + .arg(Arg::new("name")) + .get_matches(); + + println!("name: {:?}", matches.get_one::("name")); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional_mult.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional_mult.md new file mode 100644 index 0000000000000000000000000000000000000000..174ddd9f7ba2afebc7a87ec6702a0a784835f88b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional_mult.md @@ -0,0 +1,23 @@ +```console +$ 03_03_positional_mult --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 03_03_positional_mult[EXE] [name]... + +Arguments: + [name]... + +Options: + -h, --help Print help + -V, --version Print version + +$ 03_03_positional_mult +names: [] + +$ 03_03_positional_mult bob +names: ["bob"] + +$ 03_03_positional_mult bob john +names: ["bob", "john"] + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional_mult.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional_mult.rs new file mode 100644 index 0000000000000000000000000000000000000000..bcd288d8dc1ccbddc1a79a9fdf4466f26a83cf43 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_03_positional_mult.rs @@ -0,0 +1,15 @@ +use clap::{command, Arg, ArgAction}; + +fn main() { + let matches = command!() // requires `cargo` feature + .arg(Arg::new("name").action(ArgAction::Append)) + .get_matches(); + + let args = matches + .get_many::("name") + .unwrap_or_default() + .map(|v| v.as_str()) + .collect::>(); + + println!("names: {:?}", &args); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_04_subcommands.md 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_04_subcommands.md new file mode 100644 index 0000000000000000000000000000000000000000..24d44889aca1c6d599e3488a4cecf0836865333d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_04_subcommands.md @@ -0,0 +1,62 @@ +```console +$ 03_04_subcommands help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 03_04_subcommands[EXE] + +Commands: + add Adds files to myapp + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help Print help + -V, --version Print version + +$ 03_04_subcommands help add +Adds files to myapp + +Usage: 03_04_subcommands[EXE] add [NAME] + +Arguments: + [NAME] + +Options: + -h, --help Print help + -V, --version Print version + +$ 03_04_subcommands add bob +'myapp add' was used, name is: Some("bob") + +``` + +We set +[`Command::arg_required_else_help`][crate::Command::arg_required_else_help] to +show the help, rather than an error, when the +[required subcommand][crate::Command::subcommand_required] is missing: +```console +$ 03_04_subcommands +? failed +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 03_04_subcommands[EXE] + +Commands: + add Adds files to myapp + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help Print help + -V, --version Print version + +``` + +Since we specified [`Command::propagate_version`][crate::Command::propagate_version], the `--version` flag +is available in all subcommands: +```console +$ 03_04_subcommands --version +clap [..] + +$ 03_04_subcommands add --version +clap-add [..] 
+ +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_04_subcommands.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_04_subcommands.rs new file mode 100644 index 0000000000000000000000000000000000000000..fbe23809e92a45edf4a680aaf8cbb99ff9bbfff4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_04_subcommands.rs @@ -0,0 +1,22 @@ +use clap::{arg, command, Command}; + +fn main() { + let matches = command!() // requires `cargo` feature + .propagate_version(true) + .subcommand_required(true) + .arg_required_else_help(true) + .subcommand( + Command::new("add") + .about("Adds files to myapp") + .arg(arg!([NAME])), + ) + .get_matches(); + + match matches.subcommand() { + Some(("add", sub_matches)) => println!( + "'myapp add' was used, name is: {:?}", + sub_matches.get_one::("NAME") + ), + _ => unreachable!("Exhausted list of subcommands and subcommand_required prevents `None`"), + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_05_default_values.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_05_default_values.md new file mode 100644 index 0000000000000000000000000000000000000000..b62a41550824c94093ec754c57a0e184da5120b5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_05_default_values.md @@ -0,0 +1,20 @@ +```console +$ 03_05_default_values --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 03_05_default_values[EXE] [PORT] + +Arguments: + [PORT] [default: 2020] + +Options: + -h, --help Print help + -V, --version Print version + +$ 03_05_default_values +port: 2020 + +$ 03_05_default_values 22 +port: 22 + +``` diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_05_default_values.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_05_default_values.rs new file mode 100644 index 0000000000000000000000000000000000000000..d259ee06edf1f5ad0af7b847212233e6a05c5cf9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_05_default_values.rs @@ -0,0 +1,18 @@ +use clap::{arg, command, value_parser}; + +fn main() { + let matches = command!() // requires `cargo` feature + .arg( + arg!([PORT]) + .value_parser(value_parser!(u16)) + .default_value("2020"), + ) + .get_matches(); + + println!( + "port: {:?}", + matches + .get_one::("PORT") + .expect("default ensures there is always a value") + ); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_06_required.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_06_required.md new file mode 100644 index 0000000000000000000000000000000000000000..a12f198f8b03a5a004c80aad8b709f5d37ca8d48 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_06_required.md @@ -0,0 +1,26 @@ +```console +$ 03_06_required --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 03_06_required[EXE] + +Arguments: + + +Options: + -h, --help Print help + -V, --version Print version + +$ 03_06_required +? 2 +error: the following required arguments were not provided: + + +Usage: 03_06_required[EXE] + +For more information, try '--help'. 
+ +$ 03_06_required bob +name: "bob" + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_06_required.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_06_required.rs new file mode 100644 index 0000000000000000000000000000000000000000..31df3155507a03f36e85c7eeacc424ce2f9ecb85 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/03_06_required.rs @@ -0,0 +1,14 @@ +use clap::{command, Arg}; + +fn main() { + let matches = command!() // requires `cargo` feature + .arg(Arg::new("name").required(true)) + .get_matches(); + + println!( + "name: {:?}", + matches + .get_one::("name") + .expect("clap `required` ensures its present") + ); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_enum.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_enum.md new file mode 100644 index 0000000000000000000000000000000000000000..ec4c0aea1d64d3b73f9d4d8517ced12e03ec590e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_enum.md @@ -0,0 +1,47 @@ +```console +$ 04_01_enum --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 04_01_enum[EXE] + +Arguments: + + What mode to run the program in + + Possible values: + - fast: Run swiftly + - slow: Crawl slowly but steadily + +Options: + -h, --help + Print help (see a summary with '-h') + + -V, --version + Print version + +$ 04_01_enum -h +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 04_01_enum[EXE] + +Arguments: + What mode to run the program in [possible values: fast, slow] + +Options: + -h, --help Print help (see more with '--help') + -V, --version Print version + +$ 04_01_enum fast +Hare + +$ 04_01_enum slow +Tortoise + +$ 
04_01_enum medium +? failed +error: invalid value 'medium' for '' + [possible values: fast, slow] + +For more information, try '--help'. + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_enum.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_enum.rs new file mode 100644 index 0000000000000000000000000000000000000000..0c419b29d79ee06c6e261bcbed71a9b3ee789282 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_enum.rs @@ -0,0 +1,66 @@ +use clap::{arg, builder::PossibleValue, command, value_parser, ValueEnum}; + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +enum Mode { + Fast, + Slow, +} + +// Can also be derived with feature flag `derive` +impl ValueEnum for Mode { + fn value_variants<'a>() -> &'a [Self] { + &[Mode::Fast, Mode::Slow] + } + + fn to_possible_value(&self) -> Option { + Some(match self { + Mode::Fast => PossibleValue::new("fast").help("Run swiftly"), + Mode::Slow => PossibleValue::new("slow").help("Crawl slowly but steadily"), + }) + } +} + +impl std::fmt::Display for Mode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.to_possible_value() + .expect("no values are skipped") + .get_name() + .fmt(f) + } +} + +impl std::str::FromStr for Mode { + type Err = String; + + fn from_str(s: &str) -> Result { + for variant in Self::value_variants() { + if variant.to_possible_value().unwrap().matches(s, false) { + return Ok(*variant); + } + } + Err(format!("invalid variant: {s}")) + } +} + +fn main() { + let matches = command!() // requires `cargo` feature + .arg( + arg!() + .help("What mode to run the program in") + .value_parser(value_parser!(Mode)), + ) + .get_matches(); + + // Note, it's safe to call unwrap() because the arg is required + match matches + .get_one::("MODE") + .expect("'MODE' is required and parsing will fail if its missing") 
+ { + Mode::Fast => { + println!("Hare"); + } + Mode::Slow => { + println!("Tortoise"); + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_possible.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_possible.md new file mode 100644 index 0000000000000000000000000000000000000000..fa2c8353902f53c3a60e8c3f5132d0f6909b934f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_possible.md @@ -0,0 +1,27 @@ +```console +$ 04_01_possible --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 04_01_possible[EXE] + +Arguments: + What mode to run the program in [possible values: fast, slow] + +Options: + -h, --help Print help + -V, --version Print version + +$ 04_01_possible fast +Hare + +$ 04_01_possible slow +Tortoise + +$ 04_01_possible medium +? failed +error: invalid value 'medium' for '' + [possible values: fast, slow] + +For more information, try '--help'. 
+ +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_possible.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_possible.rs new file mode 100644 index 0000000000000000000000000000000000000000..3da7aca74b69c7d3949801e38973f56266fa77c0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_01_possible.rs @@ -0,0 +1,26 @@ +use clap::{arg, command}; + +fn main() { + let matches = command!() // requires `cargo` feature + .arg( + arg!() + .help("What mode to run the program in") + .value_parser(["fast", "slow"]), + ) + .get_matches(); + + // Note, it's safe to call unwrap() because the arg is required + match matches + .get_one::("MODE") + .expect("'MODE' is required and parsing will fail if its missing") + .as_str() + { + "fast" => { + println!("Hare"); + } + "slow" => { + println!("Tortoise"); + } + _ => unreachable!(), + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_02_parse.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_02_parse.md new file mode 100644 index 0000000000000000000000000000000000000000..af03e95cbc5bc1caf59ac094f47444c053a829e1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/examples/tutorial_builder/04_02_parse.md @@ -0,0 +1,29 @@ +```console +$ 04_02_parse --help +A simple to use, efficient, and full-featured Command Line Argument Parser + +Usage: 04_02_parse[EXE] + +Arguments: + Network port to use + +Options: + -h, --help Print help + -V, --version Print version + +$ 04_02_parse 22 +PORT = 22 + +$ 04_02_parse foobar +? failed +error: invalid value 'foobar' for '': invalid digit found in string + +For more information, try '--help'. + +$ 04_02_parse_derive 0 +? 
failed +error: invalid value '0' for '': 0 is not in 1..=65535 + +For more information, try '--help'. + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_concepts.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_concepts.rs new file mode 100644 index 0000000000000000000000000000000000000000..b02518ed8f5bf030602bb1429d262b90088faf56 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_concepts.rs @@ -0,0 +1,108 @@ +//! ## CLI Concepts +//! +//! Note: this will be speaking towards the general case. +//! +//! ### Environmental context +//! +//! When you run a command line application, it is inside a terminal emulator, or terminal. +//! This handles integration with the rest of your system including user input, +//! rendering, etc. +//! +//! The terminal will run inside of itself an interactive shell. +//! The shell is responsible for showing the prompt, receiving input including the command you are writing, +//! letting that command take over until completion, and then repeating. +//! This is called a read-eval-print loop, or REPL. +//! Typically the shell will take the command you typed and split it into separate arguments, +//! including handling of quoting, escaping, and globbing. +//! The parsing and evaluation of the command is shell specific. +//! The shell will then determine which application to run and then pass the full command-line as +//! individual arguments to your program. +//! These arguments are exposed in Rust as [`std::env::args_os`]. +//! +//! Windows is an exception in Shell behavior in that the command is passed as an individual +//! string, verbatim, and the application must split the arguments. +//! [`std::env::args_os`] will handle the splitting for you but will not handle globs. +//! +//! Takeaways: +//! - Your application will only see quotes that have been escaped within the shell +//! - e.g. 
to receive `message="hello world"`, you may need to type `'message="hello world"'` or `message=\"hello world\"` +//! - If your applications needs to parse a string into arguments, +//! you will need to pick a syntax and do it yourself +//! - POSIX's shell syntax is a common choice and available in packages like [shlex](https://docs.rs/shlex) +//! - See also our [REPL cookbook entry][crate::_cookbook::repl] +//! - On Windows, you will need to handle globbing yourself if desired +//! - [`wild`](https://docs.rs/wild) can help with that +//! +//! ### Argument Parsing +//! +//! The first argument of [`std::env::args_os`] is the [`Command::bin_name`] +//! which is usually limited to affecting [`Command::render_usage`]. +//! [`Command::no_binary_name`] and [`Command::multicall`] exist for rare cases when this assumption is not valid. +//! +//! Command-lines are a context-sensitive grammar, +//! meaning the interpretation of an argument is based on the arguments that came before. +//! Arguments come in one of several flavors: +//! - Values +//! - Flags +//! - Subcommands +//! +//! When examining the next argument, +//! 1. If it starts with a `--`, +//! then that is a long Flag and all remaining text up to a `=` or the end is +//! matched to a [`Arg::long`], [`Command::long_flag`], or alias. +//! - Everything after the `=` is taken as a Value and parsing a new argument is examined. +//! - If no `=` is present, then Values will be taken according to [`Arg::num_args`] +//! - We generally call a Flag that takes a Value an Option +//! 2. If it starts with a `-`, +//! then that is a sequence of short Flags where each character is matched against a [`Arg::short`], [`Command::short_flag`] or +//! alias until `=`, the end, or a short Flag takes Values (see [`Arg::num_args`]) +//! 3. If its a `--`, that is an escape and all future arguments are considered to be a Value, even if +//! they start with `--` or `-` +//! 4. If it matches a [`Command::name`], +//! 
then the argument is a subcommand +//! 5. If there is an [`Arg`] at the next [`Arg::index`], +//! then the argument is considered a Positional argument +//! +//! When a subcommand matches, +//! all further arguments are parsed by that [`Command`]. +//! +//! There are many settings that tweak this behavior, including: +//! - [`Arg::last`]: a positional that can only come after `--` +//! - [`Arg::trailing_var_arg`]: all further arguments are captured as additional Values +//! - [`Arg::allow_hyphen_values`] and [`Arg::allow_negative_numbers`]: assumes arguments +//! starting with `-` are Values and not Flags. +//! - [`Command::subcommand_precedence_over_arg`]: when an [`Arg::num_args`] takes Values, +//! stop if one matches a subCommand +//! - [`Command::allow_missing_positional`]: in limited cases a [`Arg::index`] may be skipped +//! - [`Command::allow_external_subcommands`]: treat any unknown argument as a subcommand, capturing +//! all remaining arguments. +//! +//! Takeaways +//! - Values that start with a `-` either need to be escaped by the user with `--` +//! (if a positional), +//! or you need to set [`Arg::allow_hyphen_values`] or [`Arg::allow_negative_numbers`] +//! - [`Arg::num_args`], +//! [`ArgAction::Append`] (on a positional), +//! [`Arg::trailing_var_arg`], +//! and [`Command::allow_external_subcommands`] +//! all affect the parser in similar but slightly different ways and which to use depends on your +//! application +//! +//! ### Value Parsing +//! +//! When reacting to a Flag (no Value), +//! [`Arg::default_missing_values`] will be applied. +//! +//! The Value will be split by [`Arg::value_delimiter`]. +//! +//! The Value will then be stored according to its [`ArgAction`]. +//! For most [`ArgAction`]s, +//! the Value will be parsed according to [`ValueParser`] +//! and stored in the [`ArgMatches`]. 
+ +#![allow(unused_imports)] +use clap_builder::builder::ValueParser; +use clap_builder::Arg; +use clap_builder::ArgAction; +use clap_builder::ArgMatches; +use clap_builder::Command; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/cargo_example.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/cargo_example.rs new file mode 100644 index 0000000000000000000000000000000000000000..ec5d582db05b1b75e87fbd8dec33cc04c949cefc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/cargo_example.rs @@ -0,0 +1,7 @@ +//! # Example: cargo subcommand (Builder API) +//! +//! ```rust +#![doc = include_str!("../../examples/cargo-example.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/cargo-example.md")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/cargo_example_derive.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/cargo_example_derive.rs new file mode 100644 index 0000000000000000000000000000000000000000..d49f956f9c0e877b4adf703c918456395036c055 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/cargo_example_derive.rs @@ -0,0 +1,7 @@ +//! # Example: cargo subcommand (Derive API) +//! +//! ```rust +#![doc = include_str!("../../examples/cargo-example-derive.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/cargo-example-derive.md")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/escaped_positional.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/escaped_positional.rs new file mode 100644 index 0000000000000000000000000000000000000000..99a3f83f3b50e4d3986f85a3fb95854673276512 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/escaped_positional.rs @@ -0,0 +1,7 @@ +//! 
# Example (Builder API) +//! +//! ```rust +#![doc = include_str!("../../examples/escaped-positional.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/escaped-positional.md")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/escaped_positional_derive.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/escaped_positional_derive.rs new file mode 100644 index 0000000000000000000000000000000000000000..e6f99ad17191c1dd24e69f9b2dd56e85b369c076 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/escaped_positional_derive.rs @@ -0,0 +1,7 @@ +//! # Example (Derive API) +//! +//! ```rust +#![doc = include_str!("../../examples/escaped-positional-derive.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/escaped-positional-derive.md")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/find.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/find.rs new file mode 100644 index 0000000000000000000000000000000000000000..2ca11c665d21677b88d0ac2e26ad6e91355c6135 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/find.rs @@ -0,0 +1,7 @@ +//! # Example: find-like CLI (Builder API) +//! +//! ```rust +#![doc = include_str!("../../examples/find.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/find.md")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/git.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/git.rs new file mode 100644 index 0000000000000000000000000000000000000000..03a926ca85b6cfed46d3b254a367405db27272db --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/git.rs @@ -0,0 +1,7 @@ +//! # Example: git-like CLI (Builder API) +//! +//! 
```rust +#![doc = include_str!("../../examples/git.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/git.md")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/git_derive.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/git_derive.rs new file mode 100644 index 0000000000000000000000000000000000000000..d3119736de24d9e4cfc1f98aff9063aecc08f472 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/git_derive.rs @@ -0,0 +1,7 @@ +//! # Example: git-like CLI (Derive API) +//! +//! ```rust +#![doc = include_str!("../../examples/git-derive.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/git-derive.md")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..364e43b11953313b437739aff310e8c107d60541 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/mod.rs @@ -0,0 +1,63 @@ +// Contributing +// +// New examples: +// - Building: They must be added to `Cargo.toml` with the appropriate `required-features`. +// - Testing: Ensure there is a markdown file with [trycmd](https://docs.rs/trycmd) syntax +// - Link the `.md` file from here + +//! # Documentation: Cookbook +//! +//! Typed arguments: [derive][typed_derive] +//! - Topics: +//! - Custom `parse()` +//! +//! Custom cargo command: [builder][cargo_example], [derive][cargo_example_derive] +//! - Topics: +//! - Subcommands +//! - Cargo plugins +//! - custom terminal [styles][crate::Command::styles] (colors) +//! +//! find-like interface: [builder][find] +//! - Topics: +//! - Position-sensitive flags +//! +//! git-like interface: [builder][git], [derive][git_derive] +//! - Topics: +//! - Subcommands +//! - External subcommands +//! 
- Optional subcommands +//! - Default subcommands +//! - [`last`][crate::Arg::last] +//! +//! pacman-like interface: [builder][pacman] +//! - Topics: +//! - Flag subcommands +//! - Conflicting arguments +//! +//! Escaped positionals with `--`: [builder][escaped_positional], [derive][escaped_positional_derive] +//! +//! Multi-call +//! - busybox: [builder][multicall_busybox] +//! - Topics: +//! - Subcommands +//! - hostname: [builder][multicall_hostname] +//! - Topics: +//! - Subcommands +//! +//! repl: [builder][repl], [derive][repl_derive] +//! - Topics: +//! - Read-Eval-Print Loops / Custom command lines + +pub mod cargo_example; +pub mod cargo_example_derive; +pub mod escaped_positional; +pub mod escaped_positional_derive; +pub mod find; +pub mod git; +pub mod git_derive; +pub mod multicall_busybox; +pub mod multicall_hostname; +pub mod pacman; +pub mod repl; +pub mod repl_derive; +pub mod typed_derive; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/multicall_busybox.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/multicall_busybox.rs new file mode 100644 index 0000000000000000000000000000000000000000..e3384d682f5fd3e6ac02835f2f0d65e7fd7c5507 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/multicall_busybox.rs @@ -0,0 +1,7 @@ +//! # Example: busybox-like CLI (Builder API) +//! +//! ```rust +#![doc = include_str!("../../examples/multicall-busybox.rs")] +//! ``` +//! 
+#![doc = include_str!("../../examples/multicall-busybox.md")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/multicall_hostname.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/multicall_hostname.rs new file mode 100644 index 0000000000000000000000000000000000000000..9777654dc131359ef79937555d33b9ea74398519 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/multicall_hostname.rs @@ -0,0 +1,7 @@ +//! # Example: hostname-like CLI (Builder API) +//! +//! ```rust +#![doc = include_str!("../../examples/multicall-hostname.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/multicall-hostname.md")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/pacman.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/pacman.rs new file mode 100644 index 0000000000000000000000000000000000000000..880c58158bc37918f478fd1977aeb74a37358d34 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/pacman.rs @@ -0,0 +1,7 @@ +//! # Example: pacman-like CLI (Builder API) +//! +//! ```rust +#![doc = include_str!("../../examples/pacman.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/pacman.md")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/repl.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/repl.rs new file mode 100644 index 0000000000000000000000000000000000000000..549ec825908b2731c712e1f98b079e43e3e42490 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/repl.rs @@ -0,0 +1,5 @@ +//! # Example: Command REPL (Builder API) +//! +//! ```rust +#![doc = include_str!("../../examples/repl.rs")] +//! 
``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/repl_derive.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/repl_derive.rs new file mode 100644 index 0000000000000000000000000000000000000000..3cadf2652e16f06528e3076e5bdb69df5d6f7a3f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/repl_derive.rs @@ -0,0 +1,4 @@ +//! # Example: REPL (Derive API) +//! +//! ```rust +#![doc = include_str!("../../examples/repl-derive.rs")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/typed_derive.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/typed_derive.rs new file mode 100644 index 0000000000000000000000000000000000000000..cad6ea0a86d2ee195af03eb40c695bbb2b635f5d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_cookbook/typed_derive.rs @@ -0,0 +1,35 @@ +//! # Example: Custom Types (Derive API) +//! +//! **This requires enabling the [`derive` feature flag][crate::_features].** +//! +//! ## Implicit [`Arg::value_parser`][crate::Arg::value_parser] +//! +//! ```rust +#![doc = include_str!("../../examples/typed-derive/implicit.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/typed-derive/implicit.md")] +//! +//! ## Built-in [`TypedValueParser`][crate::builder::TypedValueParser] +//! +//! ```rust +#![doc = include_str!("../../examples/typed-derive/builtin.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/typed-derive/builtin.md")] +//! +//! ## Custom parser function +//! +//! ```rust +#![doc = include_str!("../../examples/typed-derive/fn_parser.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/typed-derive/fn_parser.md")] +//! +//! ## Custom [`TypedValueParser`][crate::builder::TypedValueParser] +//! +//! ```rust +#![doc = include_str!("../../examples/typed-derive/custom.rs")] +//! ``` +//! 
+#![doc = include_str!("../../examples/typed-derive/custom.md")] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_derive/_tutorial.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_derive/_tutorial.rs new file mode 100644 index 0000000000000000000000000000000000000000..51f3dcd2550a9a22fff8d9320c7d86ddfd81be19 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_derive/_tutorial.rs @@ -0,0 +1,257 @@ +// Contributing +// +// New example code: +// - Please update the corresponding section in the derive tutorial +// - Building: They must be added to `Cargo.toml` with the appropriate `required-features`. +// - Testing: Ensure there is a markdown file with [trycmd](https://docs.rs/trycmd) syntax +// +// See also the general CONTRIBUTING + +//! ## Tutorial for the Derive API +//! +//! *See the side bar for the Table of Contents* +//! +//! ## Quick Start +//! +//! You can create an application declaratively with a `struct` and some +//! attributes. +//! +//! First, ensure `clap` is available with the [`derive` feature flag][crate::_features]: +//! ```console +//! $ cargo add clap --features derive +//! ``` +//! +//! Here is a preview of the type of application you can make: +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/01_quick.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/tutorial_derive/01_quick.md")] +//! +//! See also +//! - [FAQ: When should I use the builder vs derive APIs?][crate::_faq#when-should-i-use-the-builder-vs-derive-apis] +//! - The [cookbook][crate::_cookbook] for more application-focused examples +//! +//! ## Configuring the Parser +//! +//! You use derive [`Parser`][crate::Parser] to start building a parser. +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/02_apps.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/tutorial_derive/02_apps.md")] +//! +//! 
You can use [`#[command(version, about)]` attribute defaults][super#command-attributes] on the struct to fill these fields in from your `Cargo.toml` file. +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/02_crate.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/02_crate.md")] +//! +//! You can use `#[command]` attributes on the struct to change the application level behavior of clap. Any [`Command`][crate::Command] builder function can be used as an attribute, like [`Command::next_line_help`]. +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/02_app_settings.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/02_app_settings.md")] +//! +//! ## Adding Arguments +//! +//! 1. [Positionals](#positionals) +//! 2. [Options](#options) +//! 3. [Flags](#flags) +//! 4. [Optional](#optional) +//! 5. [Defaults](#defaults) +//! 6. [Subcommands](#subcommands) +//! +//! Arguments are inferred from the fields of your struct. +//! +//! ### Positionals +//! +//! By default, struct fields define positional arguments: +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/03_03_positional.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/03_03_positional.md")] +//! +//! Note that the [default `ArgAction` is `Set`][super#arg-types]. To +//! accept multiple values, override the [action][Arg::action] with [`Append`][crate::ArgAction::Append] via `Vec`: +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/03_03_positional_mult.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/03_03_positional_mult.md")] +//! +//! ### Options +//! +//! You can name your arguments with a flag: +//! - Intent of the value is clearer +//! - Order doesn't matter +//! +//! To specify the flags for an argument, you can use [`#[arg(short = 'n')]`][Arg::short] and/or +//! [`#[arg(long = "name")]`][Arg::long] attributes on a field. When no value is given (e.g. 
+//! `#[arg(short)]`), the flag is inferred from the field's name. +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/03_02_option.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/03_02_option.md")] +//! +//! Note that the [default `ArgAction` is `Set`][super#arg-types]. To +//! accept multiple occurrences, override the [action][Arg::action] with [`Append`][crate::ArgAction::Append] via `Vec`: +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/03_02_option_mult.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/03_02_option_mult.md")] +//! +//! ### Flags +//! +//! Flags can also be switches that can be on/off: +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/03_01_flag_bool.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/03_01_flag_bool.md")] +//! +//! Note that the [default `ArgAction` for a `bool` field is +//! `SetTrue`][super#arg-types]. To accept multiple flags, override the [action][Arg::action] with +//! [`Count`][crate::ArgAction::Count]: +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/03_01_flag_count.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/03_01_flag_count.md")] +//! +//! This also shows that any[`Arg`][crate::Args] method may be used as an attribute. +//! +//! ### Optional +//! +//! By default, arguments are assumed to be [`required`][crate::Arg::required]. +//! To make an argument optional, wrap the field's type in `Option`: +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/03_06_optional.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/03_06_optional.md")] +//! +//! ### Defaults +//! +//! We've previously showed that arguments can be [`required`][crate::Arg::required] or optional. +//! When optional, you work with a `Option` and can `unwrap_or`. Alternatively, you can +//! set [`#[arg(default_value_t)]`][super#arg-attributes]. +//! +//! 
```rust +#![doc = include_str!("../../examples/tutorial_derive/03_05_default_values.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/03_05_default_values.md")] +//! +//! ### Subcommands +//! +//! Subcommands are derived with `#[derive(Subcommand)]` and can be added via +//! [`#[command(subcommand)]` attribute][super#command-attributes] on the field using that type. +//! Each instance of a [Subcommand][crate::Subcommand] can have its own version, author(s), Args, +//! and even its own subcommands. +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/03_04_subcommands.rs")] +//! ``` +//! We used a struct-variant to define the `add` subcommand. +//! Alternatively, you can use a struct for your subcommand's arguments: +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/03_04_subcommands_alt.rs")] +//! ``` +//! +#![doc = include_str!("../../examples/tutorial_derive/03_04_subcommands.md")] +//! +//! ## Validation +//! +//! 1. [Enumerated values](#enumerated-values) +//! 2. [Validated values](#validated-values) +//! 3. [Argument Relations](#argument-relations) +//! 4. [Custom Validation](#custom-validation) +//! +//! An appropriate default parser/validator will be selected for the field's type. See +//! [`value_parser!`][crate::value_parser!] for more details. +//! +//! ### Enumerated values +//! +//! For example, if you have arguments of specific values you want to test for, you can derive +//! [`ValueEnum`][super#valueenum-attributes] +//! (any [`PossibleValue`] builder function can be used as a `#[value]` attribute on enum variants). +//! +//! This allows you to specify the valid values for that argument. If the user does not use one of +//! those specific values, they will receive a graceful exit with an error message informing them +//! of the mistake, and what the possible valid values are. +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/04_01_enum.rs")] +//! 
``` +#![doc = include_str!("../../examples/tutorial_derive/04_01_enum.md")] +//! +//! ### Validated values +//! +//! More generally, you can validate and parse into any data type with [`Arg::value_parser`]. +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/04_02_parse.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/04_02_parse.md")] +//! +//! A [custom parser][TypedValueParser] can be used to improve the error messages or provide additional validation: +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/04_02_validate.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/04_02_validate.md")] +//! +//! See [`Arg::value_parser`][crate::Arg::value_parser] for more details. +//! +//! ### Argument Relations +//! +//! You can declare dependencies or conflicts between [`Arg`][crate::Arg]s or even +//! [`ArgGroup`][crate::ArgGroup]s. +//! +//! [`ArgGroup`][crate::ArgGroup]s make it easier to declare relations instead of having to list +//! each individually, or when you want a rule to apply "any but not all" arguments. +//! +//! Perhaps the most common use of [`ArgGroup`][crate::ArgGroup]s is to require one and *only* one +//! argument to be present out of a given set. Imagine that you had multiple arguments, and you +//! want one of them to be required, but making all of them required isn't feasible because perhaps +//! they conflict with each other. +//! +//! [`ArgGroup`][crate::ArgGroup]s are automatically created for a `struct` with its +//! [`ArgGroup::id`][crate::ArgGroup::id] being the struct's name. +//! +//! ```rust +#![doc = include_str!("../../examples/tutorial_derive/04_03_relations.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/04_03_relations.md")] +//! +//! ### Custom Validation +//! +//! As a last resort, you can create custom errors with the basics of clap's formatting. +//! +//! 
```rust +#![doc = include_str!("../../examples/tutorial_derive/04_04_custom.rs")] +//! ``` +#![doc = include_str!("../../examples/tutorial_derive/04_04_custom.md")] +//! +//! ## Testing +//! +//! clap reports most development errors as `debug_assert!`s. Rather than checking every +//! subcommand, you should have a test that calls +//! [`Command::debug_assert`][crate::Command::debug_assert]: +//! ```rust,no_run +#![doc = include_str!("../../examples/tutorial_derive/05_01_assert.rs")] +//! ``` +//! +//! ## Next Steps +//! +//! - [Cookbook][crate::_cookbook] for application-focused examples +//! - Explore more features in the [Derive reference][super] +//! - See also [`Command`], [`Arg`], [`ArgGroup`], and [`PossibleValue`] builder functions which +//! can be used as attributes +//! +//! For support, see [Discussions](https://github.com/clap-rs/clap/discussions) +#![allow(unused_imports)] +use crate::builder::*; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_derive/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_derive/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..04d6c65a9eaf1ce6300036ed1386e5a20dbdc0d4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_derive/mod.rs @@ -0,0 +1,542 @@ +//! # Documentation: Derive Reference +//! +//! 1. [Overview](#overview) +//! 2. [Attributes](#attributes) +//! 1. [Terminology](#terminology) +//! 2. [Command Attributes](#command-attributes) +//! 2. [ArgGroup Attributes](#arggroup-attributes) +//! 3. [Arg Attributes](#arg-attributes) +//! 4. [ValueEnum Attributes](#valueenum-attributes) +//! 5. [Possible Value Attributes](#possible-value-attributes) +//! 3. [Field Types](#field-types) +//! 4. [Doc Comments](#doc-comments) +//! 5. [Mixing Builder and Derive APIs](#mixing-builder-and-derive-apis) +//! 6. [Tips](#tips) +//! +//! ## Overview +//! +//! 
To derive `clap` types, you need to enable the [`derive` feature flag][crate::_features]. +//! +//! Example: +//! ```rust +#![doc = include_str!("../../examples/demo.rs")] +//! ``` +//! +//! Let's start by breaking down the anatomy of the derive attributes: +//! ```rust +//! use clap::{Parser, Args, Subcommand, ValueEnum}; +//! +//! /// Doc comment +//! #[derive(Parser)] +//! #[command(CMD ATTRIBUTE)] +//! #[group(GROUP ATTRIBUTE)] +//! struct Cli { +//! /// Doc comment +//! #[arg(ARG ATTRIBUTE)] +//! field: UserType, +//! +//! #[arg(value_enum, ARG ATTRIBUTE...)] +//! field: EnumValues, +//! +//! #[command(flatten)] +//! delegate: Struct, +//! +//! #[command(subcommand)] +//! command: Command, +//! } +//! +//! /// Doc comment +//! #[derive(Args)] +//! #[command(PARENT CMD ATTRIBUTE)] +//! #[group(GROUP ATTRIBUTE)] +//! struct Struct { +//! /// Doc comment +//! #[command(ARG ATTRIBUTE)] +//! field: UserType, +//! } +//! +//! /// Doc comment +//! #[derive(Subcommand)] +//! #[command(PARENT CMD ATTRIBUTE)] +//! enum Command { +//! /// Doc comment +//! #[command(CMD ATTRIBUTE)] +//! Variant1(Struct), +//! +//! /// Doc comment +//! #[command(CMD ATTRIBUTE)] +//! Variant2 { +//! /// Doc comment +//! #[arg(ARG ATTRIBUTE)] +//! field: UserType, +//! } +//! } +//! +//! /// Doc comment +//! #[derive(ValueEnum)] +//! #[value(VALUE ENUM ATTRIBUTE)] +//! enum EnumValues { +//! /// Doc comment +//! #[value(POSSIBLE VALUE ATTRIBUTE)] +//! Variant1, +//! } +//! +//! fn main() { +//! let cli = Cli::parse(); +//! } +//! ``` +//! +//! Traits: +//! - [`Parser`][crate::Parser] parses arguments into a `struct` (arguments) or `enum` (subcommands). +//! - [`Args`][crate::Args] allows defining a set of re-usable arguments that get merged into their parent container. +//! - [`Subcommand`][crate::Subcommand] defines available subcommands. +//! - Subcommand arguments can be defined in a struct-variant or automatically flattened with a tuple-variant. +//! 
- [`ValueEnum`][crate::ValueEnum] allows parsing a value directly into an `enum`, erroring on unsupported values. +//! - The derive doesn't work on enums that contain non-unit variants, unless they are skipped +//! +//! *See also the [derive tutorial][crate::_derive::_tutorial] and [cookbook][crate::_cookbook]* +//! +//! ## Attributes +//! +//! ### Terminology +//! +//! **Raw attributes** are forwarded directly to the underlying [`clap` builder][crate::builder]. Any +//! [`Command`][crate::Command], [`Arg`][crate::Arg], or [`PossibleValue`][crate::builder::PossibleValue] method can be used as an attribute. +//! +//! Raw attributes come in two different syntaxes: +//! ```rust,ignore +//! #[arg( +//! global = true, // name = arg form, neat for one-arg methods +//! required_if_eq("out", "file") // name(arg1, arg2, ...) form. +//! )] +//! ``` +//! +//! - `method = arg` can only be used for methods which take only one argument. +//! - `method(arg1, arg2)` can be used with any method. +//! +//! As long as `method_name` is not one of the magical methods it will be +//! translated into a mere method call. +//! +//! **Magic attributes** have post-processing done to them, whether that is +//! - Providing of defaults +//! - Special behavior is triggered off of it +//! +//! Magic attributes are more constrained in the syntax they support, usually just +//! ` = ` though some use `()` instead. See the specific +//! magic attributes documentation for details. This allows users to access the +//! raw behavior of an attribute via `()` syntax. +//! +//!
+//! +//! **NOTE:** Some attributes are inferred from [Arg Types](#arg-types) and [Doc +//! Comments](#doc-comments). Explicit attributes take precedence over inferred +//! attributes. +//! +//!
+//! +//! ### Command Attributes +//! +//! These correspond to a [`Command`][crate::Command] which is used for both top-level parsers and +//! when defining subcommands. +//! +//! **Raw attributes:** Any [`Command` method][crate::Command] can also be used as an attribute, +//! see [Terminology](#terminology) for syntax. +//! - e.g. `#[command(arg_required_else_help(true))]` would translate to `cmd.arg_required_else_help(true)` +//! +//! **Magic attributes:** +//! - `name = `: [`Command::name`][crate::Command::name] +//! - When not present: [package `name`](https://doc.rust-lang.org/cargo/reference/manifest.html#the-name-field) (if on [`Parser`][crate::Parser] container), variant name (if on [`Subcommand`][crate::Subcommand] variant) +//! - `version [= ]`: [`Command::version`][crate::Command::version] +//! - When not present: no version set +//! - Without ``: defaults to [crate `version`](https://doc.rust-lang.org/cargo/reference/manifest.html#the-version-field) +//! - `author [= ]`: [`Command::author`][crate::Command::author] +//! - When not present: no author set +//! - Without ``: defaults to [crate `authors`](https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field) +//! - **NOTE:** A custom [`help_template`][crate::Command::help_template] is needed for author to show up. +//! - `about [= ]`: [`Command::about`][crate::Command::about] +//! - When not present: [Doc comment summary](#doc-comments) +//! - Without ``: [crate `description`](https://doc.rust-lang.org/cargo/reference/manifest.html#the-description-field) ([`Parser`][crate::Parser] container) +//! - **TIP:** When a doc comment is also present, you most likely want to add +//! `#[arg(long_about = None)]` to clear the doc comment so only [`about`][crate::Command::about] +//! gets shown with both `-h` and `--help`. +//! - `long_about[ = ]`: [`Command::long_about`][crate::Command::long_about] +//! - When not present: [Doc comment](#doc-comments) if there is a blank line, else nothing +//! 
- When present without a value: [Doc comment](#doc-comments) +//! - `verbatim_doc_comment`: Minimizes pre-processing when converting doc comments to [`about`][crate::Command::about] / [`long_about`][crate::Command::long_about] +//! - `next_display_order`: [`Command::next_display_order`][crate::Command::next_display_order] +//! - `next_help_heading`: [`Command::next_help_heading`][crate::Command::next_help_heading] +//! - When `flatten`ing [`Args`][crate::Args], this is scoped to just the args in this struct and any struct `flatten`ed into it +//! - `rename_all = `: Override default field / variant name case conversion for [`Command::name`][crate::Command::name] / [`Arg::id`][crate::Arg::id] +//! - When not present: `"kebab-case"` +//! - Available values: `"camelCase"`, `"kebab-case"`, `"PascalCase"`, `"SCREAMING_SNAKE_CASE"`, `"snake_case"`, `"lower"`, `"UPPER"`, `"verbatim"` +//! - `rename_all_env = `: Override default field name case conversion for env variables for [`Arg::env`][crate::Arg::env] +//! - When not present: `"SCREAMING_SNAKE_CASE"` +//! - Available values: `"camelCase"`, `"kebab-case"`, `"PascalCase"`, `"SCREAMING_SNAKE_CASE"`, `"snake_case"`, `"lower"`, `"UPPER"`, `"verbatim"` +//! +//! And for [`Subcommand`][crate::Subcommand] variants: +//! - `skip`: Ignore this variant +//! - `flatten`: Delegates to the variant for more subcommands (must implement +//! [`Subcommand`][crate::Subcommand]) +//! - `subcommand`: Nest subcommands under the current set of subcommands (must implement +//! [`Subcommand`][crate::Subcommand]) +//! - `external_subcommand`: [`Command::allow_external_subcommand(true)`][crate::Command::allow_external_subcommands] +//! - Variant must be either `Variant(Vec)` or `Variant(Vec)` +//! +//! And for [`Args`][crate::Args] fields: +//! - `flatten`: Delegates to the field for more arguments (must implement [`Args`][crate::Args]) +//! - Only [`next_help_heading`][crate::Command::next_help_heading] can be used with `flatten`. See +//! 
[clap-rs/clap#3269](https://github.com/clap-rs/clap/issues/3269) for why +//! arg attributes are not generally supported. +//! - **Tip:** Though we do apply a flattened [`Args`][crate::Args]'s Parent Command Attributes, this +//! makes reuse harder. Generally prefer putting the cmd attributes on the +//! [`Parser`][crate::Parser] or on the flattened field. +//! - `subcommand`: Delegates definition of subcommands to the field (must implement +//! [`Subcommand`][crate::Subcommand]) +//! - When `Option`, the subcommand becomes optional +//! +//! See [Configuring the Parser][_tutorial#configuring-the-parser] and +//! [Subcommands][_tutorial#subcommands] from the tutorial. +//! +//! ### ArgGroup Attributes +//! +//! These correspond to the [`ArgGroup`][crate::ArgGroup] which is implicitly created for each +//! `Args` derive. +//! +//! **Raw attributes:** Any [`ArgGroup` method][crate::ArgGroup] can also be used as an attribute, see [Terminology](#terminology) for syntax. +//! - e.g. `#[group(required = true)]` would translate to `arg_group.required(true)` +//! +//! **Magic attributes**: +//! - `id = `: [`ArgGroup::id`][crate::ArgGroup::id] +//! - When not present: struct's name is used +//! - `skip [= ]`: Ignore this field, filling in with `` +//! - Without ``: fills the field with `Default::default()` +//! +//! Note: +//! - For `struct`s, [`multiple = true`][crate::ArgGroup::multiple] is implied +//! - `enum` support is tracked at [#2621](https://github.com/clap-rs/clap/issues/2621) +//! +//! See [Argument Relations][_tutorial#argument-relations] from the tutorial. +//! +//! ### Arg Attributes +//! +//! These correspond to a [`Arg`][crate::Arg]. +//! The default state for a field without attributes is to be a positional argument with [behavior +//! inferred from the field type](#arg-types). +//! `#[arg(...)]` attributes allow overriding or extending those defaults. +//! +//! 
**Raw attributes:** Any [`Arg` method][crate::Arg] can also be used as an attribute, see [Terminology](#terminology) for syntax. +//! - e.g. `#[arg(num_args(..=3))]` would translate to `arg.num_args(..=3)` +//! +//! **Magic attributes**: +//! - `id = `: [`Arg::id`][crate::Arg::id] +//! - When not present: field's name is used +//! - `value_parser [= ]`: [`Arg::value_parser`][crate::Arg::value_parser] +//! - When not present: will auto-select an implementation based on the field type using +//! [`value_parser!`][crate::value_parser!] +//! - `action [= ]`: [`Arg::action`][crate::Arg::action] +//! - When not present: will auto-select an action based on the field type +//! - `help = `: [`Arg::help`][crate::Arg::help] +//! - When not present: [Doc comment summary](#doc-comments) +//! - `long_help[ = ]`: [`Arg::long_help`][crate::Arg::long_help] +//! - When not present: [Doc comment](#doc-comments) if there is a blank line, else nothing +//! - When present without a value: [Doc comment](#doc-comments) +//! - `verbatim_doc_comment`: Minimizes pre-processing when converting doc comments to [`help`][crate::Arg::help] / [`long_help`][crate::Arg::long_help] +//! - `short [= ]`: [`Arg::short`][crate::Arg::short] +//! - When not present: no short set +//! - Without ``: defaults to first character in the case-converted field name +//! - `long [= ]`: [`Arg::long`][crate::Arg::long] +//! - When not present: no long set +//! - Without ``: defaults to the case-converted field name +//! - `env [= ]`: [`Arg::env`][crate::Arg::env] (needs [`env` feature][crate::_features] enabled) +//! - When not present: no env set +//! - Without ``: defaults to the case-converted field name +//! - `from_global`: Read a [`Arg::global`][crate::Arg::global] argument (raw attribute), regardless of what subcommand you are in +//! - `value_enum`: Parse the value using the [`ValueEnum`][crate::ValueEnum] +//! - `skip [= ]`: Ignore this field, filling in with `` +//! 
- Without ``: fills the field with `Default::default()` +//! - `default_value = `: [`Arg::default_value`][crate::Arg::default_value] and [`Arg::required(false)`][crate::Arg::required] +//! - `default_value_t [= ]`: [`Arg::default_value`][crate::Arg::default_value] and [`Arg::required(false)`][crate::Arg::required] +//! - Requires `std::fmt::Display` that roundtrips correctly with the +//! [`Arg::value_parser`][crate::Arg::value_parser] or `#[arg(value_enum)]` +//! - Without ``, relies on `Default::default()` +//! - `default_values_t = `: [`Arg::default_values`][crate::Arg::default_values] and [`Arg::required(false)`][crate::Arg::required] +//! - Requires field arg to be of type `Vec` and `T` to implement `std::fmt::Display` or `#[arg(value_enum)]` +//! - `` must implement `IntoIterator` +//! - `default_value_os_t [= ]`: [`Arg::default_value_os`][crate::Arg::default_value_os] and [`Arg::required(false)`][crate::Arg::required] +//! - Requires `std::convert::Into` or `#[arg(value_enum)]` +//! - Without ``, relies on `Default::default()` +//! - `default_values_os_t = `: [`Arg::default_values_os`][crate::Arg::default_values_os] and [`Arg::required(false)`][crate::Arg::required] +//! - Requires field arg to be of type `Vec` and `T` to implement `std::convert::Into` or `#[arg(value_enum)]` +//! - `` must implement `IntoIterator` +//! +//! See [Adding Arguments][_tutorial#adding-arguments] and [Validation][_tutorial#validation] from the +//! tutorial. +//! +//! ### ValueEnum Attributes +//! +//! - `rename_all = `: Override default field / variant name case conversion for [`PossibleValue::new`][crate::builder::PossibleValue] +//! - When not present: `"kebab-case"` +//! - Available values: `"camelCase"`, `"kebab-case"`, `"PascalCase"`, `"SCREAMING_SNAKE_CASE"`, `"snake_case"`, `"lower"`, `"UPPER"`, `"verbatim"` +//! +//! See [Enumerated values][_tutorial#enumerated-values] from the tutorial. +//! +//! ### Possible Value Attributes +//! +//! 
These correspond to a [`PossibleValue`][crate::builder::PossibleValue]. +//! +//! **Raw attributes:** Any [`PossibleValue` method][crate::builder::PossibleValue] can also be used as an attribute, see [Terminology](#terminology) for syntax. +//! - e.g. `#[value(alias("foo"))]` would translate to `pv.alias("foo")` +//! +//! **Magic attributes**: +//! - `name = `: [`PossibleValue::new`][crate::builder::PossibleValue::new] +//! - When not present: case-converted field name is used +//! - `help = `: [`PossibleValue::help`][crate::builder::PossibleValue::help] +//! - When not present: [Doc comment summary](#doc-comments) +//! - `skip`: Ignore this variant +//! +//! ## Field Types +//! +//! `clap` assumes some intent based on the type used. +//! +//! ### Subcommand Types +//! +//! | Type | Effect | Implies | +//! |-----------------------|---------------------|-----------------------------------------------------------| +//! | `Option` | optional subcommand | | +//! | `T` | required subcommand | `.subcommand_required(true).arg_required_else_help(true)` | +//! +//! ### Arg Types +//! +//! | Type | Effect | Implies | Notes | +//! |-----------------------|------------------------------------------------------|-------------------------------------------------------------|-------| +//! | `()` | user-defined | `.action(ArgAction::Set).required(false)` | | +//! | `bool` | flag | `.action(ArgAction::SetTrue)` | | +//! | `Option` | optional argument | `.action(ArgAction::Set).required(false)` | | +//! | `Option>` | optional value for optional argument | `.action(ArgAction::Set).required(false).num_args(0..=1)` | | +//! | `T` | required argument | `.action(ArgAction::Set).required(!has_default)` | | +//! | `Vec` | `0..` occurrences of argument | `.action(ArgAction::Append).required(false)` | | +//! | `Option>` | `0..` occurrences of argument | `.action(ArgAction::Append).required(false)` | | +//! 
| `Vec>` | `0..` occurrences of argument, grouped by occurrence | `.action(ArgAction::Append).required(false)` | requires `unstable-v5` | +//! | `Option>>` | `0..` occurrences of argument, grouped by occurrence | `.action(ArgAction::Append).required(false)` | requires `unstable-v5` | +//! +//! In addition, [`.value_parser(value_parser!(T))`][crate::value_parser!] is called for each +//! field in the absence of a [`#[arg(value_parser)]` attribute](#arg-attributes). +//! +//! Notes: +//! - For custom type behavior, you can override the implied attributes/settings and/or set additional ones +//! - To force any inferred type (like `Vec`) to be treated as `T`, you can refer to the type +//! by another means, like using `std::vec::Vec` instead of `Vec`. For improving this, see +//! [#4626](https://github.com/clap-rs/clap/issues/4626). +//! - `Option>` and `Option>` will be `None` instead of `vec![]` if no arguments are provided. +//! - This gives the user some flexibility in designing their argument, like with `num_args(0..)` +//! - `Vec>` will need [`Arg::num_args`][crate::Arg::num_args] set to be meaningful +//! +//! ## Doc Comments +//! +//! In clap, help messages for the whole binary can be specified +//! via [`Command::about`][crate::Command::about] and [`Command::long_about`][crate::Command::long_about] while help messages +//! for individual arguments can be specified via [`Arg::help`][crate::Arg::help] and [`Arg::long_help`][crate::Arg::long_help]. +//! +//! `long_*` variants are used when user calls the program with +//! `--help` and "short" variants are used with `-h` flag. +//! +//! ```rust +//! # use clap::Parser; +//! +//! #[derive(Parser)] +//! #[command(about = "I am a program and I work, just pass `-h`", long_about = None)] +//! struct Foo { +//! #[arg(short, help = "Pass `-h` and you'll see me!")] +//! bar: String, +//! } +//! ``` +//! +//! For convenience, doc comments can be used instead of raw methods +//! 
(this example works exactly like the one above): +//! +//! ```rust +//! # use clap::Parser; +//! +//! #[derive(Parser)] +//! /// I am a program and I work, just pass `-h` +//! struct Foo { +//! /// Pass `-h` and you'll see me! +//! bar: String, +//! } +//! ``` +//! +//!
+//! +//! **NOTE:** Attributes have priority over doc comments! +//! +//! **Top level doc comments always generate `Command::about/long_about` calls!** +//! If you really want to use the `Command::about/long_about` methods (you likely don't), +//! use the `about` / `long_about` attributes to override the calls generated from +//! the doc comment. To clear `long_about`, you can use +//! `#[command(long_about = None)]`. +//! +//!
+//! +//! ### Pre-processing +//! +//! ```rust +//! # use clap::Parser; +//! #[derive(Parser)] +//! /// Hi there, I'm Robo! +//! /// +//! /// I like beeping, stumbling, eating your electricity, +//! /// and making records of you singing in a shower. +//! /// Pay up, or I'll upload it to youtube! +//! struct Robo { +//! /// Call my brother SkyNet. +//! /// +//! /// I am artificial superintelligence. I won't rest +//! /// until I'll have destroyed humanity. Enjoy your +//! /// pathetic existence, you mere mortals. +//! #[arg(long, action)] +//! kill_all_humans: bool, +//! } +//! ``` +//! +//! A doc comment consists of three parts: +//! - Short summary +//! - A blank line (whitespace only) +//! - Detailed description, all the rest +//! +//! The summary corresponds with `Command::about` / `Arg::help`. When a blank line is +//! present, the whole doc comment will be passed to `Command::long_about` / +//! `Arg::long_help`. Or in other words, a doc may result in just a `Command::about` / +//! `Arg::help` or `Command::about` / `Arg::help` and `Command::long_about` / +//! `Arg::long_help` +//! +//! In addition, when `verbatim_doc_comment` is not present, `clap` applies some preprocessing, including: +//! +//! - Strip leading and trailing whitespace from every line, if present. +//! +//! - Strip leading and trailing blank lines, if present. +//! +//! - Interpret each group of non-empty lines as a word-wrapped paragraph. +//! +//! We replace newlines within paragraphs with spaces to allow the output +//! to be re-wrapped to the terminal width. +//! +//! - Strip any excess blank lines so that there is exactly one per paragraph break. +//! +//! - If the first paragraph ends in exactly one period, +//! remove the trailing period (i.e. strip trailing periods but not trailing ellipses). +//! +//! Sometimes you don't want this preprocessing to apply, for example the comment contains +//! some ASCII art or markdown tables, you would need to preserve LFs along with +//! 
blank lines and the leading/trailing whitespace. When you pass use the +//! `verbatim_doc_comment` magic attribute, you preserve +//! them. +//! +//! **Note:** Keep in mind that `verbatim_doc_comment` will *still* +//! - Remove one leading space from each line, even if this attribute is present, +//! to allow for a space between `///` and the content. +//! - Remove leading and trailing blank lines +//! +//! ## Mixing Builder and Derive APIs +//! +//! The builder and derive APIs do not live in isolation. They can work together, which is +//! especially helpful if some arguments can be specified at compile-time while others must be +//! specified at runtime. +//! +//! ### Using derived arguments in a builder application +//! +//! When using the derive API, you can `#[command(flatten)]` a struct deriving `Args` into a struct +//! deriving `Args` or `Parser`. This example shows how you can augment a `Command` instance +//! created using the builder API with `Args` created using the derive API. +//! +//! It uses the [`Args::augment_args`][crate::Args::augment_args] method to add the arguments to +//! the `Command` instance. +//! +//! Crates such as [clap-verbosity-flag](https://github.com/rust-cli/clap-verbosity-flag) provide +//! structs that implement `Args`. Without the technique shown in this example, it would not be +//! possible to use such crates with the builder API. +//! +//! For example: +//! ```rust +#![doc = include_str!("../../examples/derive_ref/augment_args.rs")] +//! ``` +//! +//! ### Using derived subcommands in a builder application +//! +//! When using the derive API, you can use `#[command(subcommand)]` inside the struct to add +//! subcommands. The type of the field is usually an enum that derived `Parser`. However, you can +//! also add the subcommands in that enum to a `Command` instance created with the builder API. +//! +//! It uses the [`Subcommand::augment_subcommands`][crate::Subcommand::augment_subcommands] method +//! 
to add the subcommands to the `Command` instance. +//! +//! For example: +//! ```rust +#![doc = include_str!("../../examples/derive_ref/augment_subcommands.rs")] +//! ``` +//! +//! ### Adding hand-implemented subcommands to a derived application +//! +//! When using the derive API, you can use `#[command(subcommand)]` inside the struct to add +//! subcommands. The type of the field is usually an enum that derived `Parser`. However, you can +//! also implement the `Subcommand` trait manually on this enum (or any other type) and it can +//! still be used inside the struct created with the derive API. The implementation of the +//! `Subcommand` trait will use the builder API to add the subcommands to the `Command` instance +//! created behind the scenes for you by the derive API. +//! +//! Notice how in the previous example we used +//! [`augment_subcommands`][crate::Subcommand::augment_subcommands] on an enum that derived +//! `Parser`, whereas now we implement +//! [`augment_subcommands`][crate::Subcommand::augment_subcommands] ourselves, but the derive API +//! calls it automatically since we used the `#[command(subcommand)]` attribute. +//! +//! For example: +//! ```rust +#![doc = include_str!("../../examples/derive_ref/hand_subcommand.rs")] +//! ``` +//! +//! ### Flattening hand-implemented args into a derived application +//! +//! When using the derive API, you can use `#[command(flatten)]` inside the struct to add arguments as +//! if they were added directly to the containing struct. The type of the field is usually a +//! struct that derived `Args`. However, you can also implement the `Args` trait manually on this +//! struct (or any other type) and it can still be used inside the struct created with the derive +//! API. The implementation of the `Args` trait will use the builder API to add the arguments to +//! the `Command` instance created behind the scenes for you by the derive API. +//! 
Notice how in the previous example we used [`augment_args`][crate::Args::augment_args] on the +//! struct that derived `Parser`, whereas now we implement +//! [`augment_args`][crate::Args::augment_args] ourselves, but the derive API calls it +//! automatically since we used the `#[command(flatten)]` attribute. +//! +//! For example: +//! ```rust +#![doc = include_str!("../../examples/derive_ref/flatten_hand_args.rs")] +//! ``` +//! +//! ## Tips +//! +//! - To get access to a [`Command`][crate::Command] call +//! [`CommandFactory::command`][crate::CommandFactory::command] (implemented when deriving +//! [`Parser`][crate::Parser]) +//! - Proactively check for bad [`Command`][crate::Command] configurations by calling +//! [`Command::debug_assert`][crate::Command::debug_assert] in a test +//! ([example][_tutorial#testing]) +//! - Always remember to [document](#doc-comments) args and commands with `#![deny(missing_docs)]` + +// Point people here that search for attributes that don't exist in the derive (a subset of magic +// attributes) +#![doc(alias = "skip")] +#![doc(alias = "verbatim_doc_comment")] +#![doc(alias = "flatten")] +#![doc(alias = "external_subcommand")] +#![doc(alias = "subcommand")] +#![doc(alias = "rename_all")] +#![doc(alias = "rename_all_env")] +#![doc(alias = "default_value_t")] +#![doc(alias = "default_values_t")] +#![doc(alias = "default_value_os_t")] +#![doc(alias = "default_values_os_t")] + +pub mod _tutorial; +#[doc(inline)] +pub use crate::_cookbook; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_faq.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_faq.rs new file mode 100644 index 0000000000000000000000000000000000000000..02657848482c58f1de3e2d5eb142829fbc4a509c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_faq.rs @@ -0,0 +1,95 @@ +//! # Documentation: FAQ +//! +//! 1. [Comparisons](#comparisons) +//! 1. 
[How does `clap` compare to structopt?](#how-does-clap-compare-to-structopt) +//! 2. [What are some reasons to use `clap`? (The Pitch)](#what-are-some-reasons-to-use-clap-the-pitch) +//! 3. [What are some reasons *not* to use `clap`? (The Anti Pitch)](#what-are-some-reasons-not-to-use-clap-the-anti-pitch) +//! 4. [Reasons to use `clap`](#reasons-to-use-clap) +//! 2. [How many approaches are there to create a parser?](#how-many-approaches-are-there-to-create-a-parser) +//! 3. [When should I use the builder vs derive APIs?](#when-should-i-use-the-builder-vs-derive-apis) +//! 4. [Why is there a default subcommand of help?](#why-is-there-a-default-subcommand-of-help) +//! +//! ### Comparisons +//! +//! First, let me say that these comparisons are highly subjective, and not meant +//! in a critical or harsh manner. All the argument parsing libraries out there (to +//! include `clap`) have their own strengths and weaknesses. Sometimes it just +//! comes down to personal taste when all other factors are equal. When in doubt, +//! try them all and pick one that you enjoy :). There's plenty of room in the Rust +//! community for multiple implementations! +//! +//! For less detailed but more broad comparisons, see +//! [argparse-benchmarks](https://github.com/rust-cli/argparse-benchmarks-rs). +//! +//! #### How does `clap` compare to [structopt](https://github.com/TeXitoi/structopt)? +//! +//! Simple! `clap` *is* `structopt`. `structopt` started as a derive API built on +//! top of clap v2. With clap v3, we've forked structopt and integrated it +//! directly into clap. structopt is in +//! [maintenance mode](https://github.com/TeXitoi/structopt/issues/516#issuecomment-989566094) +//! with the release of `clap_derive`. +//! +//! The benefits of integrating `structopt` and `clap` are: +//! - Easier cross-linking in documentation +//! - Documentation parity +//! - Tighter design feedback loop, ensuring all new features are designed with +//! 
derives in mind and easier to change `clap` in response to `structopt` bugs. +//! - Clearer endorsement of `structopt` +//! +//! See also +//! - [`clap` v3 CHANGELOG](https://github.com/clap-rs/clap/blob/v3-master/CHANGELOG.md#300---2021-12-31) +//! - [`structopt` migration guide](https://github.com/clap-rs/clap/blob/v3-master/CHANGELOG.md#migrate-structopt) +//! +//! #### What are some reasons to use `clap`? (The Pitch) +//! +//! `clap` is as fast, and as lightweight as possible while still giving all the features you'd expect from a modern argument parser. In fact, for the amount and type of features `clap` offers it remains about as fast as `getopts`. If you use `clap`, when you just need some simple arguments parsed, you'll find it's a walk in the park. `clap` also makes it possible to represent extremely complex and advanced requirements without too much thought. `clap` aims to be intuitive, easy to use, and fully capable for wide variety use cases and needs. +//! +//! #### What are some reasons *not* to use `clap`? (The Anti Pitch) +//! +//! Depending on the style in which you choose to define the valid arguments, `clap` can be very verbose. `clap` also offers so many finetuning knobs and dials, that learning everything can seem overwhelming. I strive to keep the simple cases simple, but when turning all those custom dials it can get complex. `clap` is also opinionated about parsing. Even though so much can be tweaked and tuned with `clap` (and I'm adding more all the time), there are still certain features which `clap` implements in specific ways that may be contrary to some users' use-cases. +//! +//! #### Reasons to use `clap` +//! +//! * You want all the nice CLI features your users may expect, yet you don't want to implement them all yourself. You'd like to focus on your application, not argument parsing. +//! * In addition to the point above, you don't want to sacrifice performance to get all those nice features. +//! 
* You have complex requirements/conflicts between your various valid args. +//! * You want to use subcommands (although other libraries also support subcommands, they are not nearly as feature rich as those provided by `clap`). +//! * You want some sort of custom validation built into the argument parsing process, instead of as part of your application (which allows for earlier failures, better error messages, more cohesive experience, etc.). +//! +//! ### How many approaches are there to create a parser? +//! +//! The following APIs are supported: +//! - [Derive][crate::_derive::_tutorial] +//! - [Builder][crate::_tutorial] +//! +//! Previously, we supported: +//! - [YAML](https://github.com/clap-rs/clap/issues/3087) +//! - [docopt](http://docopt.org/)-inspired [usage parser](https://github.com/clap-rs/clap/issues/3086) +//! - [`clap_app!`](https://github.com/clap-rs/clap/issues/2835) +//! +//! There are also experiments with other APIs: +//! - [fncmd](https://github.com/yuhr/fncmd): function attribute +//! - [clap-serde](https://github.com/aobatact/clap-serde): create a `Command` from a deserializer +//! +//! ### When should I use the builder vs derive APIs? +//! +//! Our default answer is to use the [Derive API][crate::_derive::_tutorial]: +//! - Easier to read, write, and modify +//! - Easier to keep the argument declaration and reading of argument in sync +//! - Easier to reuse, e.g. [clap-verbosity-flag](https://crates.io/crates/clap-verbosity-flag) +//! +//! The [Builder API][crate::_tutorial] is a lower-level API that someone might want to use for +//! - Faster compile times if you aren't already using other procedural macros +//! - More flexibility, e.g. you can look up the [argument's values][crate::ArgMatches::get_many], +//! their [ordering with other arguments][crate::ArgMatches::indices_of], and [what set +//! them][crate::ArgMatches::value_source]. The Derive API can only report values and not +//! indices of or other data. +//! +//! 
You can [interop between Derive and Builder APIs][crate::_derive#mixing-builder-and-derive-apis]. +//! +//! ### Why is there a default subcommand of help? +//! +//! There is only a default subcommand of `help` when other subcommands have been defined manually. So it's opt-in(ish), being that you only get a `help` subcommand if you're actually using subcommands. +//! +//! Also, if the user defined a `help` subcommand themselves, the auto-generated one wouldn't be added (meaning it's only generated if the user hasn't defined one themselves). +//! diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_features.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_features.rs new file mode 100644 index 0000000000000000000000000000000000000000..b9c2eee10b449d377d1cf4a2266b53a98e8206e7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_features.rs @@ -0,0 +1,29 @@ +//! ## Documentation: Feature Flags +//! +//! Available [compile-time feature flags](https://doc.rust-lang.org/cargo/reference/features.html#dependency-features) +//! +//! #### Default Features +//! +//! * `std`: _Not Currently Used._ Placeholder for supporting `no_std` environments in a backwards compatible manner. +//! * `color`: Turns on terminal styling of help and error messages. See +//! [`Command::styles`][crate::Command::styles] to customize this. +//! * `help`: Auto-generate help output +//! * `usage`: Auto-generate usage +//! * `error-context`: Include contextual information for errors (which arg failed, etc) +//! * `suggestions`: Turns on the `Did you mean '--myoption'?` feature for when users make typos. +//! +//! #### Optional features +//! +//! * `deprecated`: Guided experience to prepare for next breaking release (at different stages of development, this may become default) +//! * `derive`: Enables the custom derive (i.e. `#[derive(Parser)]`). 
Without this you must use one of the other methods of creating a `clap` CLI listed above. +//! * `cargo`: Turns on macros that read values from [`CARGO_*` environment variables](https://doc.rust-lang.org/cargo/reference/environment-variables.html#environment-variables-cargo-sets-for-crates). +//! * `env`: Turns on the usage of environment variables during parsing. +//! * `unicode`: Turns on support for unicode characters (including emoji) in arguments and help messages. +//! * ``wrap_help``: Turns on the help text wrapping feature, based on the terminal size. +//! * `string`: Allow runtime generated strings (e.g. with [`Str`][crate::builder::Str]). +//! +//! #### Experimental features +//! +//! **Warning:** These may contain breaking changes between minor releases. +//! +//! * `unstable-v5`: Preview features which will be stable on the v5.0 release diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_tutorial.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_tutorial.rs new file mode 100644 index 0000000000000000000000000000000000000000..34407eb97b64ce62b780df638eb14cebfcd40a87 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/_tutorial.rs @@ -0,0 +1,246 @@ +// Contributing +// +// New example code: +// - Please update the corresponding section in the derive tutorial +// - Building: They must be added to `Cargo.toml` with the appropriate `required-features`. +// - Testing: Ensure there is a markdown file with [trycmd](https://docs.rs/trycmd) syntax +// +// See also the general CONTRIBUTING + +//! ## Tutorial for the Builder API +//! +//! *See the side bar for the Table of Contents* +//! +//! ## Quick Start +//! +//! You can create an application with several arguments using usage strings. +//! +//! First, ensure `clap` is available: +//! ```console +//! $ cargo add clap +//! ``` +//! +//! Here is a preview of the type of application you can make: +//! 
```rust +#![doc = include_str!("../examples/tutorial_builder/01_quick.rs")] +//! ``` +//! +#![doc = include_str!("../examples/tutorial_builder/01_quick.md")] +//! +//! See also +//! - [FAQ: When should I use the builder vs derive APIs?][crate::_faq#when-should-i-use-the-builder-vs-derive-apis] +//! - The [cookbook][crate::_cookbook] for more application-focused examples +//! +//! ## Configuring the Parser +//! +//! You use [`Command`][crate::Command] to start building a parser. +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/02_apps.rs")] +//! ``` +//! +#![doc = include_str!("../examples/tutorial_builder/02_apps.md")] +//! +//! You can use [`command!()`][crate::command!] to fill these fields in from your `Cargo.toml` +//! file. **This requires the [`cargo` feature flag][crate::_features].** +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/02_crate.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/02_crate.md")] +//! +//! You can use [`Command`][crate::Command] methods to change the application level behavior of +//! clap, like [`Command::next_line_help`]. +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/02_app_settings.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/02_app_settings.md")] +//! +//! ## Adding Arguments +//! +//! 1. [Positionals](#positionals) +//! 2. [Options](#options) +//! 3. [Flags](#flags) +//! 4. [Required](#required) +//! 5. [Defaults](#defaults) +//! 6. [Subcommands](#subcommands) +//! +//! +//! ### Positionals +//! +//! By default, an [`Arg`] defines a positional argument: +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/03_03_positional.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/03_03_positional.md")] +//! +//! Note that the default [`ArgAction`][crate::ArgAction] is [`Set`][crate::ArgAction::Set]. To +//! 
accept multiple values, override the [action][Arg::action] with [`Append`][crate::ArgAction::Append]: +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/03_03_positional_mult.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/03_03_positional_mult.md")] +//! +//! ### Options +//! +//! You can name your arguments with a flag: +//! - Intent of the value is clearer +//! - Order doesn't matter +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/03_02_option.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/03_02_option.md")] +//! +//! Note that the default [`ArgAction`][crate::ArgAction] is [`Set`][crate::ArgAction::Set]. To +//! accept multiple occurrences, override the [action][Arg::action] with [`Append`][crate::ArgAction::Append]: +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/03_02_option_mult.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/03_02_option_mult.md")] +//! +//! ### Flags +//! +//! Flags can also be switches that can be on/off: +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/03_01_flag_bool.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/03_01_flag_bool.md")] +//! +//! To accept multiple flags, use [`Count`][crate::ArgAction::Count]: +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/03_01_flag_count.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/03_01_flag_count.md")] +//! +//! ### Required +//! +//! By default, an [`Arg`] is optional which can be changed with +//! [`required`][crate::Arg::required]. +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/03_06_required.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/03_06_required.md")] +//! +//! ### Defaults +//! +//! We've previously showed that arguments can be [`required`][crate::Arg::required] or optional. +//! When optional, you work with a `Option` and can `unwrap_or`. 
Alternatively, you can set +//! [`Arg::default_value`][crate::Arg::default_value]. +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/03_05_default_values.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/03_05_default_values.md")] +//! +//! ### Subcommands +//! +//! Subcommands are defined as [`Command`][crate::Command]s that get added via +//! [`Command::subcommand`][crate::Command::subcommand]. Each instance of a Subcommand can have its +//! own version, author(s), Args, and even its own subcommands. +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/03_04_subcommands.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/03_04_subcommands.md")] +//! +//! ## Validation +//! +//! 1. [Enumerated values](#enumerated-values) +//! 2. [Validated values](#validated-values) +//! 3. [Argument Relations](#argument-relations) +//! 4. [Custom Validation](#custom-validation) +//! +//! An appropriate default parser/validator will be selected for the field's type. See +//! [`value_parser!`][crate::value_parser!] for more details. +//! +//! ### Enumerated values +//! +//! If you have arguments of specific values you want to test for, you can use the +//! [`PossibleValuesParser`] or [`Arg::value_parser(["val1", +//! ...])`][crate::Arg::value_parser] for short. +//! +//! This allows you to specify the valid values for that argument. If the user does not use one of +//! those specific values, they will receive a graceful exit with error message informing them +//! of the mistake, and what the possible valid values are +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/04_01_possible.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/04_01_possible.md")] +//! +//! When enabling the [`derive` feature][crate::_features], you can use +//! [`ValueEnum`][crate::ValueEnum] to take care of the boiler plate for you, giving the same +//! results. +//! +//! 
```rust +#![doc = include_str!("../examples/tutorial_builder/04_01_enum.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/04_01_enum.md")] +//! +//! ### Validated values +//! +//! More generally, you can validate and parse into any data type with [`Arg::value_parser`]. +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/04_02_parse.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/04_02_parse.md")] +//! +//! A [custom parser][TypedValueParser] can be used to improve the error messages or provide additional validation: +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/04_02_validate.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/04_02_validate.md")] +//! +//! See [`Arg::value_parser`][crate::Arg::value_parser] for more details. +//! +//! ### Argument Relations +//! +//! You can declare dependencies or conflicts between [`Arg`][crate::Arg]s or even +//! [`ArgGroup`][crate::ArgGroup]s. +//! +//! [`ArgGroup`][crate::ArgGroup]s make it easier to declare relations instead of having to list +//! each individually, or when you want a rule to apply "any but not all" arguments. +//! +//! Perhaps the most common use of [`ArgGroup`][crate::ArgGroup]s is to require one and *only* one +//! argument to be present out of a given set. Imagine that you had multiple arguments, and you +//! want one of them to be required, but making all of them required isn't feasible because perhaps +//! they conflict with each other. +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/04_03_relations.rs")] +//! ``` +#![doc = include_str!("../examples/tutorial_builder/04_03_relations.md")] +//! +//! ### Custom Validation +//! +//! As a last resort, you can create custom errors with the basics of clap's formatting. +//! +//! ```rust +#![doc = include_str!("../examples/tutorial_builder/04_04_custom.rs")] +//! 
``` +#![doc = include_str!("../examples/tutorial_builder/04_04_custom.md")] +//! +//! ## Testing +//! +//! clap reports most development errors as `debug_assert!`s. Rather than checking every +//! subcommand, you should have a test that calls +//! [`Command::debug_assert`][crate::Command::debug_assert]: +//! ```rust,no_run +#![doc = include_str!("../examples/tutorial_builder/05_01_assert.rs")] +//! ``` +//! +//! ## Next Steps +//! +//! - [Cookbook][crate::_cookbook] for application-focused examples +//! - Explore more features in the [API reference][super] +//! +//! For support, see [Discussions](https://github.com/clap-rs/clap/discussions) +#![allow(unused_imports)] +use crate::builder::*; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/bin/stdio-fixture.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/bin/stdio-fixture.rs new file mode 100644 index 0000000000000000000000000000000000000000..d1356a0c770d52a5e55099056232d1ea5b051e1b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/bin/stdio-fixture.rs @@ -0,0 +1,108 @@ +use clap::{builder::PossibleValue, Arg, ArgAction, Command}; + +fn main() { + #[allow(unused_mut)] + let mut cmd = Command::new("stdio-fixture") + .version("1.0") + .long_version("1.0 - a2132c") + .term_width(0) + .max_term_width(0) + .arg_required_else_help(true) + .subcommand(Command::new("more")) + .subcommand( + Command::new("test") + .visible_alias("do-stuff") + .long_about("Subcommand with one visible alias"), + ) + .subcommand( + Command::new("test_2") + .visible_aliases(["do-other-stuff", "tests"]) + .about("several visible aliases") + .long_about("Subcommand with multiple visible aliases"), + ) + .subcommand( + Command::new("test_3") + .long_flag("test") + .about("several visible long flag aliases") + .visible_long_flag_aliases(["testing", "testall", "test_all"]), + ) + .subcommand( + Command::new("test_4") + .short_flag('t') + .about("several 
visible short flag aliases") + .visible_short_flag_aliases(['q', 'w']), + ) + .subcommand( + Command::new("test_5") + .short_flag('e') + .long_flag("test-hdr") + .about("all kinds of visible aliases") + .visible_aliases(["tests_4k"]) + .visible_long_flag_aliases(["thetests", "t4k"]) + .visible_short_flag_aliases(['r', 'y']), + ) + .arg( + Arg::new("verbose") + .long("verbose") + .help("log") + .action(ArgAction::SetTrue) + .long_help("more log"), + ) + .arg( + Arg::new("config") + .action(ArgAction::Set) + .help("Speed configuration") + .short('c') + .long("config") + .value_name("MODE") + .value_parser([ + PossibleValue::new("fast"), + PossibleValue::new("slow").help("slower than fast"), + PossibleValue::new("secret speed").hide(true), + ]) + .default_value("fast"), + ) + .arg( + Arg::new("name") + .action(ArgAction::Set) + .help("App name") + .long_help("Set the instance app name") + .value_name("NAME") + .visible_alias("app-name") + .default_value("clap"), + ) + .arg( + Arg::new("fruits") + .short('f') + .visible_short_alias('b') + .action(ArgAction::Append) + .value_name("FRUITS") + .help("List of fruits") + .default_values(["apple", "banane", "orange"]), + ); + #[cfg(feature = "env")] + { + cmd = cmd.arg( + Arg::new("env_arg") + .help("Read from env var when arg is not present.") + .value_name("ENV") + .env("ENV_ARG"), + ); + } + #[cfg(feature = "color")] + { + use clap::builder::styling::{AnsiColor, Styles}; + const STYLES: Styles = Styles::styled() + .header(AnsiColor::Green.on_default().bold()) + .error(AnsiColor::Red.on_default().bold()) + .usage(AnsiColor::Green.on_default().bold().underline()) + .literal(AnsiColor::Blue.on_default().bold()) + .placeholder(AnsiColor::Cyan.on_default()) + .valid(AnsiColor::Green.on_default()) + .invalid(AnsiColor::Magenta.on_default().bold()) + .context(AnsiColor::Yellow.on_default().dimmed()) + .context_value(AnsiColor::Yellow.on_default().italic()); + cmd = cmd.styles(STYLES); + } + cmd.get_matches(); +} diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..1db8efd0e341e6a6f13dad67d350faeb4b32ad25 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/clap-4.5.60/src/lib.rs @@ -0,0 +1,110 @@ +// Copyright ⓒ 2015-2016 Kevin B. Knapp and [`clap-rs` contributors](https://github.com/clap-rs/clap/graphs/contributors). +// Licensed under the MIT license +// (see LICENSE or ) All files in the project carrying such +// notice may not be copied, modified, or distributed except according to those terms. + +//! > **Command Line Argument Parser for Rust** +//! +//! Quick Links: +//! - Derive [tutorial][_derive::_tutorial] and [reference][_derive] +//! - Builder [tutorial][_tutorial] and [reference][Command] +//! - [Cookbook][_cookbook] +//! - [CLI Concepts][_concepts] +//! - [FAQ][_faq] +//! - [Discussions](https://github.com/clap-rs/clap/discussions) +//! - [CHANGELOG](https://github.com/clap-rs/clap/blob/v4.5.60/CHANGELOG.md) (includes major version migration +//! guides) +//! +//! ## Aspirations +//! +//! - Out of the box, users get a polished CLI experience +//! - Including common argument behavior, help generation, suggested fixes for users, colored output, [shell completions](https://github.com/clap-rs/clap/tree/master/clap_complete), etc +//! - Flexible enough to port your existing CLI interface +//! - However, we won't necessarily streamline support for each use case +//! - Reasonable parse performance +//! - Resilient maintainership, including +//! - Willing to break compatibility rather than batching up breaking changes in large releases +//! - Leverage feature flags to keep to one active branch +//! - Being under [WG-CLI](https://github.com/rust-cli/team/) to increase the bus factor +//! - We follow semver and will wait about 6-9 months between major breaking changes +//! 
- We will support the last two minor Rust releases (MSRV, currently 1.74) +//! +//! While these aspirations can be at odds with fast build times and low binary +//! size, we will still strive to keep these reasonable for the flexibility you +//! get. Check out the +//! [argparse-benchmarks](https://github.com/rust-cli/argparse-benchmarks-rs) for +//! CLI parsers optimized for other use cases. +//! +//! ## Example +//! +//! Run +//! ```console +//! $ cargo add clap --features derive +//! ``` +//! *(See also [feature flag reference][_features])* +//! +//! Then define your CLI in `main.rs`: +//! ```rust +//! # #[cfg(feature = "derive")] { +#![doc = include_str!("../examples/demo.rs")] +//! # } +//! ``` +//! +//! And try it out: +#![doc = include_str!("../examples/demo.md")] +//! +//! See also the derive [tutorial][_derive::_tutorial] and [reference][_derive] +//! +//! ### Related Projects +//! +//! Augment clap: +//! - [wild](https://crates.io/crates/wild) for supporting wildcards (`*`) on Windows like you do Linux +//! - [argfile](https://crates.io/crates/argfile) for loading additional arguments from a file (aka response files) +//! - [shadow-rs](https://crates.io/crates/shadow-rs) for generating `Command::long_version` +//! - [clap_mangen](https://crates.io/crates/clap_mangen) for generating man page source (roff) +//! - [clap_complete](https://crates.io/crates/clap_complete) for shell completion support +//! - [clap-i18n-richformatter](https://crates.io/crates/clap-i18n-richformatter) for i18n support with `clap::error::RichFormatter` +//! +//! CLI Helpers +//! - [clio](https://crates.io/crates/clio) for reading/writing to files specified as arguments +//! - [clap-verbosity-flag](https://crates.io/crates/clap-verbosity-flag) +//! - [clap-cargo](https://crates.io/crates/clap-cargo) +//! - [colorchoice-clap](https://crates.io/crates/colorchoice-clap) +//! +//! Testing +//! - [`trycmd`](https://crates.io/crates/trycmd): Bulk snapshot testing +//! 
- [`snapbox`](https://crates.io/crates/snapbox): Specialized snapshot testing +//! - [`assert_cmd`](https://crates.io/crates/assert_cmd) and [`assert_fs`](https://crates.io/crates/assert_fs): Customized testing +//! +//! Documentation: +//! - [Command-line Apps for Rust](https://rust-cli.github.io/book/index.html) book +//! + +#![doc(html_logo_url = "https://raw.githubusercontent.com/clap-rs/clap/master/assets/clap.png")] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![forbid(unsafe_code)] +#![warn(missing_docs)] +#![warn(clippy::print_stderr)] +#![warn(clippy::print_stdout)] + +pub use clap_builder::*; +#[cfg(feature = "derive")] +#[doc(hidden)] +pub use clap_derive::{self, Args, Parser, Subcommand, ValueEnum}; + +#[cfg(feature = "unstable-doc")] +pub mod _concepts; +#[cfg(feature = "unstable-doc")] +pub mod _cookbook; +#[cfg(feature = "unstable-doc")] +pub mod _derive; +#[cfg(feature = "unstable-doc")] +pub mod _faq; +#[cfg(feature = "unstable-doc")] +pub mod _features; +#[cfg(feature = "unstable-doc")] +pub mod _tutorial; + +#[doc = include_str!("../README.md")] +#[cfg(doctest)] +pub struct ReadmeDoctests; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/arcs.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/arcs.rs new file mode 100644 index 0000000000000000000000000000000000000000..7bf7a9a13e1019286bf5baf606a3e14c3908bd50 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/arcs.rs @@ -0,0 +1,170 @@ +//! Arcs are integer values which exist within an OID's hierarchy. + +use crate::{Error, ObjectIdentifier, Result}; +use core::mem; + +/// Type alias used to represent an "arc" (i.e. integer identifier value). +/// +/// X.660 does not define a maximum size of an arc. +/// +/// The current representation is `u32`, which has been selected as being +/// sufficient to cover the current PKCS/PKIX use cases this library has been +/// used in conjunction with. 
+/// +/// Future versions may potentially make it larger if a sufficiently important +/// use case is discovered. +pub type Arc = u32; + +/// Maximum value of the first arc in an OID. +pub(crate) const ARC_MAX_FIRST: Arc = 2; + +/// Maximum value of the second arc in an OID. +pub(crate) const ARC_MAX_SECOND: Arc = 39; + +/// Maximum number of bytes supported in an arc. +const ARC_MAX_BYTES: usize = mem::size_of::(); + +/// Maximum value of the last byte in an arc. +const ARC_MAX_LAST_OCTET: u8 = 0b11110000; // Max bytes of leading 1-bits + +/// [`Iterator`] over [`Arc`] values (a.k.a. nodes) in an [`ObjectIdentifier`]. +/// +/// This iterates over all arcs in an OID, including the root. +pub struct Arcs<'a> { + /// OID we're iterating over + oid: &'a ObjectIdentifier, + + /// Current position within the serialized DER bytes of this OID + cursor: Option, +} + +impl<'a> Arcs<'a> { + /// Create a new iterator over the arcs of this OID + pub(crate) fn new(oid: &'a ObjectIdentifier) -> Self { + Self { oid, cursor: None } + } + + /// Try to parse the next arc in this OID. + /// + /// This method is fallible so it can be used as a first pass to determine + /// that the arcs in the OID are well-formed. + pub(crate) fn try_next(&mut self) -> Result> { + match self.cursor { + // Indicates we're on the root OID + None => { + let root = RootArcs::try_from(self.oid.as_bytes()[0])?; + self.cursor = Some(0); + Ok(Some(root.first_arc())) + } + Some(0) => { + let root = RootArcs::try_from(self.oid.as_bytes()[0])?; + self.cursor = Some(1); + Ok(Some(root.second_arc())) + } + Some(offset) => { + let mut result = 0; + let mut arc_bytes = 0; + + loop { + let len = checked_add!(offset, arc_bytes); + + match self.oid.as_bytes().get(len).cloned() { + // The arithmetic below includes advance checks + // against `ARC_MAX_BYTES` and `ARC_MAX_LAST_OCTET` + // which ensure the operations will not overflow. 
+ #[allow(clippy::integer_arithmetic)] + Some(byte) => { + arc_bytes = checked_add!(arc_bytes, 1); + + if (arc_bytes > ARC_MAX_BYTES) && (byte & ARC_MAX_LAST_OCTET != 0) { + return Err(Error::ArcTooBig); + } + + result = result << 7 | (byte & 0b1111111) as Arc; + + if byte & 0b10000000 == 0 { + self.cursor = Some(checked_add!(offset, arc_bytes)); + return Ok(Some(result)); + } + } + None => { + if arc_bytes == 0 { + return Ok(None); + } else { + return Err(Error::Base128); + } + } + } + } + } + } + } +} + +impl<'a> Iterator for Arcs<'a> { + type Item = Arc; + + fn next(&mut self) -> Option { + // ObjectIdentifier constructors should ensure the OID is well-formed + self.try_next().expect("OID malformed") + } +} + +/// Byte containing the first and second arcs of an OID. +/// +/// This is represented this way in order to reduce the overall size of the +/// [`ObjectIdentifier`] struct. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +struct RootArcs(u8); + +impl RootArcs { + /// Create [`RootArcs`] from the first and second arc values represented + /// as `Arc` integers. 
+ pub(crate) const fn new(first_arc: Arc, second_arc: Arc) -> Result { + if first_arc > ARC_MAX_FIRST { + return Err(Error::ArcInvalid { arc: first_arc }); + } + + if second_arc > ARC_MAX_SECOND { + return Err(Error::ArcInvalid { arc: second_arc }); + } + + // The checks above ensure this operation will not overflow + #[allow(clippy::integer_arithmetic)] + let byte = (first_arc * (ARC_MAX_SECOND + 1)) as u8 + second_arc as u8; + + Ok(Self(byte)) + } + + /// Get the value of the first arc + #[allow(clippy::integer_arithmetic)] + pub(crate) const fn first_arc(self) -> Arc { + self.0 as Arc / (ARC_MAX_SECOND + 1) + } + + /// Get the value of the second arc + #[allow(clippy::integer_arithmetic)] + pub(crate) const fn second_arc(self) -> Arc { + self.0 as Arc % (ARC_MAX_SECOND + 1) + } +} + +impl TryFrom for RootArcs { + type Error = Error; + + // Ensured not to overflow by constructor invariants + #[allow(clippy::integer_arithmetic)] + fn try_from(octet: u8) -> Result { + let first = octet as Arc / (ARC_MAX_SECOND + 1); + let second = octet as Arc % (ARC_MAX_SECOND + 1); + let result = Self::new(first, second)?; + debug_assert_eq!(octet, result.0); + Ok(result) + } +} + +impl From for u8 { + fn from(root_arcs: RootArcs) -> u8 { + root_arcs.0 + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/checked.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/checked.rs new file mode 100644 index 0000000000000000000000000000000000000000..7ff16a2a7b339df28a8c1b40e2f6d4e86f4dc96e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/checked.rs @@ -0,0 +1,11 @@ +//! Checked arithmetic helpers. + +/// `const fn`-friendly checked addition helper. +macro_rules! 
checked_add { + ($a:expr, $b:expr) => { + match $a.checked_add($b) { + Some(n) => n, + None => return Err(Error::Length), + } + }; +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/db.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/db.rs new file mode 100644 index 0000000000000000000000000000000000000000..e4b7a47b4b953e6d920d6fa5c0862dc88cc1d85d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/db.rs @@ -0,0 +1,164 @@ +//! OID Names Database +//! +//! The contents of this database are generated from the official IANA +//! [Object Identifier Descriptors] Registry CSV file and from [RFC 5280]. +//! If we are missing values you care about, please contribute a patch to +//! `oiddbgen` (a subcrate in the source code) to generate the values from +//! the relevant standard. +//! +//! [RFC 5280]: https://datatracker.ietf.org/doc/html/rfc5280 +//! [Object Identifier Descriptors]: https://www.iana.org/assignments/ldap-parameters/ldap-parameters.xhtml#ldap-parameters-3 + +#![allow(clippy::integer_arithmetic, missing_docs)] + +mod gen; + +pub use gen::*; + +use crate::{Error, ObjectIdentifier}; + +/// A const implementation of byte equals. +const fn eq(lhs: &[u8], rhs: &[u8]) -> bool { + if lhs.len() != rhs.len() { + return false; + } + + let mut i = 0usize; + while i < lhs.len() { + if lhs[i] != rhs[i] { + return false; + } + + i += 1; + } + + true +} + +/// A const implementation of case-insensitive ASCII equals. +const fn eq_case(lhs: &[u8], rhs: &[u8]) -> bool { + if lhs.len() != rhs.len() { + return false; + } + + let mut i = 0usize; + while i < lhs.len() { + if !lhs[i].eq_ignore_ascii_case(&rhs[i]) { + return false; + } + + i += 1; + } + + true +} + +/// A query interface for OIDs/Names. +#[derive(Copy, Clone)] +pub struct Database<'a>(&'a [(&'a ObjectIdentifier, &'a str)]); + +impl<'a> Database<'a> { + /// Looks up a name for an OID. 
+ /// + /// Errors if the input is not a valid OID. + /// Returns the input if no name is found. + pub fn resolve<'b>(&self, oid: &'b str) -> Result<&'b str, Error> + where + 'a: 'b, + { + Ok(self.by_oid(&oid.parse()?).unwrap_or(oid)) + } + + /// Finds a named oid by its associated OID. + pub const fn by_oid(&self, oid: &ObjectIdentifier) -> Option<&'a str> { + let mut i = 0; + + while i < self.0.len() { + let lhs = self.0[i].0; + if lhs.length == oid.length && eq(&lhs.bytes, &oid.bytes) { + return Some(self.0[i].1); + } + + i += 1; + } + + None + } + + /// Finds a named oid by its associated name. + pub const fn by_name(&self, name: &str) -> Option<&'a ObjectIdentifier> { + let mut i = 0; + + while i < self.0.len() { + let lhs = self.0[i].1; + if eq_case(lhs.as_bytes(), name.as_bytes()) { + return Some(self.0[i].0); + } + + i += 1; + } + + None + } + + /// Return the list of matched name for the OID. + pub const fn find_names_for_oid(&self, oid: ObjectIdentifier) -> Names<'a> { + Names { + database: *self, + oid, + position: 0, + } + } +} + +/// Iterator returning the multiple names that may be associated with an OID. 
+pub struct Names<'a> { + database: Database<'a>, + oid: ObjectIdentifier, + position: usize, +} + +impl<'a> Iterator for Names<'a> { + type Item = &'a str; + + fn next(&mut self) -> Option<&'a str> { + let mut i = self.position; + + while i < self.database.0.len() { + let lhs = self.database.0[i].0; + + if lhs.as_bytes().eq(self.oid.as_bytes()) { + self.position = i + 1; + return Some(self.database.0[i].1); + } + + i += 1; + } + + None + } +} + +#[cfg(test)] +mod tests { + use crate::ObjectIdentifier; + + use super::rfc4519::CN; + + #[test] + fn by_oid() { + let cn = super::DB.by_oid(&CN).expect("cn not found"); + assert_eq!("cn", cn); + + let none = ObjectIdentifier::new_unwrap("0.1.2.3.4.5.6.7.8.9"); + assert_eq!(None, super::DB.by_oid(&none)); + } + + #[test] + fn by_name() { + let cn = super::DB.by_name("CN").expect("cn not found"); + assert_eq!(&CN, cn); + + assert_eq!(None, super::DB.by_name("purplePeopleEater")); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/db/gen.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/db/gen.rs new file mode 100644 index 0000000000000000000000000000000000000000..9c603d824fc2f108e3bd19cf34200040ab8a8e54 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/db/gen.rs @@ -0,0 +1,4248 @@ +#![doc = "!! 
DO NOT EDIT !!: This file is auto-generated by oiddbgen."] +pub mod rfc1274 { + pub const TEXT_ENCODED_OR_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.2"); + pub const OTHER_MAILBOX: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.22"); + pub const LAST_MODIFIED_TIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.23"); + pub const LAST_MODIFIED_BY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.24"); + pub const A_RECORD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.26"); + pub const MD_RECORD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.27"); + pub const MX_RECORD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.28"); + pub const NS_RECORD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.29"); + pub const SOA_RECORD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.30"); + pub const CNAME_RECORD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.31"); + pub const JANET_MAILBOX: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.46"); + pub const MAIL_PREFERENCE_OPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.47"); + pub const DSA_QUALITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.49"); + pub const SUBTREE_MINIMUM_QUALITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.51"); + pub const SUBTREE_MAXIMUM_QUALITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.52"); + pub const PERSONAL_SIGNATURE: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.53"); + pub const DIT_REDIRECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.54"); + pub const AUDIO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.55"); + pub const PHOTO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.7"); + pub const DNS_DOMAIN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.15"); + pub const PILOT_ORGANIZATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.20"); + pub const PILOT_DSA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.21"); + pub const QUALITY_LABELLED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.22"); + pub const PILOT_OBJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.3"); + pub const PILOT_PERSON: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.4"); +} +pub mod rfc2079 { + pub const LABELED_URI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.250.1.57"); + pub const LABELED_URI_OBJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.250.3.15"); +} +pub mod rfc2164 { + pub const RFC_822_TO_X_400_MAPPING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.1.1"); + pub const X_400_TO_RFC_822_MAPPING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.1.2"); + pub const OMITTED_OR_ADDRESS_COMPONENT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.1.3"); + pub const MIXER_GATEWAY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.1.4"); + pub const ASSOCIATED_X_400_GATEWAY: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.2.3"); + pub const ASSOCIATED_OR_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.2.6"); + pub const OR_ADDRESS_COMPONENT_TYPE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.2.7"); + pub const ASSOCIATED_INTERNET_GATEWAY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.2.8"); + pub const MCGAM_TABLES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.2.9"); +} +pub mod rfc2247 { + pub const DOMAIN_NAME_FORM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.345"); +} +pub mod rfc2252 { + pub const PRESENTATION_ADDRESS_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.22"); + pub const PROTOCOL_INFORMATION_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.24"); +} +pub mod rfc2256 { + pub const KNOWLEDGE_INFORMATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.2"); + pub const PRESENTATION_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.29"); + pub const SUPPORTED_APPLICATION_CONTEXT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.30"); + pub const PROTOCOL_INFORMATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.48"); + pub const DMD_NAME: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.54"); + pub const STATE_OR_PROVINCE_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.8"); + pub const STREET_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.9"); + pub const APPLICATION_ENTITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.12"); + pub const DSA: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.6.13"); + pub const DMD: crate::ObjectIdentifier = 
crate::ObjectIdentifier::new_unwrap("2.5.6.20"); +} +pub mod rfc2293 { + pub const SUBTREE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.1.1"); + pub const TABLE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.1.2"); + pub const TABLE_ENTRY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.1.3"); + pub const TEXT_TABLE_ENTRY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.1.4"); + pub const DISTINGUISHED_NAME_TABLE_ENTRY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.1.5"); + pub const TEXT_TABLE_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.2.1"); + pub const TEXT_TABLE_VALUE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.2.2"); + pub const DISTINGUISHED_NAME_TABLE_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.453.7.2.3"); +} +pub mod rfc2589 { + pub const DYNAMIC_OBJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.101.119.2"); + pub const ENTRY_TTL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.101.119.3"); + pub const DYNAMIC_SUBTREES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.101.119.4"); +} +pub mod rfc2739 { + pub const CAL_CAL_URI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113556.1.4.478"); + pub const CAL_FBURL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113556.1.4.479"); + pub const CAL_CAPURI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113556.1.4.480"); + pub const CAL_CAL_ADR_URI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113556.1.4.481"); + pub const CAL_OTHER_CAL_UR_IS: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.2.840.113556.1.4.482"); + pub const CAL_OTHER_FBUR_LS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113556.1.4.483"); + pub const CAL_OTHER_CAPUR_IS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113556.1.4.484"); + pub const CAL_OTHER_CAL_ADR_UR_IS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113556.1.4.485"); + pub const CAL_ENTRY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113556.1.5.87"); +} +pub mod rfc2798 { + pub const JPEG_PHOTO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.60"); + pub const CAR_LICENSE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113730.3.1.1"); + pub const DEPARTMENT_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113730.3.1.2"); + pub const USER_PKCS_12: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113730.3.1.216"); + pub const DISPLAY_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113730.3.1.241"); + pub const EMPLOYEE_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113730.3.1.3"); + pub const PREFERRED_LANGUAGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113730.3.1.39"); + pub const EMPLOYEE_TYPE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113730.3.1.4"); + pub const USER_SMIME_CERTIFICATE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113730.3.1.40"); + pub const INET_ORG_PERSON: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113730.3.2.2"); +} +pub mod rfc3280 { + pub const EMAIL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.1"); + pub const EMAIL_ADDRESS: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.1"); + pub const PSEUDONYM: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.65"); +} +pub mod rfc3296 { + pub const REF: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113730.3.1.34"); + pub const REFERRAL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113730.3.2.6"); +} +pub mod rfc3671 { + pub const COLLECTIVE_ATTRIBUTE_SUBENTRIES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.18.12"); + pub const COLLECTIVE_EXCLUSIONS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.18.7"); + pub const COLLECTIVE_ATTRIBUTE_SUBENTRY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.20.2"); + pub const C_O: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.10.1"); + pub const C_OU: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.11.1"); + pub const C_POSTAL_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.16.1"); + pub const C_POSTAL_CODE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.17.1"); + pub const C_POST_OFFICE_BOX: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.18.1"); + pub const C_PHYSICAL_DELIVERY_OFFICE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.19.1"); + pub const C_TELEPHONE_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.20.1"); + pub const C_TELEX_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.21.1"); + pub const C_FACSIMILE_TELEPHONE_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.23.1"); + pub const C_INTERNATIONAL_ISDN_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.25.1"); + pub const C_L: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.7.1"); + pub const C_ST: 
crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.8.1"); + pub const C_STREET: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.9.1"); +} +pub mod rfc3672 { + pub const SUBENTRY: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.17.0"); + pub const ADMINISTRATIVE_ROLE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.18.5"); + pub const SUBTREE_SPECIFICATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.18.6"); + pub const AUTONOMOUS_AREA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.23.1"); + pub const ACCESS_CONTROL_SPECIFIC_AREA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.23.2"); + pub const ACCESS_CONTROL_INNER_AREA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.23.3"); + pub const SUBSCHEMA_ADMIN_SPECIFIC_AREA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.23.4"); + pub const COLLECTIVE_ATTRIBUTE_SPECIFIC_AREA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.23.5"); + pub const COLLECTIVE_ATTRIBUTE_INNER_AREA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.23.6"); +} +pub mod rfc3687 { + pub const COMPONENT_FILTER_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.36.79672281.1.13.2"); + pub const RDN_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.36.79672281.1.13.3"); + pub const PRESENT_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.36.79672281.1.13.5"); + pub const ALL_COMPONENTS_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.36.79672281.1.13.6"); + pub const DIRECTORY_COMPONENTS_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.36.79672281.1.13.7"); +} +pub mod rfc3698 { + pub const STORED_PREFIX_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.41"); +} 
+pub mod rfc3703 { + pub const PCIM_POLICY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.1"); + pub const PCIM_RULE_ACTION_ASSOCIATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.10"); + pub const PCIM_CONDITION_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.11"); + pub const PCIM_TPC_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.12"); + pub const PCIM_CONDITION_VENDOR_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.13"); + pub const PCIM_ACTION_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.14"); + pub const PCIM_ACTION_VENDOR_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.15"); + pub const PCIM_POLICY_INSTANCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.16"); + pub const PCIM_ELEMENT_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.17"); + pub const PCIM_REPOSITORY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.18"); + pub const PCIM_REPOSITORY_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.19"); + pub const PCIM_GROUP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.2"); + pub const PCIM_REPOSITORY_INSTANCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.20"); + pub const PCIM_SUBTREES_PTR_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.21"); + pub const PCIM_GROUP_CONTAINMENT_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.22"); + pub const PCIM_RULE_CONTAINMENT_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.23"); + pub const 
PCIM_GROUP_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.3"); + pub const PCIM_GROUP_INSTANCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.4"); + pub const PCIM_RULE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.5"); + pub const PCIM_RULE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.6"); + pub const PCIM_RULE_INSTANCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.7"); + pub const PCIM_RULE_CONDITION_ASSOCIATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.8"); + pub const PCIM_RULE_VALIDITY_ASSOCIATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.1.9"); + pub const PCIM_RULE_VALIDITY_PERIOD_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.10"); + pub const PCIM_RULE_USAGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.11"); + pub const PCIM_RULE_PRIORITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.12"); + pub const PCIM_RULE_MANDATORY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.13"); + pub const PCIM_RULE_SEQUENCED_ACTIONS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.14"); + pub const PCIM_ROLES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.15"); + pub const PCIM_CONDITION_GROUP_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.16"); + pub const PCIM_CONDITION_NEGATED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.17"); + pub const PCIM_CONDITION_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.18"); + pub const PCIM_CONDITION_DN: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.19"); + pub const PCIM_VALIDITY_CONDITION_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.20"); + pub const PCIM_TIME_PERIOD_CONDITION_DN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.21"); + pub const PCIM_ACTION_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.22"); + pub const PCIM_ACTION_ORDER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.23"); + pub const PCIM_ACTION_DN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.24"); + pub const PCIM_TPC_TIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.25"); + pub const PCIM_TPC_MONTH_OF_YEAR_MASK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.26"); + pub const PCIM_TPC_DAY_OF_MONTH_MASK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.27"); + pub const PCIM_TPC_DAY_OF_WEEK_MASK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.28"); + pub const PCIM_TPC_TIME_OF_DAY_MASK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.29"); + pub const PCIM_KEYWORDS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.3"); + pub const PCIM_TPC_LOCAL_OR_UTC_TIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.30"); + pub const PCIM_VENDOR_CONSTRAINT_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.31"); + pub const PCIM_VENDOR_CONSTRAINT_ENCODING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.32"); + pub const PCIM_VENDOR_ACTION_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.33"); + pub const PCIM_VENDOR_ACTION_ENCODING: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.34"); + pub const PCIM_POLICY_INSTANCE_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.35"); + pub const PCIM_REPOSITORY_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.36"); + pub const PCIM_SUBTREES_AUX_CONTAINED_SET: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.37"); + pub const PCIM_GROUPS_AUX_CONTAINED_SET: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.38"); + pub const PCIM_RULES_AUX_CONTAINED_SET: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.39"); + pub const PCIM_GROUP_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.4"); + pub const PCIM_RULE_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.5"); + pub const PCIM_RULE_ENABLED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.6"); + pub const PCIM_RULE_CONDITION_LIST_TYPE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.7"); + pub const PCIM_RULE_CONDITION_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.8"); + pub const PCIM_RULE_ACTION_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.6.2.9"); +} +pub mod rfc3712 { + pub const PRINTER_XRI_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1107"); + pub const PRINTER_ALIASES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1108"); + pub const PRINTER_CHARSET_CONFIGURED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1109"); + pub const PRINTER_JOB_PRIORITY_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1110"); + pub const PRINTER_JOB_K_OCTETS_SUPPORTED: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1111"); + pub const PRINTER_CURRENT_OPERATOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1112"); + pub const PRINTER_SERVICE_PERSON: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1113"); + pub const PRINTER_DELIVERY_ORIENTATION_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1114"); + pub const PRINTER_STACKING_ORDER_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1115"); + pub const PRINTER_OUTPUT_FEATURES_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1116"); + pub const PRINTER_MEDIA_LOCAL_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1117"); + pub const PRINTER_COPIES_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1118"); + pub const PRINTER_NATURAL_LANGUAGE_CONFIGURED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1119"); + pub const PRINTER_PRINT_QUALITY_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1120"); + pub const PRINTER_RESOLUTION_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1121"); + pub const PRINTER_MEDIA_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1122"); + pub const PRINTER_SIDES_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1123"); + pub const PRINTER_NUMBER_UP_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1124"); + pub const PRINTER_FINISHINGS_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1125"); + pub const PRINTER_PAGES_PER_MINUTE_COLOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1126"); + pub const 
PRINTER_PAGES_PER_MINUTE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1127"); + pub const PRINTER_COMPRESSION_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1128"); + pub const PRINTER_COLOR_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1129"); + pub const PRINTER_DOCUMENT_FORMAT_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1130"); + pub const PRINTER_CHARSET_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1131"); + pub const PRINTER_MULTIPLE_DOCUMENT_JOBS_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1132"); + pub const PRINTER_IPP_VERSIONS_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1133"); + pub const PRINTER_MORE_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1134"); + pub const PRINTER_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1135"); + pub const PRINTER_LOCATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1136"); + pub const PRINTER_GENERATED_NATURAL_LANGUAGE_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1137"); + pub const PRINTER_MAKE_AND_MODEL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1138"); + pub const PRINTER_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1139"); + pub const PRINTER_URI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.4.1140"); + pub const PRINTER_LPR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.6.253"); + pub const SLP_SERVICE_PRINTER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.6.254"); + pub const PRINTER_SERVICE: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.6.255"); + pub const PRINTER_IPP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.6.256"); + pub const PRINTER_SERVICE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.6.257"); + pub const PRINTER_ABSTRACT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.6.258"); +} +pub mod rfc4104 { + pub const PCELS_POLICY_SET: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.1"); + pub const PCELS_ACTION_ASSOCIATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.10"); + pub const PCELS_SIMPLE_CONDITION_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.11"); + pub const PCELS_COMPOUND_CONDITION_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.12"); + pub const PCELS_COMPOUND_FILTER_CONDITION_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.13"); + pub const PCELS_SIMPLE_ACTION_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.14"); + pub const PCELS_COMPOUND_ACTION_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.15"); + pub const PCELS_VARIABLE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.16"); + pub const PCELS_EXPLICIT_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.17"); + pub const PCELS_IMPLICIT_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.18"); + pub const PCELS_SOURCE_I_PV_4_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.19"); + pub const PCELS_POLICY_SET_ASSOCIATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.2"); + 
pub const PCELS_SOURCE_I_PV_6_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.20"); + pub const PCELS_DESTINATION_I_PV_4_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.21"); + pub const PCELS_DESTINATION_I_PV_6_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.22"); + pub const PCELS_SOURCE_PORT_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.23"); + pub const PCELS_DESTINATION_PORT_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.24"); + pub const PCELS_IP_PROTOCOL_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.25"); + pub const PCELS_IP_VERSION_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.26"); + pub const PCELS_IP_TO_S_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.27"); + pub const PCELS_DSCP_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.28"); + pub const PCELS_FLOW_ID_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.29"); + pub const PCELS_GROUP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.3"); + pub const PCELS_SOURCE_MAC_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.30"); + pub const PCELS_DESTINATION_MAC_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.31"); + pub const PCELS_VLAN_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.32"); + pub const PCELS_CO_S_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.33"); + pub 
const PCELS_ETHERTYPE_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.34"); + pub const PCELS_SOURCE_SAP_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.35"); + pub const PCELS_DESTINATION_SAP_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.36"); + pub const PCELS_SNAPOUI_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.37"); + pub const PCELS_SNAP_TYPE_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.38"); + pub const PCELS_FLOW_DIRECTION_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.39"); + pub const PCELS_GROUP_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.4"); + pub const PCELS_VALUE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.40"); + pub const PCELS_I_PV_4_ADDR_VALUE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.41"); + pub const PCELS_I_PV_6_ADDR_VALUE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.42"); + pub const PCELS_MAC_ADDR_VALUE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.43"); + pub const PCELS_STRING_VALUE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.44"); + pub const PCELS_BIT_STRING_VALUE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.45"); + pub const PCELS_INTEGER_VALUE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.46"); + pub const PCELS_BOOLEAN_VALUE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.47"); + pub const PCELS_REUSABLE_CONTAINER: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.48"); + pub const PCELS_REUSABLE_CONTAINER_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.49"); + pub const PCELS_GROUP_INSTANCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.5"); + pub const PCELS_REUSABLE_CONTAINER_INSTANCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.50"); + pub const PCELS_ROLE_COLLECTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.51"); + pub const PCELS_FILTER_ENTRY_BASE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.52"); + pub const PCELS_IP_HEADERS_FILTER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.53"); + pub const PCELS_8021_FILTER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.54"); + pub const PCELS_FILTER_LIST_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.55"); + pub const PCELS_VENDOR_VARIABLE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.56"); + pub const PCELS_VENDOR_VALUE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.57"); + pub const PCELS_RULE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.6"); + pub const PCELS_RULE_AUX_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.7"); + pub const PCELS_RULE_INSTANCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.8"); + pub const PCELS_CONDITION_ASSOCIATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.1.9"); + pub const PCELS_POLICY_SET_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.1"); + pub const PCELS_EXECUTION_STRATEGY: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.10"); + pub const PCELS_VARIABLE_DN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.11"); + pub const PCELS_VALUE_DN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.12"); + pub const PCELS_IS_MIRRORED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.13"); + pub const PCELS_VARIABLE_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.14"); + pub const PCELS_EXPECTED_VALUE_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.15"); + pub const PCELS_VARIABLE_MODEL_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.16"); + pub const PCELS_VARIABLE_MODEL_PROPERTY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.17"); + pub const PCELS_EXPECTED_VALUE_TYPES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.18"); + pub const PCELS_VALUE_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.19"); + pub const PCELS_DECISION_STRATEGY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.2"); + pub const PCELS_I_PV_4_ADDR_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.20"); + pub const PCELS_I_PV_6_ADDR_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.21"); + pub const PCELS_MAC_ADDR_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.22"); + pub const PCELS_STRING_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.23"); + pub const PCELS_BIT_STRING_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.24"); + pub const PCELS_INTEGER_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.25"); + pub const PCELS_BOOLEAN: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.26"); + pub const PCELS_REUSABLE_CONTAINER_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.27"); + pub const PCELS_REUSABLE_CONTAINER_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.28"); + pub const PCELS_ROLE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.29"); + pub const PCELS_POLICY_SET_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.3"); + pub const PCELS_ROLE_COLLECTION_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.30"); + pub const PCELS_ELEMENT_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.31"); + pub const PCELS_FILTER_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.32"); + pub const PCELS_FILTER_IS_NEGATED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.33"); + pub const PCELS_IP_HDR_VERSION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.34"); + pub const PCELS_IP_HDR_SOURCE_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.35"); + pub const PCELS_IP_HDR_SOURCE_ADDRESS_END_OF_RANGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.36"); + pub const PCELS_IP_HDR_SOURCE_MASK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.37"); + pub const PCELS_IP_HDR_DEST_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.38"); + pub const PCELS_IP_HDR_DEST_ADDRESS_END_OF_RANGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.39"); + pub const PCELS_PRIORITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.4"); + pub const PCELS_IP_HDR_DEST_MASK: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.40"); + pub const PCELS_IP_HDR_PROTOCOL_ID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.41"); + pub const PCELS_IP_HDR_SOURCE_PORT_START: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.42"); + pub const PCELS_IP_HDR_SOURCE_PORT_END: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.43"); + pub const PCELS_IP_HDR_DEST_PORT_START: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.44"); + pub const PCELS_IP_HDR_DEST_PORT_END: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.45"); + pub const PCELS_IP_HDR_DSCP_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.46"); + pub const PCELS_IP_HDR_FLOW_LABEL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.47"); + pub const PCELS_8021_HDR_SOURCE_MAC_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.48"); + pub const PCELS_8021_HDR_SOURCE_MAC_MASK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.49"); + pub const PCELS_POLICY_SET_DN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.5"); + pub const PCELS_8021_HDR_DEST_MAC_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.50"); + pub const PCELS_8021_HDR_DEST_MAC_MASK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.51"); + pub const PCELS_8021_HDR_PROTOCOL_ID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.52"); + pub const PCELS_8021_HDR_PRIORITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.53"); + pub const PCELS_8021_HDR_VLANID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.54"); + pub const PCELS_FILTER_LIST_NAME: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.55"); + pub const PCELS_FILTER_DIRECTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.56"); + pub const PCELS_FILTER_ENTRY_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.57"); + pub const PCELS_VENDOR_VARIABLE_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.58"); + pub const PCELS_VENDOR_VARIABLE_ENCODING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.59"); + pub const PCELS_CONDITION_LIST_TYPE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.6"); + pub const PCELS_VENDOR_VALUE_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.60"); + pub const PCELS_VENDOR_VALUE_ENCODING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.61"); + pub const PCELS_RULE_VALIDITY_PERIOD_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.62"); + pub const PCELS_CONDITION_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.7"); + pub const PCELS_ACTION_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.8"); + pub const PCELS_SEQUENCED_ACTIONS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.9.2.9"); +} +pub mod rfc4237 { + pub const VPIM_USER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.11.1.1"); + pub const VPIM_TELEPHONE_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.11.2.1"); + pub const VPIM_SUB_MAILBOXES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.11.2.10"); + pub const VPIM_RFC_822_MAILBOX: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.11.2.2"); + pub const VPIM_SPOKEN_NAME: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.11.2.3"); + pub const VPIM_SUPPORTED_UA_BEHAVIORS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.11.2.4"); + pub const VPIM_SUPPORTED_AUDIO_MEDIA_TYPES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.11.2.5"); + pub const VPIM_SUPPORTED_MESSAGE_CONTEXT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.11.2.6"); + pub const VPIM_TEXT_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.11.2.7"); + pub const VPIM_EXTENDED_ABSENCE_STATUS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.11.2.8"); + pub const VPIM_MAX_MESSAGE_SIZE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.11.2.9"); +} +pub mod rfc4403 { + pub const UDDIV_3_SERVICE_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.32"); + pub const UDDI_BUSINESS_ENTITY_NAME_FORM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.15.1"); + pub const UDDIV_3_ENTITY_OBITUARY_NAME_FORM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.15.10"); + pub const UDDI_CONTACT_NAME_FORM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.15.2"); + pub const UDDI_ADDRESS_NAME_FORM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.15.3"); + pub const UDDI_BUSINESS_SERVICE_NAME_FORM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.15.4"); + pub const UDDI_BINDING_TEMPLATE_NAME_FORM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.15.5"); + pub const UDDI_T_MODEL_INSTANCE_INFO_NAME_FORM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.15.6"); + pub const UDDI_T_MODEL_NAME_FORM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.15.7"); + pub const 
UDDI_PUBLISHER_ASSERTION_NAME_FORM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.15.8"); + pub const UDDIV_3_SUBSCRIPTION_NAME_FORM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.15.9"); + pub const UDDI_BUSINESS_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.1"); + pub const UDDI_E_MAIL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.10"); + pub const UDDI_SORT_CODE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.11"); + pub const UDDI_T_MODEL_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.12"); + pub const UDDI_ADDRESS_LINE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.13"); + pub const UDDI_IDENTIFIER_BAG: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.14"); + pub const UDDI_CATEGORY_BAG: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.15"); + pub const UDDI_KEYED_REFERENCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.16"); + pub const UDDI_SERVICE_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.17"); + pub const UDDI_BINDING_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.18"); + pub const UDDI_ACCESS_POINT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.19"); + pub const UDDI_AUTHORIZED_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.2"); + pub const UDDI_HOSTING_REDIRECTOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.20"); + pub const UDDI_INSTANCE_DESCRIPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.21"); + pub const UDDI_INSTANCE_PARMS: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.22"); + pub const UDDI_OVERVIEW_DESCRIPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.23"); + pub const UDDI_OVERVIEW_URL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.24"); + pub const UDDI_FROM_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.25"); + pub const UDDI_TO_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.26"); + pub const UDDI_UUID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.27"); + pub const UDDI_IS_HIDDEN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.28"); + pub const UDDI_IS_PROJECTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.29"); + pub const UDDI_OPERATOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.3"); + pub const UDDI_LANG: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.30"); + pub const UDDIV_3_BUSINESS_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.31"); + pub const UDDIV_3_BINDING_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.33"); + pub const UDDIV_3_TMODEL_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.34"); + pub const UDDIV_3_DIGITAL_SIGNATURE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.35"); + pub const UDDIV_3_NODE_ID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.36"); + pub const UDDIV_3_ENTITY_MODIFICATION_TIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.37"); + pub const UDDIV_3_SUBSCRIPTION_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.38"); + pub const UDDIV_3_SUBSCRIPTION_FILTER: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.39"); + pub const UDDI_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.4"); + pub const UDDIV_3_NOTIFICATION_INTERVAL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.40"); + pub const UDDIV_3_MAX_ENTITIES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.41"); + pub const UDDIV_3_EXPIRES_AFTER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.42"); + pub const UDDIV_3_BRIEF_RESPONSE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.43"); + pub const UDDIV_3_ENTITY_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.44"); + pub const UDDIV_3_ENTITY_CREATION_TIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.45"); + pub const UDDIV_3_ENTITY_DELETION_TIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.46"); + pub const UDDI_DESCRIPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.5"); + pub const UDDI_DISCOVERY_UR_LS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.6"); + pub const UDDI_USE_TYPE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.7"); + pub const UDDI_PERSON_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.8"); + pub const UDDI_PHONE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.4.9"); + pub const UDDI_BUSINESS_ENTITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.6.1"); + pub const UDDIV_3_ENTITY_OBITUARY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.6.10"); + pub const UDDI_CONTACT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.6.2"); + pub const 
UDDI_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.6.3"); + pub const UDDI_BUSINESS_SERVICE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.6.4"); + pub const UDDI_BINDING_TEMPLATE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.6.5"); + pub const UDDI_T_MODEL_INSTANCE_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.6.6"); + pub const UDDI_T_MODEL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.6.7"); + pub const UDDI_PUBLISHER_ASSERTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.6.8"); + pub const UDDIV_3_SUBSCRIPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.10.6.9"); +} +pub mod rfc4512 { + pub const EXTENSIBLE_OBJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.101.120.111"); + pub const SUPPORTED_CONTROL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.101.120.13"); + pub const SUPPORTED_SASL_MECHANISMS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.101.120.14"); + pub const SUPPORTED_LDAP_VERSION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.101.120.15"); + pub const LDAP_SYNTAXES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.101.120.16"); + pub const NAMING_CONTEXTS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.101.120.5"); + pub const ALT_SERVER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.101.120.6"); + pub const SUPPORTED_EXTENSION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.101.120.7"); + pub const SUPPORTED_FEATURES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.4203.1.3.5"); + pub const 
CREATE_TIMESTAMP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.18.1"); + pub const SUBSCHEMA_SUBENTRY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.18.10"); + pub const MODIFY_TIMESTAMP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.18.2"); + pub const CREATORS_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.18.3"); + pub const MODIFIERS_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.18.4"); + pub const SUBSCHEMA: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.20.1"); + pub const DIT_STRUCTURE_RULES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.21.1"); + pub const GOVERNING_STRUCTURE_RULE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.21.10"); + pub const DIT_CONTENT_RULES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.21.2"); + pub const MATCHING_RULES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.21.4"); + pub const ATTRIBUTE_TYPES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.21.5"); + pub const OBJECT_CLASSES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.21.6"); + pub const NAME_FORMS: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.21.7"); + pub const MATCHING_RULE_USE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.21.8"); + pub const STRUCTURAL_OBJECT_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.21.9"); + pub const OBJECT_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.0"); + pub const ALIASED_OBJECT_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.1"); + pub const TOP: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.6.0"); + pub const ALIAS: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.6.1"); +} +pub mod 
rfc4517 { + pub const CASE_EXACT_IA_5_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.109.114.1"); + pub const CASE_IGNORE_IA_5_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.109.114.2"); + pub const CASE_IGNORE_IA_5_SUBSTRINGS_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.109.114.3"); + pub const OBJECT_IDENTIFIER_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.0"); + pub const DISTINGUISHED_NAME_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.1"); + pub const NUMERIC_STRING_SUBSTRINGS_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.10"); + pub const CASE_IGNORE_LIST_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.11"); + pub const CASE_IGNORE_LIST_SUBSTRINGS_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.12"); + pub const BOOLEAN_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.13"); + pub const INTEGER_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.14"); + pub const INTEGER_ORDERING_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.15"); + pub const BIT_STRING_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.16"); + pub const OCTET_STRING_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.17"); + pub const OCTET_STRING_ORDERING_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.18"); + pub const CASE_IGNORE_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.2"); + pub const TELEPHONE_NUMBER_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.20"); + pub const TELEPHONE_NUMBER_SUBSTRINGS_MATCH: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("2.5.13.21"); + pub const UNIQUE_MEMBER_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.23"); + pub const GENERALIZED_TIME_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.27"); + pub const GENERALIZED_TIME_ORDERING_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.28"); + pub const INTEGER_FIRST_COMPONENT_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.29"); + pub const CASE_IGNORE_ORDERING_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.3"); + pub const OBJECT_IDENTIFIER_FIRST_COMPONENT_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.30"); + pub const DIRECTORY_STRING_FIRST_COMPONENT_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.31"); + pub const WORD_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.32"); + pub const KEYWORD_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.33"); + pub const CASE_IGNORE_SUBSTRINGS_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.4"); + pub const CASE_EXACT_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.5"); + pub const CASE_EXACT_ORDERING_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.6"); + pub const CASE_EXACT_SUBSTRINGS_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.7"); + pub const NUMERIC_STRING_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.8"); + pub const NUMERIC_STRING_ORDERING_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.9"); +} +pub mod rfc4519 { + pub const UID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.1"); + pub const USER_ID: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.1"); + pub const DC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.25"); + pub const DOMAIN_COMPONENT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.25"); + pub const UID_OBJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.3.1"); + pub const DC_OBJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.1466.344"); + pub const O: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.10"); + pub const ORGANIZATION_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.10"); + pub const OU: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.11"); + pub const ORGANIZATIONAL_UNIT_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.11"); + pub const TITLE: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.12"); + pub const DESCRIPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.13"); + pub const SEARCH_GUIDE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.14"); + pub const BUSINESS_CATEGORY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.15"); + pub const POSTAL_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.16"); + pub const POSTAL_CODE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.17"); + pub const POST_OFFICE_BOX: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.18"); + pub const PHYSICAL_DELIVERY_OFFICE_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.19"); + pub const TELEPHONE_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.20"); + pub const TELEX_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.21"); + pub const 
TELETEX_TERMINAL_IDENTIFIER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.22"); + pub const FACSIMILE_TELEPHONE_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.23"); + pub const X_121_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.24"); + pub const INTERNATIONALI_SDN_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.25"); + pub const REGISTERED_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.26"); + pub const DESTINATION_INDICATOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.27"); + pub const PREFERRED_DELIVERY_METHOD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.28"); + pub const CN: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.3"); + pub const COMMON_NAME: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.3"); + pub const MEMBER: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.31"); + pub const OWNER: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.32"); + pub const ROLE_OCCUPANT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.33"); + pub const SEE_ALSO: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.34"); + pub const USER_PASSWORD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.35"); + pub const SN: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.4"); + pub const SURNAME: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.4"); + pub const NAME: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.41"); + pub const GIVEN_NAME: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.42"); + pub const INITIALS: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.43"); + pub const GENERATION_QUALIFIER: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("2.5.4.44"); + pub const X_500_UNIQUE_IDENTIFIER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.45"); + pub const DN_QUALIFIER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.46"); + pub const ENHANCED_SEARCH_GUIDE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.47"); + pub const DISTINGUISHED_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.49"); + pub const SERIAL_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.5"); + pub const UNIQUE_MEMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.50"); + pub const HOUSE_IDENTIFIER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.51"); + pub const C: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.6"); + pub const COUNTRY_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.6"); + pub const L: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.7"); + pub const LOCALITY_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.7"); + pub const ST: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.8"); + pub const STREET: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.9"); + pub const RESIDENTIAL_PERSON: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.10"); + pub const APPLICATION_PROCESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.11"); + pub const DEVICE: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.6.14"); + pub const GROUP_OF_UNIQUE_NAMES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.17"); + pub const COUNTRY: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.6.2"); + pub const LOCALITY: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.6.3"); + pub const 
ORGANIZATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.4"); + pub const ORGANIZATIONAL_UNIT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.5"); + pub const PERSON: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.6.6"); + pub const ORGANIZATIONAL_PERSON: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.7"); + pub const ORGANIZATIONAL_ROLE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.8"); + pub const GROUP_OF_NAMES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.9"); +} +pub mod rfc4523 { + pub const CERTIFICATE_EXACT_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.34"); + pub const CERTIFICATE_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.35"); + pub const CERTIFICATE_PAIR_EXACT_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.36"); + pub const CERTIFICATE_PAIR_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.37"); + pub const CERTIFICATE_LIST_EXACT_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.38"); + pub const CERTIFICATE_LIST_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.39"); + pub const ALGORITHM_IDENTIFIER_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.13.40"); + pub const USER_CERTIFICATE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.36"); + pub const CA_CERTIFICATE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.37"); + pub const AUTHORITY_REVOCATION_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.38"); + pub const CERTIFICATE_REVOCATION_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.39"); + pub const CROSS_CERTIFICATE_PAIR: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("2.5.4.40"); + pub const SUPPORTED_ALGORITHMS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.52"); + pub const DELTA_REVOCATION_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.4.53"); + pub const STRONG_AUTHENTICATION_USER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.15"); + pub const CERTIFICATION_AUTHORITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.16"); + pub const CERTIFICATION_AUTHORITY_V_2: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.16.2"); + pub const USER_SECURITY_INFORMATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.18"); + pub const CRL_DISTRIBUTION_POINT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.6.19"); + pub const PKI_USER: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.6.21"); + pub const PKI_CA: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.6.22"); + pub const DELTA_CRL: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.6.23"); +} +pub mod rfc4524 { + pub const MANAGER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.10"); + pub const DOCUMENT_IDENTIFIER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.11"); + pub const DOCUMENT_TITLE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.12"); + pub const DOCUMENT_VERSION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.13"); + pub const DOCUMENT_AUTHOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.14"); + pub const DOCUMENT_LOCATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.15"); + pub const HOME_PHONE: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.20"); + pub const HOME_TELEPHONE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.20"); + pub const SECRETARY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.21"); + pub const MAIL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.3"); + pub const RFC_822_MAILBOX: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.3"); + pub const ASSOCIATED_DOMAIN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.37"); + pub const ASSOCIATED_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.38"); + pub const HOME_POSTAL_ADDRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.39"); + pub const INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.4"); + pub const PERSONAL_TITLE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.40"); + pub const MOBILE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.41"); + pub const MOBILE_TELEPHONE_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.41"); + pub const PAGER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.42"); + pub const PAGER_TELEPHONE_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.42"); + pub const CO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.43"); + pub const FRIENDLY_COUNTRY_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.43"); + pub const UNIQUE_IDENTIFIER: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.44"); + pub const ORGANIZATIONAL_STATUS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.45"); + pub const BUILDING_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.48"); + pub const DRINK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.5"); + pub const FAVOURITE_DRINK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.5"); + pub const SINGLE_LEVEL_QUALITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.50"); + pub const DOCUMENT_PUBLISHER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.56"); + pub const ROOM_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.6"); + pub const USER_CLASS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.8"); + pub const HOST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.9"); + pub const DOMAIN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.13"); + pub const RFC_822_LOCAL_PART: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.14"); + pub const DOMAIN_RELATED_OBJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.17"); + pub const FRIENDLY_COUNTRY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.18"); + pub const SIMPLE_SECURITY_OBJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.19"); + pub const ACCOUNT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.5"); + pub const DOCUMENT: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.6"); + pub const ROOM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.7"); + pub const DOCUMENT_SERIES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.4.8"); +} +pub mod rfc4530 { + pub const UUID_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.16.2"); + pub const UUID_ORDERING_MATCH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.16.3"); + pub const ENTRY_UUID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.16.4"); +} +pub mod rfc4876 { + pub const DEFAULT_SERVER_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.0"); + pub const DEFAULT_SEARCH_BASE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.1"); + pub const CREDENTIAL_LEVEL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.10"); + pub const OBJECTCLASS_MAP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.11"); + pub const DEFAULT_SEARCH_SCOPE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.12"); + pub const SERVICE_CREDENTIAL_LEVEL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.13"); + pub const SERVICE_SEARCH_DESCRIPTOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.14"); + pub const SERVICE_AUTHENTICATION_METHOD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.15"); + pub const DEREFERENCE_ALIASES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.16"); + pub const PREFERRED_SERVER_LIST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.2"); + pub const SEARCH_TIME_LIMIT: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.3"); + pub const BIND_TIME_LIMIT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.4"); + pub const FOLLOW_REFERRALS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.5"); + pub const AUTHENTICATION_METHOD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.6"); + pub const PROFILE_TTL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.7"); + pub const ATTRIBUTE_MAP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.1.9"); + pub const DUA_CONFIG_PROFILE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11.1.3.1.2.5"); +} +pub mod rfc5020 { + pub const ENTRY_DN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.20"); +} +pub mod rfc5280 { + pub const PKCS_9: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9"); + pub const ID_PKIX: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7"); + pub const ID_PE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.1"); + pub const ID_PE_AUTHORITY_INFO_ACCESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.1.1"); + pub const ID_PE_SUBJECT_INFO_ACCESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.1.11"); + pub const ID_QT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.2"); + pub const ID_QT_CPS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.2.1"); + pub const ID_QT_UNOTICE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.2.2"); + pub const ID_KP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3"); + pub const 
ID_KP_SERVER_AUTH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.1"); + pub const ID_KP_CLIENT_AUTH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.2"); + pub const ID_KP_CODE_SIGNING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.3"); + pub const ID_KP_EMAIL_PROTECTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.4"); + pub const ID_KP_TIME_STAMPING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.8"); + pub const ID_KP_OCSP_SIGNING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.9"); + pub const ID_AD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48"); + pub const ID_AD_OCSP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1"); + pub const ID_AD_CA_ISSUERS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.2"); + pub const ID_AD_TIME_STAMPING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.3"); + pub const ID_AD_CA_REPOSITORY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.5"); + pub const HOLD_INSTRUCTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.2.840.10040.2"); + pub const ID_HOLDINSTRUCTION_NONE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.2.840.10040.2.1"); + pub const ID_HOLDINSTRUCTION_CALLISSUER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.2.840.10040.2.2"); + pub const ID_HOLDINSTRUCTION_REJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.2.840.10040.2.3"); + pub const ID_CE: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.29"); + pub const ID_CE_SUBJECT_KEY_IDENTIFIER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.14"); + pub const 
ID_CE_KEY_USAGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.15"); + pub const ID_CE_PRIVATE_KEY_USAGE_PERIOD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.16"); + pub const ID_CE_SUBJECT_ALT_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.17"); + pub const ID_CE_ISSUER_ALT_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.18"); + pub const ID_CE_BASIC_CONSTRAINTS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.19"); + pub const ID_CE_CRL_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.20"); + pub const ID_CE_CRL_REASONS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.21"); + pub const ID_CE_HOLD_INSTRUCTION_CODE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.23"); + pub const ID_CE_INVALIDITY_DATE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.24"); + pub const ID_CE_DELTA_CRL_INDICATOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.27"); + pub const ID_CE_ISSUING_DISTRIBUTION_POINT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.28"); + pub const ID_CE_CERTIFICATE_ISSUER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.29"); + pub const ID_CE_NAME_CONSTRAINTS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.30"); + pub const ID_CE_CRL_DISTRIBUTION_POINTS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.31"); + pub const ID_CE_CERTIFICATE_POLICIES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.32"); + pub const ANY_POLICY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.32.0"); + pub const ID_CE_POLICY_MAPPINGS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.33"); + pub const ID_CE_AUTHORITY_KEY_IDENTIFIER: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.35"); + pub const ID_CE_POLICY_CONSTRAINTS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.36"); + pub const ID_CE_EXT_KEY_USAGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.37"); + pub const ANY_EXTENDED_KEY_USAGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.37.0"); + pub const ID_CE_FRESHEST_CRL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.46"); + pub const ID_CE_INHIBIT_ANY_POLICY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.54"); + pub const ID_CE_SUBJECT_DIRECTORY_ATTRIBUTES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.9"); + pub const ID_AT: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4"); +} +pub mod rfc5911 { + pub const ID_PBKDF_2: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.5.12"); + pub const ID_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.7.1"); + pub const ID_SIGNED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.7.2"); + pub const ID_ENVELOPED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.7.3"); + pub const ID_DIGESTED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.7.5"); + pub const ID_ENCRYPTED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.7.6"); + pub const SMIME_CAPABILITIES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.15"); + pub const ID_SMIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16"); + pub const ID_CT_RECEIPT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.1"); + pub const ID_CT_FIRMWARE_PACKAGE: crate::ObjectIdentifier 
= + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.16"); + pub const ID_CT_FIRMWARE_LOAD_RECEIPT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.17"); + pub const ID_CT_FIRMWARE_LOAD_ERROR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.18"); + pub const ID_CT_AUTH_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.2"); + pub const ID_CT_AUTH_ENVELOPED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.23"); + pub const ID_CT_CONTENT_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.6"); + pub const ID_CAP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.11"); + pub const ID_CAP_PREFER_BINARY_INSIDE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.11.1"); + pub const ID_AA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2"); + pub const ID_AA_RECEIPT_REQUEST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.1"); + pub const ID_AA_CONTENT_REFERENCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.10"); + pub const ID_AA_ENCRYP_KEY_PREF: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.11"); + pub const ID_AA_SIGNING_CERTIFICATE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.12"); + pub const ID_AA_SECURITY_LABEL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.2"); + pub const ID_AA_ML_EXPAND_HISTORY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.3"); + pub const ID_AA_FIRMWARE_PACKAGE_ID: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.35"); + pub const ID_AA_TARGET_HARDWARE_I_DS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.36"); + pub const ID_AA_DECRYPT_KEY_ID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.37"); + pub const ID_AA_IMPL_CRYPTO_ALGS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.38"); + pub const ID_AA_WRAPPED_FIRMWARE_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.39"); + pub const ID_AA_CONTENT_HINT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.4"); + pub const ID_AA_COMMUNITY_IDENTIFIERS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.40"); + pub const ID_AA_FIRMWARE_PACKAGE_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.42"); + pub const ID_AA_IMPL_COMPRESS_ALGS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.43"); + pub const ID_AA_SIGNING_CERTIFICATE_V_2: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.47"); + pub const ID_AA_ER_INTERNAL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.49"); + pub const ID_AA_MSG_SIG_DIGEST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.5"); + pub const ID_AA_ER_EXTERNAL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.50"); + pub const ID_AA_CONTENT_IDENTIFIER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.7"); + pub const ID_AA_EQUIVALENT_LABELS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.9"); + pub const ID_ALG_SSDH: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.3.10"); + pub const ID_ALG_ESDH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.3.5"); + pub const ID_ALG_CMS_3_DE_SWRAP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.3.6"); + pub const ID_ALG_CMSRC_2_WRAP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.3.7"); + pub const ID_SKD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8"); + pub const ID_SKD_GL_USE_KEK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.1"); + pub const ID_SKD_GLA_QUERY_REQUEST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.11"); + pub const ID_SKD_GLA_QUERY_RESPONSE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.12"); + pub const ID_SKD_GL_PROVIDE_CERT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.13"); + pub const ID_SKD_GL_MANAGE_CERT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.14"); + pub const ID_SKD_GL_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.15"); + pub const ID_SKD_GL_DELETE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.2"); + pub const ID_SKD_GL_ADD_MEMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.3"); + pub const ID_SKD_GL_DELETE_MEMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.4"); + pub const ID_SKD_GL_REKEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.5"); + pub const ID_SKD_GL_ADD_OWNER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.6"); + pub const ID_SKD_GL_REMOVE_OWNER: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.7"); + pub const ID_SKD_GL_KEY_COMPROMISE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.8"); + pub const ID_SKD_GLK_REFRESH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8.9"); + pub const ID_CONTENT_TYPE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.3"); + pub const ID_MESSAGE_DIGEST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.4"); + pub const ID_SIGNING_TIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.5"); + pub const ID_COUNTERSIGNATURE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.6"); + pub const RC_2_CBC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.3.2"); + pub const DES_EDE_3_CBC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.3.7"); + pub const LTANS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.11"); + pub const ID_CET_SKD_FAIL_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.15.1"); + pub const ID_CMC_GLA_RR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.99"); + pub const ID_CMC_GLA_SKD_ALG_REQUEST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.99.1"); + pub const ID_CMC_GLA_SKD_ALG_RESPONSE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.99.2"); + pub const ID_ON_HARDWARE_MODULE_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.8.4"); + pub const HMAC_SHA_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.8.1.2"); + pub const AES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1"); + pub 
const ID_AES_128_CBC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.2"); + pub const ID_AES_192_CBC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.22"); + pub const ID_AES_192_WRAP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.25"); + pub const ID_AES_192_GCM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.26"); + pub const ID_AES_192_CCM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.27"); + pub const ID_AES_256_CBC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.42"); + pub const ID_AES_256_WRAP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.45"); + pub const ID_AES_256_GCM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.46"); + pub const ID_AES_256_CCM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.47"); + pub const ID_AES_128_WRAP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.5"); + pub const ID_AES_128_GCM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.6"); + pub const ID_AES_128_CCM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.1.7"); +} +pub mod rfc5912 { + pub const ID_DSA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.10040.4.1"); + pub const DSA_WITH_SHA_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.10040.4.3"); + pub const ID_EC_PUBLIC_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.10045.2.1"); + pub const SECP_256_R_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.10045.3.1.7"); + pub const ECDSA_WITH_SHA_224: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.2.840.10045.4.3.1"); + pub const ECDSA_WITH_SHA_256: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.10045.4.3.2"); + pub const ECDSA_WITH_SHA_384: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.10045.4.3.3"); + pub const ECDSA_WITH_SHA_512: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.10045.4.3.4"); + pub const DHPUBLICNUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.10046.2.1"); + pub const ID_PASSWORD_BASED_MAC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113533.7.66.13"); + pub const ID_DH_BASED_MAC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113533.7.66.30"); + pub const PKCS_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1"); + pub const RSA_ENCRYPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.1"); + pub const ID_RSASSA_PSS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.10"); + pub const SHA_256_WITH_RSA_ENCRYPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.11"); + pub const SHA_384_WITH_RSA_ENCRYPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.12"); + pub const SHA_512_WITH_RSA_ENCRYPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.13"); + pub const SHA_224_WITH_RSA_ENCRYPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.14"); + pub const MD_2_WITH_RSA_ENCRYPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.2"); + pub const MD_5_WITH_RSA_ENCRYPTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.4"); + pub const SHA_1_WITH_RSA_ENCRYPTION: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.5"); + pub const ID_RSAES_OAEP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.7"); + pub const ID_MGF_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.8"); + pub const ID_P_SPECIFIED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.9"); + pub const PKCS_9: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9"); + pub const ID_EXTENSION_REQ: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.14"); + pub const ID_SMIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16"); + pub const ID_CT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1"); + pub const ID_CT_SCVP_CERT_VAL_REQUEST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.10"); + pub const ID_CT_SCVP_CERT_VAL_RESPONSE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.11"); + pub const ID_CT_SCVP_VAL_POL_REQUEST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.12"); + pub const ID_CT_SCVP_VAL_POL_RESPONSE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.13"); + pub const ID_CT_ENC_KEY_WITH_ID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.21"); + pub const ID_AA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2"); + pub const ID_AA_CMC_UNSIGNED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.34"); + pub const ID_MD_2: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.2.2"); + pub const ID_MD_5: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.2.5"); + pub const 
SECT_163_K_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.1"); + pub const SECT_163_R_2: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.15"); + pub const SECT_283_K_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.16"); + pub const SECT_283_R_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.17"); + pub const SECT_233_K_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.26"); + pub const SECT_233_R_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.27"); + pub const SECP_224_R_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.33"); + pub const SECP_384_R_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.34"); + pub const SECP_521_R_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.35"); + pub const SECT_409_K_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.36"); + pub const SECT_409_R_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.37"); + pub const SECT_571_K_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.38"); + pub const SECT_571_R_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.0.39"); + pub const ID_EC_DH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.1.12"); + pub const ID_EC_MQV: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.132.1.13"); + pub const ID_SHA_1: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.14.3.2.26"); + pub const ID_PKIX: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7"); + pub const ID_PE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.1"); + pub const ID_PE_AUTHORITY_INFO_ACCESS: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.1.1"); + pub const ID_PE_AC_PROXYING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.1.10"); + pub const ID_PE_SUBJECT_INFO_ACCESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.1.11"); + pub const ID_PE_AC_AUDIT_IDENTITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.1.4"); + pub const ID_PE_AA_CONTROLS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.1.6"); + pub const ID_ACA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.10"); + pub const ID_ACA_AUTHENTICATION_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.10.1"); + pub const ID_ACA_ACCESS_IDENTITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.10.2"); + pub const ID_ACA_CHARGING_IDENTITY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.10.3"); + pub const ID_ACA_GROUP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.10.4"); + pub const ID_ACA_ENC_ATTRS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.10.6"); + pub const ID_CCT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.12"); + pub const ID_CCT_PKI_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.12.2"); + pub const ID_CCT_PKI_RESPONSE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.12.3"); + pub const ID_STC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.17"); + pub const ID_STC_BUILD_PKC_PATH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.17.1"); + pub const ID_STC_BUILD_VALID_PKC_PATH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.17.2"); + pub const ID_STC_BUILD_STATUS_CHECKED_PKC_PATH: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.17.3"); + pub const ID_STC_BUILD_AA_PATH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.17.4"); + pub const ID_STC_BUILD_VALID_AA_PATH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.17.5"); + pub const ID_STC_BUILD_STATUS_CHECKED_AA_PATH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.17.6"); + pub const ID_STC_STATUS_CHECK_AC_AND_BUILD_STATUS_CHECKED_AA_PATH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.17.7"); + pub const ID_SWB: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18"); + pub const ID_SWB_PKC_BEST_CERT_PATH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.1"); + pub const ID_SWB_PKC_CERT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.10"); + pub const ID_SWB_AC_CERT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.11"); + pub const ID_SWB_PKC_ALL_CERT_PATHS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.12"); + pub const ID_SWB_PKC_EE_REVOCATION_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.13"); + pub const ID_SWB_PKC_C_AS_REVOCATION_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.14"); + pub const ID_SWB_PKC_REVOCATION_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.2"); + pub const ID_SWB_PKC_PUBLIC_KEY_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.4"); + pub const ID_SWB_AA_CERT_PATH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.5"); + pub const ID_SWB_AA_REVOCATION_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.6"); + pub const 
ID_SWB_AC_REVOCATION_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.7"); + pub const ID_SWB_RELAYED_RESPONSES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18.9"); + pub const ID_SVP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.19"); + pub const ID_SVP_DEFAULT_VAL_POLICY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.19.1"); + pub const ID_SVP_NAME_VAL_ALG: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.19.2"); + pub const ID_SVP_BASIC_VAL_ALG: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.19.3"); + pub const NAME_COMP_ALG_SET: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.19.4"); + pub const ID_NVA_DN_COMP_ALG: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.19.4"); + pub const ID_QT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.2"); + pub const ID_QT_CPS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.2.1"); + pub const ID_QT_UNOTICE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.2.2"); + pub const ID_KP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3"); + pub const ID_KP_SERVER_AUTH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.1"); + pub const ID_KP_SCVP_SERVER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.15"); + pub const ID_KP_SCVP_CLIENT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.16"); + pub const ID_KP_CLIENT_AUTH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.2"); + pub const ID_KP_CODE_SIGNING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.3"); + pub const ID_KP_EMAIL_PROTECTION: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.4"); + pub const ID_KP_TIME_STAMPING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.8"); + pub const ID_KP_OCSP_SIGNING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3.9"); + pub const ID_IT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4"); + pub const ID_IT_CA_PROT_ENC_CERT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.1"); + pub const ID_IT_KEY_PAIR_PARAM_REQ: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.10"); + pub const ID_IT_KEY_PAIR_PARAM_REP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.11"); + pub const ID_IT_REV_PASSPHRASE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.12"); + pub const ID_IT_IMPLICIT_CONFIRM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.13"); + pub const ID_IT_CONFIRM_WAIT_TIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.14"); + pub const ID_IT_ORIG_PKI_MESSAGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.15"); + pub const ID_IT_SUPP_LANG_TAGS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.16"); + pub const ID_IT_SIGN_KEY_PAIR_TYPES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.2"); + pub const ID_IT_ENC_KEY_PAIR_TYPES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.3"); + pub const ID_IT_PREFERRED_SYMM_ALG: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.4"); + pub const ID_IT_CA_KEY_UPDATE_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.5"); + pub const ID_IT_CURRENT_CRL: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.6"); + pub const ID_IT_UNSUPPORTED_OI_DS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4.7"); + pub const ID_AD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48"); + pub const ID_AD_OCSP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1"); + pub const ID_AD_CA_ISSUERS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.2"); + pub const ID_AD_TIME_STAMPING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.3"); + pub const ID_AD_CA_REPOSITORY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.5"); + pub const ID_PKIP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5"); + pub const ID_REG_CTRL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.1"); + pub const ID_REG_CTRL_REG_TOKEN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.1.1"); + pub const ID_REG_CTRL_AUTHENTICATOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.1.2"); + pub const ID_REG_CTRL_PKI_PUBLICATION_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.1.3"); + pub const ID_REG_CTRL_PKI_ARCHIVE_OPTIONS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.1.4"); + pub const ID_REG_CTRL_OLD_CERT_ID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.1.5"); + pub const ID_REG_CTRL_PROTOCOL_ENCR_KEY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.1.6"); + pub const ID_REG_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.2"); + pub const ID_REG_INFO_UTF_8_PAIRS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.2.1"); + pub const 
ID_REG_INFO_CERT_REQ: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.2.2"); + pub const ID_ALG_NO_SIGNATURE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.6.2"); + pub const ID_CMC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7"); + pub const ID_CMC_STATUS_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.1"); + pub const ID_CMC_DECRYPTED_POP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.10"); + pub const ID_CMC_LRA_POP_WITNESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.11"); + pub const ID_CMC_GET_CERT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.15"); + pub const ID_CMC_GET_CRL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.16"); + pub const ID_CMC_REVOKE_REQUEST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.17"); + pub const ID_CMC_REG_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.18"); + pub const ID_CMC_RESPONSE_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.19"); + pub const ID_CMC_IDENTIFICATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.2"); + pub const ID_CMC_QUERY_PENDING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.21"); + pub const ID_CMC_POP_LINK_RANDOM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.22"); + pub const ID_CMC_POP_LINK_WITNESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.23"); + pub const ID_CMC_CONFIRM_CERT_ACCEPTANCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.24"); + pub const ID_CMC_STATUS_INFO_V_2: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.25"); + pub const ID_CMC_TRUSTED_ANCHORS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.26"); + pub const ID_CMC_AUTH_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.27"); + pub const ID_CMC_BATCH_REQUESTS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.28"); + pub const ID_CMC_BATCH_RESPONSES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.29"); + pub const ID_CMC_IDENTITY_PROOF: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.3"); + pub const ID_CMC_PUBLISH_CERT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.30"); + pub const ID_CMC_MOD_CERT_TEMPLATE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.31"); + pub const ID_CMC_CONTROL_PROCESSED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.32"); + pub const ID_CMC_IDENTITY_PROOF_V_2: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.33"); + pub const ID_CMC_POP_LINK_WITNESS_V_2: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.34"); + pub const ID_CMC_DATA_RETURN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.4"); + pub const ID_CMC_TRANSACTION_ID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.5"); + pub const ID_CMC_SENDER_NONCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.6"); + pub const ID_CMC_RECIPIENT_NONCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.7"); + pub const ID_CMC_ADD_EXTENSIONS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.8"); + pub const ID_CMC_ENCRYPTED_POP: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.9"); + pub const ID_KEY_EXCHANGE_ALGORITHM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.2.1.1.22"); + pub const ID_SHA_256: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.2.1"); + pub const ID_SHA_384: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.2.2"); + pub const ID_SHA_512: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.2.3"); + pub const ID_SHA_224: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.2.4"); + pub const DSA_WITH_SHA_224: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.3.1"); + pub const DSA_WITH_SHA_256: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.101.3.4.3.2"); + pub const HOLD_INSTRUCTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.2.840.10040.2"); + pub const ID_HOLDINSTRUCTION_NONE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.2.840.10040.2.1"); + pub const ID_HOLDINSTRUCTION_CALLISSUER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.2.840.10040.2.2"); + pub const ID_HOLDINSTRUCTION_REJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.2.840.10040.2.3"); + pub const ID_CE: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.29"); + pub const ID_CE_SUBJECT_KEY_IDENTIFIER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.14"); + pub const ID_CE_KEY_USAGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.15"); + pub const ID_CE_PRIVATE_KEY_USAGE_PERIOD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.16"); + pub const ID_CE_SUBJECT_ALT_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.17"); + pub const ID_CE_ISSUER_ALT_NAME: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("2.5.29.18"); + pub const ID_CE_BASIC_CONSTRAINTS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.19"); + pub const ID_CE_CRL_NUMBER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.20"); + pub const ID_CE_CRL_REASONS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.21"); + pub const ID_CE_HOLD_INSTRUCTION_CODE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.23"); + pub const ID_CE_INVALIDITY_DATE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.24"); + pub const ID_CE_DELTA_CRL_INDICATOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.27"); + pub const ID_CE_ISSUING_DISTRIBUTION_POINT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.28"); + pub const ID_CE_CERTIFICATE_ISSUER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.29"); + pub const ID_CE_NAME_CONSTRAINTS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.30"); + pub const ID_CE_CRL_DISTRIBUTION_POINTS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.31"); + pub const ID_CE_CERTIFICATE_POLICIES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.32"); + pub const ID_CE_POLICY_MAPPINGS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.33"); + pub const ID_CE_AUTHORITY_KEY_IDENTIFIER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.35"); + pub const ID_CE_POLICY_CONSTRAINTS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.36"); + pub const ID_CE_EXT_KEY_USAGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.37"); + pub const ANY_EXTENDED_KEY_USAGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.37.0"); + pub const ID_CE_FRESHEST_CRL: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("2.5.29.46"); + pub const ID_CE_INHIBIT_ANY_POLICY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.54"); + pub const ID_CE_TARGET_INFORMATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.55"); + pub const ID_CE_NO_REV_AVAIL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.56"); + pub const ID_CE_SUBJECT_DIRECTORY_ATTRIBUTES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.5.29.9"); + pub const ID_AT: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4"); + pub const ID_AT_ROLE: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("2.5.4.72"); +} +pub mod rfc6109 { + pub const LDIF_LOCATION_URL_OBJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.16572.2.1.1"); + pub const PROVIDER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.16572.2.1.2"); + pub const PROVIDER_CERTIFICATE_HASH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.16572.2.2.1"); + pub const PROVIDER_CERTIFICATE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.16572.2.2.2"); + pub const PROVIDER_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.16572.2.2.3"); + pub const MAIL_RECEIPT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.16572.2.2.4"); + pub const MANAGED_DOMAINS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.16572.2.2.5"); + pub const LDIF_LOCATION_URL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.16572.2.2.6"); + pub const PROVIDER_UNIT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.16572.2.2.7"); +} +pub mod rfc6268 { + pub const RSADSI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549"); + pub const ID_DATA: crate::ObjectIdentifier 
= + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.7.1"); + pub const ID_SIGNED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.7.2"); + pub const ID_ENVELOPED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.7.3"); + pub const ID_DIGESTED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.7.5"); + pub const ID_ENCRYPTED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.7.6"); + pub const ID_CT_CONTENT_COLLECTION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.19"); + pub const ID_CT_AUTH_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.2"); + pub const ID_CT_CONTENT_WITH_ATTRS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.20"); + pub const ID_CT_AUTH_ENVELOPED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.23"); + pub const ID_CT_CONTENT_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.6"); + pub const ID_CT_COMPRESSED_DATA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1.9"); + pub const ID_AA_BINARY_SIGNING_TIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2.46"); + pub const ID_ALG_ZLIB_COMPRESS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.3.8"); + pub const ID_AA_MULTIPLE_SIGNATURES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.2.51"); + pub const ID_CONTENT_TYPE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.3"); + pub const ID_MESSAGE_DIGEST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.4"); + pub const ID_SIGNING_TIME: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.5"); + pub const ID_COUNTERSIGNATURE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.6"); + pub const DIGEST_ALGORITHM: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.2"); + pub const ID_HMAC_WITH_SHA_384: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.2.10"); + pub const ID_HMAC_WITH_SHA_512: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.2.11"); + pub const ID_HMAC_WITH_SHA_224: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.2.8"); + pub const ID_HMAC_WITH_SHA_256: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.2.9"); +} +pub mod rfc6960 { + pub const ID_PKIX_OCSP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1"); + pub const ID_PKIX_OCSP_BASIC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1.1"); + pub const ID_PKIX_OCSP_NONCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1.2"); + pub const ID_PKIX_OCSP_CRL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1.3"); + pub const ID_PKIX_OCSP_RESPONSE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1.4"); + pub const ID_PKIX_OCSP_NOCHECK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1.5"); + pub const ID_PKIX_OCSP_ARCHIVE_CUTOFF: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1.6"); + pub const ID_PKIX_OCSP_SERVICE_LOCATOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1.7"); + pub const ID_PKIX_OCSP_PREF_SIG_ALGS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1.8"); + pub const ID_PKIX_OCSP_EXTENDED_REVOKE: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1.9"); +} +pub mod rfc6962 { + pub const GOOGLE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11129"); + pub const CT_PRECERT_SCTS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11129.2.4.2"); + pub const CT_PRECERT_POISON: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11129.2.4.3"); + pub const CT_PRECERT_SIGNING_CERT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.11129.2.4.4"); +} +pub mod rfc7107 { + pub const ID_SMIME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16"); + pub const ID_MOD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.0"); + pub const ID_CT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.1"); + pub const ID_EIT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.10"); + pub const ID_CAP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.11"); + pub const ID_PSKC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.12"); + pub const ID_AA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.2"); + pub const ID_ALG: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.3"); + pub const ID_CD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.4"); + pub const ID_SPQ: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.5"); + pub const ID_CTI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.6"); + pub const ID_TSP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.7"); + pub const ID_SKD: 
crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.8"); + pub const ID_STI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.2.840.113549.1.9.16.9"); +} +pub mod rfc7299 { + pub const ID_PKIX: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7"); + pub const ID_MOD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.0"); + pub const ID_PE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.1"); + pub const ID_ACA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.10"); + pub const ID_QCS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.11"); + pub const ID_CCT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.12"); + pub const ID_TEST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.13"); + pub const ID_CP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.14"); + pub const ID_CET: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.15"); + pub const ID_RI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.16"); + pub const ID_SCT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.17"); + pub const ID_SWB: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.18"); + pub const ID_SVP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.19"); + pub const ID_NVAE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.19.2"); + pub const ID_BVAE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.19.3"); + pub const ID_DNVAE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.19.4"); + pub const ID_QT: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.2"); + pub const ID_LOGO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.20"); + pub const ID_PPL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.21"); + pub const ID_MR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.22"); + pub const ID_SKIS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.23"); + pub const ID_KP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.3"); + pub const ID_IT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.4"); + pub const ID_AD: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48"); + pub const ID_PKIX_OCSP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.48.1"); + pub const ID_PKIP: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5"); + pub const ID_REG_CTRL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.1"); + pub const ID_REG_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.5.2"); + pub const ID_ALG: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.6"); + pub const ID_CMC: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7"); + pub const ID_CMC_GLA_RR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.7.99"); + pub const ID_ON: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.8"); + pub const ID_PDA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.5.5.7.9"); +} +pub mod rfc7532 { + pub const FEDFS_UUID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.1"); + pub const FEDFS_FSL_PORT: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.10"); + pub const FEDFS_NFS_PATH: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.100"); + pub const FEDFS_NSDB_CONTAINER_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.1001"); + pub const FEDFS_FSN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.1002"); + pub const FEDFS_FSL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.1003"); + pub const FEDFS_NFS_FSL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.1004"); + pub const FEDFS_NFS_MAJOR_VER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.101"); + pub const FEDFS_NFS_MINOR_VER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.102"); + pub const FEDFS_NFS_CURRENCY: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.103"); + pub const FEDFS_NFS_GEN_FLAG_WRITABLE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.104"); + pub const FEDFS_NFS_GEN_FLAG_GOING: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.105"); + pub const FEDFS_NFS_GEN_FLAG_SPLIT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.106"); + pub const FEDFS_NFS_TRANS_FLAG_RDMA: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.107"); + pub const FEDFS_NFS_CLASS_SIMUL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.108"); + pub const FEDFS_NFS_CLASS_HANDLE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.109"); + pub const FEDFS_FSL_TTL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.11"); + pub const FEDFS_NFS_CLASS_FILEID: crate::ObjectIdentifier = 
+ crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.110"); + pub const FEDFS_NFS_CLASS_WRITEVER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.111"); + pub const FEDFS_NFS_CLASS_CHANGE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.112"); + pub const FEDFS_NFS_CLASS_READDIR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.113"); + pub const FEDFS_NFS_READ_RANK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.114"); + pub const FEDFS_NFS_READ_ORDER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.115"); + pub const FEDFS_NFS_WRITE_RANK: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.116"); + pub const FEDFS_NFS_WRITE_ORDER: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.117"); + pub const FEDFS_NFS_VAR_SUB: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.118"); + pub const FEDFS_NFS_VALID_FOR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.119"); + pub const FEDFS_ANNOTATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.12"); + pub const FEDFS_NFS_URI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.120"); + pub const FEDFS_DESCR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.13"); + pub const FEDFS_NCE_DN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.14"); + pub const FEDFS_FSN_TTL: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.15"); + pub const FEDFS_NET_ADDR: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.2"); + pub const FEDFS_NET_PORT: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.3"); + pub const FEDFS_FSN_UUID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.4"); + pub const FEDFS_NSDB_NAME: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.5"); + pub const FEDFS_NSDB_PORT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.6"); + pub const FEDFS_NCE_PREFIX: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.7"); + pub const FEDFS_FSL_UUID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.8"); + pub const FEDFS_FSL_HOST: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.4.1.31103.1.9"); +} +pub mod rfc7612 { + pub const PRINTER_DEVICE_ID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.24.46.1.101"); + pub const PRINTER_DEVICE_SERVICE_COUNT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.24.46.1.102"); + pub const PRINTER_UUID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.24.46.1.104"); + pub const PRINTER_CHARGE_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.24.46.1.105"); + pub const PRINTER_CHARGE_INFO_URI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.24.46.1.106"); + pub const PRINTER_GEO_LOCATION: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.24.46.1.107"); + pub const PRINTER_IPP_FEATURES_SUPPORTED: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.18.0.2.24.46.1.108"); +} +pub mod rfc8284 { + pub const JID_OBJECT: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.23.1"); + pub const JID: crate::ObjectIdentifier = crate::ObjectIdentifier::new_unwrap("1.3.6.1.1.23.2"); +} +pub mod rfc8410 { + pub const ID_EDWARDS_CURVE_ALGS: crate::ObjectIdentifier = + 
crate::ObjectIdentifier::new_unwrap("1.3.101"); + pub const ID_X_25519: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.101.110"); + pub const ID_X_448: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.101.111"); + pub const ID_ED_25519: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.101.112"); + pub const ID_ED_448: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("1.3.101.113"); +} +pub mod rfc8894 { + pub const ID_VERI_SIGN: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113733"); + pub const ID_PKI: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113733.1"); + pub const ID_ATTRIBUTES: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113733.1.9"); + pub const ID_MESSAGE_TYPE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113733.1.9.2"); + pub const ID_PKI_STATUS: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113733.1.9.3"); + pub const ID_FAIL_INFO: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113733.1.9.4"); + pub const ID_SENDER_NONCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113733.1.9.5"); + pub const ID_RECIPIENT_NONCE: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113733.1.9.6"); + pub const ID_TRANSACTION_ID: crate::ObjectIdentifier = + crate::ObjectIdentifier::new_unwrap("2.16.840.1.113733.1.9.7"); +} +pub const DB: super::Database<'static> = super::Database(&[ + (&rfc1274::TEXT_ENCODED_OR_ADDRESS, "textEncodedORAddress"), + (&rfc1274::OTHER_MAILBOX, "otherMailbox"), + (&rfc1274::LAST_MODIFIED_TIME, "lastModifiedTime"), + (&rfc1274::LAST_MODIFIED_BY, "lastModifiedBy"), + (&rfc1274::A_RECORD, "aRecord"), + (&rfc1274::MD_RECORD, "mDRecord"), + (&rfc1274::MX_RECORD, "mXRecord"), + (&rfc1274::NS_RECORD, "nSRecord"), + (&rfc1274::SOA_RECORD, 
"sOARecord"), + (&rfc1274::CNAME_RECORD, "cNAMERecord"), + (&rfc1274::JANET_MAILBOX, "janetMailbox"), + (&rfc1274::MAIL_PREFERENCE_OPTION, "mailPreferenceOption"), + (&rfc1274::DSA_QUALITY, "dSAQuality"), + (&rfc1274::SUBTREE_MINIMUM_QUALITY, "subtreeMinimumQuality"), + (&rfc1274::SUBTREE_MAXIMUM_QUALITY, "subtreeMaximumQuality"), + (&rfc1274::PERSONAL_SIGNATURE, "personalSignature"), + (&rfc1274::DIT_REDIRECT, "dITRedirect"), + (&rfc1274::AUDIO, "audio"), + (&rfc1274::PHOTO, "photo"), + (&rfc1274::DNS_DOMAIN, "dNSDomain"), + (&rfc1274::PILOT_ORGANIZATION, "pilotOrganization"), + (&rfc1274::PILOT_DSA, "pilotDSA"), + (&rfc1274::QUALITY_LABELLED_DATA, "qualityLabelledData"), + (&rfc1274::PILOT_OBJECT, "pilotObject"), + (&rfc1274::PILOT_PERSON, "pilotPerson"), + (&rfc2079::LABELED_URI, "labeledURI"), + (&rfc2079::LABELED_URI_OBJECT, "labeledURIObject"), + (&rfc2164::RFC_822_TO_X_400_MAPPING, "rFC822ToX400Mapping"), + (&rfc2164::X_400_TO_RFC_822_MAPPING, "x400ToRFC822Mapping"), + ( + &rfc2164::OMITTED_OR_ADDRESS_COMPONENT, + "omittedORAddressComponent", + ), + (&rfc2164::MIXER_GATEWAY, "mixerGateway"), + (&rfc2164::ASSOCIATED_X_400_GATEWAY, "associatedX400Gateway"), + (&rfc2164::ASSOCIATED_OR_ADDRESS, "associatedORAddress"), + ( + &rfc2164::OR_ADDRESS_COMPONENT_TYPE, + "oRAddressComponentType", + ), + ( + &rfc2164::ASSOCIATED_INTERNET_GATEWAY, + "associatedInternetGateway", + ), + (&rfc2164::MCGAM_TABLES, "mcgamTables"), + (&rfc2247::DOMAIN_NAME_FORM, "domainNameForm"), + ( + &rfc2252::PRESENTATION_ADDRESS_MATCH, + "presentationAddressMatch", + ), + ( + &rfc2252::PROTOCOL_INFORMATION_MATCH, + "protocolInformationMatch", + ), + (&rfc2256::KNOWLEDGE_INFORMATION, "knowledgeInformation"), + (&rfc2256::PRESENTATION_ADDRESS, "presentationAddress"), + ( + &rfc2256::SUPPORTED_APPLICATION_CONTEXT, + "supportedApplicationContext", + ), + (&rfc2256::PROTOCOL_INFORMATION, "protocolInformation"), + (&rfc2256::DMD_NAME, "dmdName"), + (&rfc2256::STATE_OR_PROVINCE_NAME, 
"stateOrProvinceName"), + (&rfc2256::STREET_ADDRESS, "streetAddress"), + (&rfc2256::APPLICATION_ENTITY, "applicationEntity"), + (&rfc2256::DSA, "dSA"), + (&rfc2256::DMD, "dmd"), + (&rfc2293::SUBTREE, "subtree"), + (&rfc2293::TABLE, "table"), + (&rfc2293::TABLE_ENTRY, "tableEntry"), + (&rfc2293::TEXT_TABLE_ENTRY, "textTableEntry"), + ( + &rfc2293::DISTINGUISHED_NAME_TABLE_ENTRY, + "distinguishedNameTableEntry", + ), + (&rfc2293::TEXT_TABLE_KEY, "textTableKey"), + (&rfc2293::TEXT_TABLE_VALUE, "textTableValue"), + ( + &rfc2293::DISTINGUISHED_NAME_TABLE_KEY, + "distinguishedNameTableKey", + ), + (&rfc2589::DYNAMIC_OBJECT, "dynamicObject"), + (&rfc2589::ENTRY_TTL, "entryTtl"), + (&rfc2589::DYNAMIC_SUBTREES, "dynamicSubtrees"), + (&rfc2739::CAL_CAL_URI, "calCalURI"), + (&rfc2739::CAL_FBURL, "calFBURL"), + (&rfc2739::CAL_CAPURI, "calCAPURI"), + (&rfc2739::CAL_CAL_ADR_URI, "calCalAdrURI"), + (&rfc2739::CAL_OTHER_CAL_UR_IS, "calOtherCalURIs"), + (&rfc2739::CAL_OTHER_FBUR_LS, "calOtherFBURLs"), + (&rfc2739::CAL_OTHER_CAPUR_IS, "calOtherCAPURIs"), + (&rfc2739::CAL_OTHER_CAL_ADR_UR_IS, "calOtherCalAdrURIs"), + (&rfc2739::CAL_ENTRY, "calEntry"), + (&rfc2798::JPEG_PHOTO, "jpegPhoto"), + (&rfc2798::CAR_LICENSE, "carLicense"), + (&rfc2798::DEPARTMENT_NUMBER, "departmentNumber"), + (&rfc2798::USER_PKCS_12, "userPKCS12"), + (&rfc2798::DISPLAY_NAME, "displayName"), + (&rfc2798::EMPLOYEE_NUMBER, "employeeNumber"), + (&rfc2798::PREFERRED_LANGUAGE, "preferredLanguage"), + (&rfc2798::EMPLOYEE_TYPE, "employeeType"), + (&rfc2798::USER_SMIME_CERTIFICATE, "userSMIMECertificate"), + (&rfc2798::INET_ORG_PERSON, "inetOrgPerson"), + (&rfc3280::EMAIL, "email"), + (&rfc3280::EMAIL_ADDRESS, "emailAddress"), + (&rfc3280::PSEUDONYM, "pseudonym"), + (&rfc3296::REF, "ref"), + (&rfc3296::REFERRAL, "referral"), + ( + &rfc3671::COLLECTIVE_ATTRIBUTE_SUBENTRIES, + "collectiveAttributeSubentries", + ), + (&rfc3671::COLLECTIVE_EXCLUSIONS, "collectiveExclusions"), + ( + &rfc3671::COLLECTIVE_ATTRIBUTE_SUBENTRY, 
+ "collectiveAttributeSubentry", + ), + (&rfc3671::C_O, "c-o"), + (&rfc3671::C_OU, "c-ou"), + (&rfc3671::C_POSTAL_ADDRESS, "c-PostalAddress"), + (&rfc3671::C_POSTAL_CODE, "c-PostalCode"), + (&rfc3671::C_POST_OFFICE_BOX, "c-PostOfficeBox"), + ( + &rfc3671::C_PHYSICAL_DELIVERY_OFFICE, + "c-PhysicalDeliveryOffice", + ), + (&rfc3671::C_TELEPHONE_NUMBER, "c-TelephoneNumber"), + (&rfc3671::C_TELEX_NUMBER, "c-TelexNumber"), + ( + &rfc3671::C_FACSIMILE_TELEPHONE_NUMBER, + "c-FacsimileTelephoneNumber", + ), + ( + &rfc3671::C_INTERNATIONAL_ISDN_NUMBER, + "c-InternationalISDNNumber", + ), + (&rfc3671::C_L, "c-l"), + (&rfc3671::C_ST, "c-st"), + (&rfc3671::C_STREET, "c-street"), + (&rfc3672::SUBENTRY, "subentry"), + (&rfc3672::ADMINISTRATIVE_ROLE, "administrativeRole"), + (&rfc3672::SUBTREE_SPECIFICATION, "subtreeSpecification"), + (&rfc3672::AUTONOMOUS_AREA, "autonomousArea"), + ( + &rfc3672::ACCESS_CONTROL_SPECIFIC_AREA, + "accessControlSpecificArea", + ), + ( + &rfc3672::ACCESS_CONTROL_INNER_AREA, + "accessControlInnerArea", + ), + ( + &rfc3672::SUBSCHEMA_ADMIN_SPECIFIC_AREA, + "subschemaAdminSpecificArea", + ), + ( + &rfc3672::COLLECTIVE_ATTRIBUTE_SPECIFIC_AREA, + "collectiveAttributeSpecificArea", + ), + ( + &rfc3672::COLLECTIVE_ATTRIBUTE_INNER_AREA, + "collectiveAttributeInnerArea", + ), + (&rfc3687::COMPONENT_FILTER_MATCH, "componentFilterMatch"), + (&rfc3687::RDN_MATCH, "rdnMatch"), + (&rfc3687::PRESENT_MATCH, "presentMatch"), + (&rfc3687::ALL_COMPONENTS_MATCH, "allComponentsMatch"), + ( + &rfc3687::DIRECTORY_COMPONENTS_MATCH, + "directoryComponentsMatch", + ), + (&rfc3698::STORED_PREFIX_MATCH, "storedPrefixMatch"), + (&rfc3703::PCIM_POLICY, "pcimPolicy"), + ( + &rfc3703::PCIM_RULE_ACTION_ASSOCIATION, + "pcimRuleActionAssociation", + ), + (&rfc3703::PCIM_CONDITION_AUX_CLASS, "pcimConditionAuxClass"), + (&rfc3703::PCIM_TPC_AUX_CLASS, "pcimTPCAuxClass"), + ( + &rfc3703::PCIM_CONDITION_VENDOR_AUX_CLASS, + "pcimConditionVendorAuxClass", + ), + 
(&rfc3703::PCIM_ACTION_AUX_CLASS, "pcimActionAuxClass"), + ( + &rfc3703::PCIM_ACTION_VENDOR_AUX_CLASS, + "pcimActionVendorAuxClass", + ), + (&rfc3703::PCIM_POLICY_INSTANCE, "pcimPolicyInstance"), + (&rfc3703::PCIM_ELEMENT_AUX_CLASS, "pcimElementAuxClass"), + (&rfc3703::PCIM_REPOSITORY, "pcimRepository"), + ( + &rfc3703::PCIM_REPOSITORY_AUX_CLASS, + "pcimRepositoryAuxClass", + ), + (&rfc3703::PCIM_GROUP, "pcimGroup"), + (&rfc3703::PCIM_REPOSITORY_INSTANCE, "pcimRepositoryInstance"), + ( + &rfc3703::PCIM_SUBTREES_PTR_AUX_CLASS, + "pcimSubtreesPtrAuxClass", + ), + ( + &rfc3703::PCIM_GROUP_CONTAINMENT_AUX_CLASS, + "pcimGroupContainmentAuxClass", + ), + ( + &rfc3703::PCIM_RULE_CONTAINMENT_AUX_CLASS, + "pcimRuleContainmentAuxClass", + ), + (&rfc3703::PCIM_GROUP_AUX_CLASS, "pcimGroupAuxClass"), + (&rfc3703::PCIM_GROUP_INSTANCE, "pcimGroupInstance"), + (&rfc3703::PCIM_RULE, "pcimRule"), + (&rfc3703::PCIM_RULE_AUX_CLASS, "pcimRuleAuxClass"), + (&rfc3703::PCIM_RULE_INSTANCE, "pcimRuleInstance"), + ( + &rfc3703::PCIM_RULE_CONDITION_ASSOCIATION, + "pcimRuleConditionAssociation", + ), + ( + &rfc3703::PCIM_RULE_VALIDITY_ASSOCIATION, + "pcimRuleValidityAssociation", + ), + ( + &rfc3703::PCIM_RULE_VALIDITY_PERIOD_LIST, + "pcimRuleValidityPeriodList", + ), + (&rfc3703::PCIM_RULE_USAGE, "pcimRuleUsage"), + (&rfc3703::PCIM_RULE_PRIORITY, "pcimRulePriority"), + (&rfc3703::PCIM_RULE_MANDATORY, "pcimRuleMandatory"), + ( + &rfc3703::PCIM_RULE_SEQUENCED_ACTIONS, + "pcimRuleSequencedActions", + ), + (&rfc3703::PCIM_ROLES, "pcimRoles"), + ( + &rfc3703::PCIM_CONDITION_GROUP_NUMBER, + "pcimConditionGroupNumber", + ), + (&rfc3703::PCIM_CONDITION_NEGATED, "pcimConditionNegated"), + (&rfc3703::PCIM_CONDITION_NAME, "pcimConditionName"), + (&rfc3703::PCIM_CONDITION_DN, "pcimConditionDN"), + ( + &rfc3703::PCIM_VALIDITY_CONDITION_NAME, + "pcimValidityConditionName", + ), + ( + &rfc3703::PCIM_TIME_PERIOD_CONDITION_DN, + "pcimTimePeriodConditionDN", + ), + (&rfc3703::PCIM_ACTION_NAME, 
"pcimActionName"), + (&rfc3703::PCIM_ACTION_ORDER, "pcimActionOrder"), + (&rfc3703::PCIM_ACTION_DN, "pcimActionDN"), + (&rfc3703::PCIM_TPC_TIME, "pcimTPCTime"), + ( + &rfc3703::PCIM_TPC_MONTH_OF_YEAR_MASK, + "pcimTPCMonthOfYearMask", + ), + ( + &rfc3703::PCIM_TPC_DAY_OF_MONTH_MASK, + "pcimTPCDayOfMonthMask", + ), + (&rfc3703::PCIM_TPC_DAY_OF_WEEK_MASK, "pcimTPCDayOfWeekMask"), + (&rfc3703::PCIM_TPC_TIME_OF_DAY_MASK, "pcimTPCTimeOfDayMask"), + (&rfc3703::PCIM_KEYWORDS, "pcimKeywords"), + ( + &rfc3703::PCIM_TPC_LOCAL_OR_UTC_TIME, + "pcimTPCLocalOrUtcTime", + ), + ( + &rfc3703::PCIM_VENDOR_CONSTRAINT_DATA, + "pcimVendorConstraintData", + ), + ( + &rfc3703::PCIM_VENDOR_CONSTRAINT_ENCODING, + "pcimVendorConstraintEncoding", + ), + (&rfc3703::PCIM_VENDOR_ACTION_DATA, "pcimVendorActionData"), + ( + &rfc3703::PCIM_VENDOR_ACTION_ENCODING, + "pcimVendorActionEncoding", + ), + ( + &rfc3703::PCIM_POLICY_INSTANCE_NAME, + "pcimPolicyInstanceName", + ), + (&rfc3703::PCIM_REPOSITORY_NAME, "pcimRepositoryName"), + ( + &rfc3703::PCIM_SUBTREES_AUX_CONTAINED_SET, + "pcimSubtreesAuxContainedSet", + ), + ( + &rfc3703::PCIM_GROUPS_AUX_CONTAINED_SET, + "pcimGroupsAuxContainedSet", + ), + ( + &rfc3703::PCIM_RULES_AUX_CONTAINED_SET, + "pcimRulesAuxContainedSet", + ), + (&rfc3703::PCIM_GROUP_NAME, "pcimGroupName"), + (&rfc3703::PCIM_RULE_NAME, "pcimRuleName"), + (&rfc3703::PCIM_RULE_ENABLED, "pcimRuleEnabled"), + ( + &rfc3703::PCIM_RULE_CONDITION_LIST_TYPE, + "pcimRuleConditionListType", + ), + (&rfc3703::PCIM_RULE_CONDITION_LIST, "pcimRuleConditionList"), + (&rfc3703::PCIM_RULE_ACTION_LIST, "pcimRuleActionList"), + (&rfc3712::PRINTER_XRI_SUPPORTED, "printer-xri-supported"), + (&rfc3712::PRINTER_ALIASES, "printer-aliases"), + ( + &rfc3712::PRINTER_CHARSET_CONFIGURED, + "printer-charset-configured", + ), + ( + &rfc3712::PRINTER_JOB_PRIORITY_SUPPORTED, + "printer-job-priority-supported", + ), + ( + &rfc3712::PRINTER_JOB_K_OCTETS_SUPPORTED, + "printer-job-k-octets-supported", + ), + ( + 
&rfc3712::PRINTER_CURRENT_OPERATOR, + "printer-current-operator", + ), + (&rfc3712::PRINTER_SERVICE_PERSON, "printer-service-person"), + ( + &rfc3712::PRINTER_DELIVERY_ORIENTATION_SUPPORTED, + "printer-delivery-orientation-supported", + ), + ( + &rfc3712::PRINTER_STACKING_ORDER_SUPPORTED, + "printer-stacking-order-supported", + ), + ( + &rfc3712::PRINTER_OUTPUT_FEATURES_SUPPORTED, + "printer-output-features-supported", + ), + ( + &rfc3712::PRINTER_MEDIA_LOCAL_SUPPORTED, + "printer-media-local-supported", + ), + ( + &rfc3712::PRINTER_COPIES_SUPPORTED, + "printer-copies-supported", + ), + ( + &rfc3712::PRINTER_NATURAL_LANGUAGE_CONFIGURED, + "printer-natural-language-configured", + ), + ( + &rfc3712::PRINTER_PRINT_QUALITY_SUPPORTED, + "printer-print-quality-supported", + ), + ( + &rfc3712::PRINTER_RESOLUTION_SUPPORTED, + "printer-resolution-supported", + ), + (&rfc3712::PRINTER_MEDIA_SUPPORTED, "printer-media-supported"), + (&rfc3712::PRINTER_SIDES_SUPPORTED, "printer-sides-supported"), + ( + &rfc3712::PRINTER_NUMBER_UP_SUPPORTED, + "printer-number-up-supported", + ), + ( + &rfc3712::PRINTER_FINISHINGS_SUPPORTED, + "printer-finishings-supported", + ), + ( + &rfc3712::PRINTER_PAGES_PER_MINUTE_COLOR, + "printer-pages-per-minute-color", + ), + ( + &rfc3712::PRINTER_PAGES_PER_MINUTE, + "printer-pages-per-minute", + ), + ( + &rfc3712::PRINTER_COMPRESSION_SUPPORTED, + "printer-compression-supported", + ), + (&rfc3712::PRINTER_COLOR_SUPPORTED, "printer-color-supported"), + ( + &rfc3712::PRINTER_DOCUMENT_FORMAT_SUPPORTED, + "printer-document-format-supported", + ), + ( + &rfc3712::PRINTER_CHARSET_SUPPORTED, + "printer-charset-supported", + ), + ( + &rfc3712::PRINTER_MULTIPLE_DOCUMENT_JOBS_SUPPORTED, + "printer-multiple-document-jobs-supported", + ), + ( + &rfc3712::PRINTER_IPP_VERSIONS_SUPPORTED, + "printer-ipp-versions-supported", + ), + (&rfc3712::PRINTER_MORE_INFO, "printer-more-info"), + (&rfc3712::PRINTER_NAME, "printer-name"), + (&rfc3712::PRINTER_LOCATION, 
"printer-location"), + ( + &rfc3712::PRINTER_GENERATED_NATURAL_LANGUAGE_SUPPORTED, + "printer-generated-natural-language-supported", + ), + (&rfc3712::PRINTER_MAKE_AND_MODEL, "printer-make-and-model"), + (&rfc3712::PRINTER_INFO, "printer-info"), + (&rfc3712::PRINTER_URI, "printer-uri"), + (&rfc3712::PRINTER_LPR, "printerLPR"), + (&rfc3712::SLP_SERVICE_PRINTER, "slpServicePrinter"), + (&rfc3712::PRINTER_SERVICE, "printerService"), + (&rfc3712::PRINTER_IPP, "printerIPP"), + ( + &rfc3712::PRINTER_SERVICE_AUX_CLASS, + "printerServiceAuxClass", + ), + (&rfc3712::PRINTER_ABSTRACT, "printerAbstract"), + (&rfc4104::PCELS_POLICY_SET, "pcelsPolicySet"), + (&rfc4104::PCELS_ACTION_ASSOCIATION, "pcelsActionAssociation"), + ( + &rfc4104::PCELS_SIMPLE_CONDITION_AUX_CLASS, + "pcelsSimpleConditionAuxClass", + ), + ( + &rfc4104::PCELS_COMPOUND_CONDITION_AUX_CLASS, + "pcelsCompoundConditionAuxClass", + ), + ( + &rfc4104::PCELS_COMPOUND_FILTER_CONDITION_AUX_CLASS, + "pcelsCompoundFilterConditionAuxClass", + ), + ( + &rfc4104::PCELS_SIMPLE_ACTION_AUX_CLASS, + "pcelsSimpleActionAuxClass", + ), + ( + &rfc4104::PCELS_COMPOUND_ACTION_AUX_CLASS, + "pcelsCompoundActionAuxClass", + ), + (&rfc4104::PCELS_VARIABLE, "pcelsVariable"), + ( + &rfc4104::PCELS_EXPLICIT_VARIABLE_AUX_CLASS, + "pcelsExplicitVariableAuxClass", + ), + ( + &rfc4104::PCELS_IMPLICIT_VARIABLE_AUX_CLASS, + "pcelsImplicitVariableAuxClass", + ), + ( + &rfc4104::PCELS_SOURCE_I_PV_4_VARIABLE_AUX_CLASS, + "pcelsSourceIPv4VariableAuxClass", + ), + ( + &rfc4104::PCELS_POLICY_SET_ASSOCIATION, + "pcelsPolicySetAssociation", + ), + ( + &rfc4104::PCELS_SOURCE_I_PV_6_VARIABLE_AUX_CLASS, + "pcelsSourceIPv6VariableAuxClass", + ), + ( + &rfc4104::PCELS_DESTINATION_I_PV_4_VARIABLE_AUX_CLASS, + "pcelsDestinationIPv4VariableAuxClass", + ), + ( + &rfc4104::PCELS_DESTINATION_I_PV_6_VARIABLE_AUX_CLASS, + "pcelsDestinationIPv6VariableAuxClass", + ), + ( + &rfc4104::PCELS_SOURCE_PORT_VARIABLE_AUX_CLASS, + "pcelsSourcePortVariableAuxClass", + ), + ( 
+ &rfc4104::PCELS_DESTINATION_PORT_VARIABLE_AUX_CLASS, + "pcelsDestinationPortVariableAuxClass", + ), + ( + &rfc4104::PCELS_IP_PROTOCOL_VARIABLE_AUX_CLASS, + "pcelsIPProtocolVariableAuxClass", + ), + ( + &rfc4104::PCELS_IP_VERSION_VARIABLE_AUX_CLASS, + "pcelsIPVersionVariableAuxClass", + ), + ( + &rfc4104::PCELS_IP_TO_S_VARIABLE_AUX_CLASS, + "pcelsIPToSVariableAuxClass", + ), + ( + &rfc4104::PCELS_DSCP_VARIABLE_AUX_CLASS, + "pcelsDSCPVariableAuxClass", + ), + ( + &rfc4104::PCELS_FLOW_ID_VARIABLE_AUX_CLASS, + "pcelsFlowIdVariableAuxClass", + ), + (&rfc4104::PCELS_GROUP, "pcelsGroup"), + ( + &rfc4104::PCELS_SOURCE_MAC_VARIABLE_AUX_CLASS, + "pcelsSourceMACVariableAuxClass", + ), + ( + &rfc4104::PCELS_DESTINATION_MAC_VARIABLE_AUX_CLASS, + "pcelsDestinationMACVariableAuxClass", + ), + ( + &rfc4104::PCELS_VLAN_VARIABLE_AUX_CLASS, + "pcelsVLANVariableAuxClass", + ), + ( + &rfc4104::PCELS_CO_S_VARIABLE_AUX_CLASS, + "pcelsCoSVariableAuxClass", + ), + ( + &rfc4104::PCELS_ETHERTYPE_VARIABLE_AUX_CLASS, + "pcelsEthertypeVariableAuxClass", + ), + ( + &rfc4104::PCELS_SOURCE_SAP_VARIABLE_AUX_CLASS, + "pcelsSourceSAPVariableAuxClass", + ), + ( + &rfc4104::PCELS_DESTINATION_SAP_VARIABLE_AUX_CLASS, + "pcelsDestinationSAPVariableAuxClass", + ), + ( + &rfc4104::PCELS_SNAPOUI_VARIABLE_AUX_CLASS, + "pcelsSNAPOUIVariableAuxClass", + ), + ( + &rfc4104::PCELS_SNAP_TYPE_VARIABLE_AUX_CLASS, + "pcelsSNAPTypeVariableAuxClass", + ), + ( + &rfc4104::PCELS_FLOW_DIRECTION_VARIABLE_AUX_CLASS, + "pcelsFlowDirectionVariableAuxClass", + ), + (&rfc4104::PCELS_GROUP_AUX_CLASS, "pcelsGroupAuxClass"), + (&rfc4104::PCELS_VALUE_AUX_CLASS, "pcelsValueAuxClass"), + ( + &rfc4104::PCELS_I_PV_4_ADDR_VALUE_AUX_CLASS, + "pcelsIPv4AddrValueAuxClass", + ), + ( + &rfc4104::PCELS_I_PV_6_ADDR_VALUE_AUX_CLASS, + "pcelsIPv6AddrValueAuxClass", + ), + ( + &rfc4104::PCELS_MAC_ADDR_VALUE_AUX_CLASS, + "pcelsMACAddrValueAuxClass", + ), + ( + &rfc4104::PCELS_STRING_VALUE_AUX_CLASS, + "pcelsStringValueAuxClass", + ), + ( + 
&rfc4104::PCELS_BIT_STRING_VALUE_AUX_CLASS, + "pcelsBitStringValueAuxClass", + ), + ( + &rfc4104::PCELS_INTEGER_VALUE_AUX_CLASS, + "pcelsIntegerValueAuxClass", + ), + ( + &rfc4104::PCELS_BOOLEAN_VALUE_AUX_CLASS, + "pcelsBooleanValueAuxClass", + ), + (&rfc4104::PCELS_REUSABLE_CONTAINER, "pcelsReusableContainer"), + ( + &rfc4104::PCELS_REUSABLE_CONTAINER_AUX_CLASS, + "pcelsReusableContainerAuxClass", + ), + (&rfc4104::PCELS_GROUP_INSTANCE, "pcelsGroupInstance"), + ( + &rfc4104::PCELS_REUSABLE_CONTAINER_INSTANCE, + "pcelsReusableContainerInstance", + ), + (&rfc4104::PCELS_ROLE_COLLECTION, "pcelsRoleCollection"), + (&rfc4104::PCELS_FILTER_ENTRY_BASE, "pcelsFilterEntryBase"), + (&rfc4104::PCELS_IP_HEADERS_FILTER, "pcelsIPHeadersFilter"), + (&rfc4104::PCELS_8021_FILTER, "pcels8021Filter"), + ( + &rfc4104::PCELS_FILTER_LIST_AUX_CLASS, + "pcelsFilterListAuxClass", + ), + ( + &rfc4104::PCELS_VENDOR_VARIABLE_AUX_CLASS, + "pcelsVendorVariableAuxClass", + ), + ( + &rfc4104::PCELS_VENDOR_VALUE_AUX_CLASS, + "pcelsVendorValueAuxClass", + ), + (&rfc4104::PCELS_RULE, "pcelsRule"), + (&rfc4104::PCELS_RULE_AUX_CLASS, "pcelsRuleAuxClass"), + (&rfc4104::PCELS_RULE_INSTANCE, "pcelsRuleInstance"), + ( + &rfc4104::PCELS_CONDITION_ASSOCIATION, + "pcelsConditionAssociation", + ), + (&rfc4104::PCELS_POLICY_SET_NAME, "pcelsPolicySetName"), + (&rfc4104::PCELS_EXECUTION_STRATEGY, "pcelsExecutionStrategy"), + (&rfc4104::PCELS_VARIABLE_DN, "pcelsVariableDN"), + (&rfc4104::PCELS_VALUE_DN, "pcelsValueDN"), + (&rfc4104::PCELS_IS_MIRRORED, "pcelsIsMirrored"), + (&rfc4104::PCELS_VARIABLE_NAME, "pcelsVariableName"), + ( + &rfc4104::PCELS_EXPECTED_VALUE_LIST, + "pcelsExpectedValueList", + ), + ( + &rfc4104::PCELS_VARIABLE_MODEL_CLASS, + "pcelsVariableModelClass", + ), + ( + &rfc4104::PCELS_VARIABLE_MODEL_PROPERTY, + "pcelsVariableModelProperty", + ), + ( + &rfc4104::PCELS_EXPECTED_VALUE_TYPES, + "pcelsExpectedValueTypes", + ), + (&rfc4104::PCELS_VALUE_NAME, "pcelsValueName"), + 
(&rfc4104::PCELS_DECISION_STRATEGY, "pcelsDecisionStrategy"), + (&rfc4104::PCELS_I_PV_4_ADDR_LIST, "pcelsIPv4AddrList"), + (&rfc4104::PCELS_I_PV_6_ADDR_LIST, "pcelsIPv6AddrList"), + (&rfc4104::PCELS_MAC_ADDR_LIST, "pcelsMACAddrList"), + (&rfc4104::PCELS_STRING_LIST, "pcelsStringList"), + (&rfc4104::PCELS_BIT_STRING_LIST, "pcelsBitStringList"), + (&rfc4104::PCELS_INTEGER_LIST, "pcelsIntegerList"), + (&rfc4104::PCELS_BOOLEAN, "pcelsBoolean"), + ( + &rfc4104::PCELS_REUSABLE_CONTAINER_NAME, + "pcelsReusableContainerName", + ), + ( + &rfc4104::PCELS_REUSABLE_CONTAINER_LIST, + "pcelsReusableContainerList", + ), + (&rfc4104::PCELS_ROLE, "pcelsRole"), + (&rfc4104::PCELS_POLICY_SET_LIST, "pcelsPolicySetList"), + ( + &rfc4104::PCELS_ROLE_COLLECTION_NAME, + "pcelsRoleCollectionName", + ), + (&rfc4104::PCELS_ELEMENT_LIST, "pcelsElementList"), + (&rfc4104::PCELS_FILTER_NAME, "pcelsFilterName"), + (&rfc4104::PCELS_FILTER_IS_NEGATED, "pcelsFilterIsNegated"), + (&rfc4104::PCELS_IP_HDR_VERSION, "pcelsIPHdrVersion"), + ( + &rfc4104::PCELS_IP_HDR_SOURCE_ADDRESS, + "pcelsIPHdrSourceAddress", + ), + ( + &rfc4104::PCELS_IP_HDR_SOURCE_ADDRESS_END_OF_RANGE, + "pcelsIPHdrSourceAddressEndOfRange", + ), + (&rfc4104::PCELS_IP_HDR_SOURCE_MASK, "pcelsIPHdrSourceMask"), + (&rfc4104::PCELS_IP_HDR_DEST_ADDRESS, "pcelsIPHdrDestAddress"), + ( + &rfc4104::PCELS_IP_HDR_DEST_ADDRESS_END_OF_RANGE, + "pcelsIPHdrDestAddressEndOfRange", + ), + (&rfc4104::PCELS_PRIORITY, "pcelsPriority"), + (&rfc4104::PCELS_IP_HDR_DEST_MASK, "pcelsIPHdrDestMask"), + (&rfc4104::PCELS_IP_HDR_PROTOCOL_ID, "pcelsIPHdrProtocolID"), + ( + &rfc4104::PCELS_IP_HDR_SOURCE_PORT_START, + "pcelsIPHdrSourcePortStart", + ), + ( + &rfc4104::PCELS_IP_HDR_SOURCE_PORT_END, + "pcelsIPHdrSourcePortEnd", + ), + ( + &rfc4104::PCELS_IP_HDR_DEST_PORT_START, + "pcelsIPHdrDestPortStart", + ), + ( + &rfc4104::PCELS_IP_HDR_DEST_PORT_END, + "pcelsIPHdrDestPortEnd", + ), + (&rfc4104::PCELS_IP_HDR_DSCP_LIST, "pcelsIPHdrDSCPList"), + 
(&rfc4104::PCELS_IP_HDR_FLOW_LABEL, "pcelsIPHdrFlowLabel"), + ( + &rfc4104::PCELS_8021_HDR_SOURCE_MAC_ADDRESS, + "pcels8021HdrSourceMACAddress", + ), + ( + &rfc4104::PCELS_8021_HDR_SOURCE_MAC_MASK, + "pcels8021HdrSourceMACMask", + ), + (&rfc4104::PCELS_POLICY_SET_DN, "pcelsPolicySetDN"), + ( + &rfc4104::PCELS_8021_HDR_DEST_MAC_ADDRESS, + "pcels8021HdrDestMACAddress", + ), + ( + &rfc4104::PCELS_8021_HDR_DEST_MAC_MASK, + "pcels8021HdrDestMACMask", + ), + ( + &rfc4104::PCELS_8021_HDR_PROTOCOL_ID, + "pcels8021HdrProtocolID", + ), + (&rfc4104::PCELS_8021_HDR_PRIORITY, "pcels8021HdrPriority"), + (&rfc4104::PCELS_8021_HDR_VLANID, "pcels8021HdrVLANID"), + (&rfc4104::PCELS_FILTER_LIST_NAME, "pcelsFilterListName"), + (&rfc4104::PCELS_FILTER_DIRECTION, "pcelsFilterDirection"), + (&rfc4104::PCELS_FILTER_ENTRY_LIST, "pcelsFilterEntryList"), + ( + &rfc4104::PCELS_VENDOR_VARIABLE_DATA, + "pcelsVendorVariableData", + ), + ( + &rfc4104::PCELS_VENDOR_VARIABLE_ENCODING, + "pcelsVendorVariableEncoding", + ), + ( + &rfc4104::PCELS_CONDITION_LIST_TYPE, + "pcelsConditionListType", + ), + (&rfc4104::PCELS_VENDOR_VALUE_DATA, "pcelsVendorValueData"), + ( + &rfc4104::PCELS_VENDOR_VALUE_ENCODING, + "pcelsVendorValueEncoding", + ), + ( + &rfc4104::PCELS_RULE_VALIDITY_PERIOD_LIST, + "pcelsRuleValidityPeriodList", + ), + (&rfc4104::PCELS_CONDITION_LIST, "pcelsConditionList"), + (&rfc4104::PCELS_ACTION_LIST, "pcelsActionList"), + (&rfc4104::PCELS_SEQUENCED_ACTIONS, "pcelsSequencedActions"), + (&rfc4237::VPIM_USER, "vPIMUser"), + (&rfc4237::VPIM_TELEPHONE_NUMBER, "vPIMTelephoneNumber"), + (&rfc4237::VPIM_SUB_MAILBOXES, "vPIMSubMailboxes"), + (&rfc4237::VPIM_RFC_822_MAILBOX, "vPIMRfc822Mailbox"), + (&rfc4237::VPIM_SPOKEN_NAME, "vPIMSpokenName"), + ( + &rfc4237::VPIM_SUPPORTED_UA_BEHAVIORS, + "vPIMSupportedUABehaviors", + ), + ( + &rfc4237::VPIM_SUPPORTED_AUDIO_MEDIA_TYPES, + "vPIMSupportedAudioMediaTypes", + ), + ( + &rfc4237::VPIM_SUPPORTED_MESSAGE_CONTEXT, + "vPIMSupportedMessageContext", + ), + 
(&rfc4237::VPIM_TEXT_NAME, "vPIMTextName"), + ( + &rfc4237::VPIM_EXTENDED_ABSENCE_STATUS, + "vPIMExtendedAbsenceStatus", + ), + (&rfc4237::VPIM_MAX_MESSAGE_SIZE, "vPIMMaxMessageSize"), + (&rfc4403::UDDIV_3_SERVICE_KEY, "uddiv3ServiceKey"), + ( + &rfc4403::UDDI_BUSINESS_ENTITY_NAME_FORM, + "uddiBusinessEntityNameForm", + ), + ( + &rfc4403::UDDIV_3_ENTITY_OBITUARY_NAME_FORM, + "uddiv3EntityObituaryNameForm", + ), + (&rfc4403::UDDI_CONTACT_NAME_FORM, "uddiContactNameForm"), + (&rfc4403::UDDI_ADDRESS_NAME_FORM, "uddiAddressNameForm"), + ( + &rfc4403::UDDI_BUSINESS_SERVICE_NAME_FORM, + "uddiBusinessServiceNameForm", + ), + ( + &rfc4403::UDDI_BINDING_TEMPLATE_NAME_FORM, + "uddiBindingTemplateNameForm", + ), + ( + &rfc4403::UDDI_T_MODEL_INSTANCE_INFO_NAME_FORM, + "uddiTModelInstanceInfoNameForm", + ), + (&rfc4403::UDDI_T_MODEL_NAME_FORM, "uddiTModelNameForm"), + ( + &rfc4403::UDDI_PUBLISHER_ASSERTION_NAME_FORM, + "uddiPublisherAssertionNameForm", + ), + ( + &rfc4403::UDDIV_3_SUBSCRIPTION_NAME_FORM, + "uddiv3SubscriptionNameForm", + ), + (&rfc4403::UDDI_BUSINESS_KEY, "uddiBusinessKey"), + (&rfc4403::UDDI_E_MAIL, "uddiEMail"), + (&rfc4403::UDDI_SORT_CODE, "uddiSortCode"), + (&rfc4403::UDDI_T_MODEL_KEY, "uddiTModelKey"), + (&rfc4403::UDDI_ADDRESS_LINE, "uddiAddressLine"), + (&rfc4403::UDDI_IDENTIFIER_BAG, "uddiIdentifierBag"), + (&rfc4403::UDDI_CATEGORY_BAG, "uddiCategoryBag"), + (&rfc4403::UDDI_KEYED_REFERENCE, "uddiKeyedReference"), + (&rfc4403::UDDI_SERVICE_KEY, "uddiServiceKey"), + (&rfc4403::UDDI_BINDING_KEY, "uddiBindingKey"), + (&rfc4403::UDDI_ACCESS_POINT, "uddiAccessPoint"), + (&rfc4403::UDDI_AUTHORIZED_NAME, "uddiAuthorizedName"), + (&rfc4403::UDDI_HOSTING_REDIRECTOR, "uddiHostingRedirector"), + ( + &rfc4403::UDDI_INSTANCE_DESCRIPTION, + "uddiInstanceDescription", + ), + (&rfc4403::UDDI_INSTANCE_PARMS, "uddiInstanceParms"), + ( + &rfc4403::UDDI_OVERVIEW_DESCRIPTION, + "uddiOverviewDescription", + ), + (&rfc4403::UDDI_OVERVIEW_URL, "uddiOverviewURL"), + 
(&rfc4403::UDDI_FROM_KEY, "uddiFromKey"), + (&rfc4403::UDDI_TO_KEY, "uddiToKey"), + (&rfc4403::UDDI_UUID, "uddiUUID"), + (&rfc4403::UDDI_IS_HIDDEN, "uddiIsHidden"), + (&rfc4403::UDDI_IS_PROJECTION, "uddiIsProjection"), + (&rfc4403::UDDI_OPERATOR, "uddiOperator"), + (&rfc4403::UDDI_LANG, "uddiLang"), + (&rfc4403::UDDIV_3_BUSINESS_KEY, "uddiv3BusinessKey"), + (&rfc4403::UDDIV_3_BINDING_KEY, "uddiv3BindingKey"), + (&rfc4403::UDDIV_3_TMODEL_KEY, "uddiv3TmodelKey"), + ( + &rfc4403::UDDIV_3_DIGITAL_SIGNATURE, + "uddiv3DigitalSignature", + ), + (&rfc4403::UDDIV_3_NODE_ID, "uddiv3NodeId"), + ( + &rfc4403::UDDIV_3_ENTITY_MODIFICATION_TIME, + "uddiv3EntityModificationTime", + ), + (&rfc4403::UDDIV_3_SUBSCRIPTION_KEY, "uddiv3SubscriptionKey"), + ( + &rfc4403::UDDIV_3_SUBSCRIPTION_FILTER, + "uddiv3SubscriptionFilter", + ), + (&rfc4403::UDDI_NAME, "uddiName"), + ( + &rfc4403::UDDIV_3_NOTIFICATION_INTERVAL, + "uddiv3NotificationInterval", + ), + (&rfc4403::UDDIV_3_MAX_ENTITIES, "uddiv3MaxEntities"), + (&rfc4403::UDDIV_3_EXPIRES_AFTER, "uddiv3ExpiresAfter"), + (&rfc4403::UDDIV_3_BRIEF_RESPONSE, "uddiv3BriefResponse"), + (&rfc4403::UDDIV_3_ENTITY_KEY, "uddiv3EntityKey"), + ( + &rfc4403::UDDIV_3_ENTITY_CREATION_TIME, + "uddiv3EntityCreationTime", + ), + ( + &rfc4403::UDDIV_3_ENTITY_DELETION_TIME, + "uddiv3EntityDeletionTime", + ), + (&rfc4403::UDDI_DESCRIPTION, "uddiDescription"), + (&rfc4403::UDDI_DISCOVERY_UR_LS, "uddiDiscoveryURLs"), + (&rfc4403::UDDI_USE_TYPE, "uddiUseType"), + (&rfc4403::UDDI_PERSON_NAME, "uddiPersonName"), + (&rfc4403::UDDI_PHONE, "uddiPhone"), + (&rfc4403::UDDI_BUSINESS_ENTITY, "uddiBusinessEntity"), + (&rfc4403::UDDIV_3_ENTITY_OBITUARY, "uddiv3EntityObituary"), + (&rfc4403::UDDI_CONTACT, "uddiContact"), + (&rfc4403::UDDI_ADDRESS, "uddiAddress"), + (&rfc4403::UDDI_BUSINESS_SERVICE, "uddiBusinessService"), + (&rfc4403::UDDI_BINDING_TEMPLATE, "uddiBindingTemplate"), + ( + &rfc4403::UDDI_T_MODEL_INSTANCE_INFO, + "uddiTModelInstanceInfo", + ), + 
(&rfc4403::UDDI_T_MODEL, "uddiTModel"), + (&rfc4403::UDDI_PUBLISHER_ASSERTION, "uddiPublisherAssertion"), + (&rfc4403::UDDIV_3_SUBSCRIPTION, "uddiv3Subscription"), + (&rfc4512::EXTENSIBLE_OBJECT, "extensibleObject"), + (&rfc4512::SUPPORTED_CONTROL, "supportedControl"), + ( + &rfc4512::SUPPORTED_SASL_MECHANISMS, + "supportedSASLMechanisms", + ), + (&rfc4512::SUPPORTED_LDAP_VERSION, "supportedLDAPVersion"), + (&rfc4512::LDAP_SYNTAXES, "ldapSyntaxes"), + (&rfc4512::NAMING_CONTEXTS, "namingContexts"), + (&rfc4512::ALT_SERVER, "altServer"), + (&rfc4512::SUPPORTED_EXTENSION, "supportedExtension"), + (&rfc4512::SUPPORTED_FEATURES, "supportedFeatures"), + (&rfc4512::CREATE_TIMESTAMP, "createTimestamp"), + (&rfc4512::SUBSCHEMA_SUBENTRY, "subschemaSubentry"), + (&rfc4512::MODIFY_TIMESTAMP, "modifyTimestamp"), + (&rfc4512::CREATORS_NAME, "creatorsName"), + (&rfc4512::MODIFIERS_NAME, "modifiersName"), + (&rfc4512::SUBSCHEMA, "subschema"), + (&rfc4512::DIT_STRUCTURE_RULES, "dITStructureRules"), + (&rfc4512::GOVERNING_STRUCTURE_RULE, "governingStructureRule"), + (&rfc4512::DIT_CONTENT_RULES, "dITContentRules"), + (&rfc4512::MATCHING_RULES, "matchingRules"), + (&rfc4512::ATTRIBUTE_TYPES, "attributeTypes"), + (&rfc4512::OBJECT_CLASSES, "objectClasses"), + (&rfc4512::NAME_FORMS, "nameForms"), + (&rfc4512::MATCHING_RULE_USE, "matchingRuleUse"), + (&rfc4512::STRUCTURAL_OBJECT_CLASS, "structuralObjectClass"), + (&rfc4512::OBJECT_CLASS, "objectClass"), + (&rfc4512::ALIASED_OBJECT_NAME, "aliasedObjectName"), + (&rfc4512::TOP, "top"), + (&rfc4512::ALIAS, "alias"), + (&rfc4517::CASE_EXACT_IA_5_MATCH, "caseExactIA5Match"), + (&rfc4517::CASE_IGNORE_IA_5_MATCH, "caseIgnoreIA5Match"), + ( + &rfc4517::CASE_IGNORE_IA_5_SUBSTRINGS_MATCH, + "caseIgnoreIA5SubstringsMatch", + ), + (&rfc4517::OBJECT_IDENTIFIER_MATCH, "objectIdentifierMatch"), + (&rfc4517::DISTINGUISHED_NAME_MATCH, "distinguishedNameMatch"), + ( + &rfc4517::NUMERIC_STRING_SUBSTRINGS_MATCH, + "numericStringSubstringsMatch", + ), + 
(&rfc4517::CASE_IGNORE_LIST_MATCH, "caseIgnoreListMatch"), + ( + &rfc4517::CASE_IGNORE_LIST_SUBSTRINGS_MATCH, + "caseIgnoreListSubstringsMatch", + ), + (&rfc4517::BOOLEAN_MATCH, "booleanMatch"), + (&rfc4517::INTEGER_MATCH, "integerMatch"), + (&rfc4517::INTEGER_ORDERING_MATCH, "integerOrderingMatch"), + (&rfc4517::BIT_STRING_MATCH, "bitStringMatch"), + (&rfc4517::OCTET_STRING_MATCH, "octetStringMatch"), + ( + &rfc4517::OCTET_STRING_ORDERING_MATCH, + "octetStringOrderingMatch", + ), + (&rfc4517::CASE_IGNORE_MATCH, "caseIgnoreMatch"), + (&rfc4517::TELEPHONE_NUMBER_MATCH, "telephoneNumberMatch"), + ( + &rfc4517::TELEPHONE_NUMBER_SUBSTRINGS_MATCH, + "telephoneNumberSubstringsMatch", + ), + (&rfc4517::UNIQUE_MEMBER_MATCH, "uniqueMemberMatch"), + (&rfc4517::GENERALIZED_TIME_MATCH, "generalizedTimeMatch"), + ( + &rfc4517::GENERALIZED_TIME_ORDERING_MATCH, + "generalizedTimeOrderingMatch", + ), + ( + &rfc4517::INTEGER_FIRST_COMPONENT_MATCH, + "integerFirstComponentMatch", + ), + ( + &rfc4517::CASE_IGNORE_ORDERING_MATCH, + "caseIgnoreOrderingMatch", + ), + ( + &rfc4517::OBJECT_IDENTIFIER_FIRST_COMPONENT_MATCH, + "objectIdentifierFirstComponentMatch", + ), + ( + &rfc4517::DIRECTORY_STRING_FIRST_COMPONENT_MATCH, + "directoryStringFirstComponentMatch", + ), + (&rfc4517::WORD_MATCH, "wordMatch"), + (&rfc4517::KEYWORD_MATCH, "keywordMatch"), + ( + &rfc4517::CASE_IGNORE_SUBSTRINGS_MATCH, + "caseIgnoreSubstringsMatch", + ), + (&rfc4517::CASE_EXACT_MATCH, "caseExactMatch"), + ( + &rfc4517::CASE_EXACT_ORDERING_MATCH, + "caseExactOrderingMatch", + ), + ( + &rfc4517::CASE_EXACT_SUBSTRINGS_MATCH, + "caseExactSubstringsMatch", + ), + (&rfc4517::NUMERIC_STRING_MATCH, "numericStringMatch"), + ( + &rfc4517::NUMERIC_STRING_ORDERING_MATCH, + "numericStringOrderingMatch", + ), + (&rfc4519::UID, "uid"), + (&rfc4519::USER_ID, "userId"), + (&rfc4519::DC, "DC"), + (&rfc4519::DOMAIN_COMPONENT, "domainComponent"), + (&rfc4519::UID_OBJECT, "uidObject"), + (&rfc4519::DC_OBJECT, "dcObject"), + 
(&rfc4519::O, "o"), + (&rfc4519::ORGANIZATION_NAME, "organizationName"), + (&rfc4519::OU, "ou"), + (&rfc4519::ORGANIZATIONAL_UNIT_NAME, "organizationalUnitName"), + (&rfc4519::TITLE, "title"), + (&rfc4519::DESCRIPTION, "description"), + (&rfc4519::SEARCH_GUIDE, "searchGuide"), + (&rfc4519::BUSINESS_CATEGORY, "businessCategory"), + (&rfc4519::POSTAL_ADDRESS, "postalAddress"), + (&rfc4519::POSTAL_CODE, "postalCode"), + (&rfc4519::POST_OFFICE_BOX, "postOfficeBox"), + ( + &rfc4519::PHYSICAL_DELIVERY_OFFICE_NAME, + "physicalDeliveryOfficeName", + ), + (&rfc4519::TELEPHONE_NUMBER, "telephoneNumber"), + (&rfc4519::TELEX_NUMBER, "telexNumber"), + ( + &rfc4519::TELETEX_TERMINAL_IDENTIFIER, + "teletexTerminalIdentifier", + ), + ( + &rfc4519::FACSIMILE_TELEPHONE_NUMBER, + "facsimileTelephoneNumber", + ), + (&rfc4519::X_121_ADDRESS, "x121Address"), + ( + &rfc4519::INTERNATIONALI_SDN_NUMBER, + "internationaliSDNNumber", + ), + (&rfc4519::REGISTERED_ADDRESS, "registeredAddress"), + (&rfc4519::DESTINATION_INDICATOR, "destinationIndicator"), + ( + &rfc4519::PREFERRED_DELIVERY_METHOD, + "preferredDeliveryMethod", + ), + (&rfc4519::CN, "cn"), + (&rfc4519::COMMON_NAME, "commonName"), + (&rfc4519::MEMBER, "member"), + (&rfc4519::OWNER, "owner"), + (&rfc4519::ROLE_OCCUPANT, "roleOccupant"), + (&rfc4519::SEE_ALSO, "seeAlso"), + (&rfc4519::USER_PASSWORD, "userPassword"), + (&rfc4519::SN, "sn"), + (&rfc4519::SURNAME, "surname"), + (&rfc4519::NAME, "name"), + (&rfc4519::GIVEN_NAME, "givenName"), + (&rfc4519::INITIALS, "initials"), + (&rfc4519::GENERATION_QUALIFIER, "generationQualifier"), + (&rfc4519::X_500_UNIQUE_IDENTIFIER, "x500UniqueIdentifier"), + (&rfc4519::DN_QUALIFIER, "dnQualifier"), + (&rfc4519::ENHANCED_SEARCH_GUIDE, "enhancedSearchGuide"), + (&rfc4519::DISTINGUISHED_NAME, "distinguishedName"), + (&rfc4519::SERIAL_NUMBER, "serialNumber"), + (&rfc4519::UNIQUE_MEMBER, "uniqueMember"), + (&rfc4519::HOUSE_IDENTIFIER, "houseIdentifier"), + (&rfc4519::C, "c"), + 
(&rfc4519::COUNTRY_NAME, "countryName"), + (&rfc4519::L, "L"), + (&rfc4519::LOCALITY_NAME, "localityName"), + (&rfc4519::ST, "st"), + (&rfc4519::STREET, "street"), + (&rfc4519::RESIDENTIAL_PERSON, "residentialPerson"), + (&rfc4519::APPLICATION_PROCESS, "applicationProcess"), + (&rfc4519::DEVICE, "device"), + (&rfc4519::GROUP_OF_UNIQUE_NAMES, "groupOfUniqueNames"), + (&rfc4519::COUNTRY, "country"), + (&rfc4519::LOCALITY, "locality"), + (&rfc4519::ORGANIZATION, "organization"), + (&rfc4519::ORGANIZATIONAL_UNIT, "organizationalUnit"), + (&rfc4519::PERSON, "person"), + (&rfc4519::ORGANIZATIONAL_PERSON, "organizationalPerson"), + (&rfc4519::ORGANIZATIONAL_ROLE, "organizationalRole"), + (&rfc4519::GROUP_OF_NAMES, "groupOfNames"), + (&rfc4523::CERTIFICATE_EXACT_MATCH, "certificateExactMatch"), + (&rfc4523::CERTIFICATE_MATCH, "certificateMatch"), + ( + &rfc4523::CERTIFICATE_PAIR_EXACT_MATCH, + "certificatePairExactMatch", + ), + (&rfc4523::CERTIFICATE_PAIR_MATCH, "certificatePairMatch"), + ( + &rfc4523::CERTIFICATE_LIST_EXACT_MATCH, + "certificateListExactMatch", + ), + (&rfc4523::CERTIFICATE_LIST_MATCH, "certificateListMatch"), + ( + &rfc4523::ALGORITHM_IDENTIFIER_MATCH, + "algorithmIdentifierMatch", + ), + (&rfc4523::USER_CERTIFICATE, "userCertificate"), + (&rfc4523::CA_CERTIFICATE, "cACertificate"), + ( + &rfc4523::AUTHORITY_REVOCATION_LIST, + "authorityRevocationList", + ), + ( + &rfc4523::CERTIFICATE_REVOCATION_LIST, + "certificateRevocationList", + ), + (&rfc4523::CROSS_CERTIFICATE_PAIR, "crossCertificatePair"), + (&rfc4523::SUPPORTED_ALGORITHMS, "supportedAlgorithms"), + (&rfc4523::DELTA_REVOCATION_LIST, "deltaRevocationList"), + ( + &rfc4523::STRONG_AUTHENTICATION_USER, + "strongAuthenticationUser", + ), + (&rfc4523::CERTIFICATION_AUTHORITY, "certificationAuthority"), + ( + &rfc4523::CERTIFICATION_AUTHORITY_V_2, + "certificationAuthority-V2", + ), + ( + &rfc4523::USER_SECURITY_INFORMATION, + "userSecurityInformation", + ), + (&rfc4523::CRL_DISTRIBUTION_POINT, 
"cRLDistributionPoint"), + (&rfc4523::PKI_USER, "pkiUser"), + (&rfc4523::PKI_CA, "pkiCA"), + (&rfc4523::DELTA_CRL, "deltaCRL"), + (&rfc4524::MANAGER, "manager"), + (&rfc4524::DOCUMENT_IDENTIFIER, "documentIdentifier"), + (&rfc4524::DOCUMENT_TITLE, "documentTitle"), + (&rfc4524::DOCUMENT_VERSION, "documentVersion"), + (&rfc4524::DOCUMENT_AUTHOR, "documentAuthor"), + (&rfc4524::DOCUMENT_LOCATION, "documentLocation"), + (&rfc4524::HOME_PHONE, "homePhone"), + (&rfc4524::HOME_TELEPHONE, "homeTelephone"), + (&rfc4524::SECRETARY, "secretary"), + (&rfc4524::MAIL, "mail"), + (&rfc4524::RFC_822_MAILBOX, "RFC822Mailbox"), + (&rfc4524::ASSOCIATED_DOMAIN, "associatedDomain"), + (&rfc4524::ASSOCIATED_NAME, "associatedName"), + (&rfc4524::HOME_POSTAL_ADDRESS, "homePostalAddress"), + (&rfc4524::INFO, "info"), + (&rfc4524::PERSONAL_TITLE, "personalTitle"), + (&rfc4524::MOBILE, "mobile"), + (&rfc4524::MOBILE_TELEPHONE_NUMBER, "mobileTelephoneNumber"), + (&rfc4524::PAGER, "pager"), + (&rfc4524::PAGER_TELEPHONE_NUMBER, "pagerTelephoneNumber"), + (&rfc4524::CO, "co"), + (&rfc4524::FRIENDLY_COUNTRY_NAME, "friendlyCountryName"), + (&rfc4524::UNIQUE_IDENTIFIER, "uniqueIdentifier"), + (&rfc4524::ORGANIZATIONAL_STATUS, "organizationalStatus"), + (&rfc4524::BUILDING_NAME, "buildingName"), + (&rfc4524::DRINK, "drink"), + (&rfc4524::FAVOURITE_DRINK, "favouriteDrink"), + (&rfc4524::SINGLE_LEVEL_QUALITY, "singleLevelQuality"), + (&rfc4524::DOCUMENT_PUBLISHER, "documentPublisher"), + (&rfc4524::ROOM_NUMBER, "roomNumber"), + (&rfc4524::USER_CLASS, "userClass"), + (&rfc4524::HOST, "host"), + (&rfc4524::DOMAIN, "domain"), + (&rfc4524::RFC_822_LOCAL_PART, "RFC822LocalPart"), + (&rfc4524::DOMAIN_RELATED_OBJECT, "domainRelatedObject"), + (&rfc4524::FRIENDLY_COUNTRY, "friendlyCountry"), + (&rfc4524::SIMPLE_SECURITY_OBJECT, "simpleSecurityObject"), + (&rfc4524::ACCOUNT, "account"), + (&rfc4524::DOCUMENT, "document"), + (&rfc4524::ROOM, "room"), + (&rfc4524::DOCUMENT_SERIES, "documentSeries"), + 
(&rfc4530::UUID_MATCH, "uuidMatch"), + (&rfc4530::UUID_ORDERING_MATCH, "uuidOrderingMatch"), + (&rfc4530::ENTRY_UUID, "entryUUID"), + (&rfc4876::DEFAULT_SERVER_LIST, "defaultServerList"), + (&rfc4876::DEFAULT_SEARCH_BASE, "defaultSearchBase"), + (&rfc4876::CREDENTIAL_LEVEL, "credentialLevel"), + (&rfc4876::OBJECTCLASS_MAP, "objectclassMap"), + (&rfc4876::DEFAULT_SEARCH_SCOPE, "defaultSearchScope"), + (&rfc4876::SERVICE_CREDENTIAL_LEVEL, "serviceCredentialLevel"), + ( + &rfc4876::SERVICE_SEARCH_DESCRIPTOR, + "serviceSearchDescriptor", + ), + ( + &rfc4876::SERVICE_AUTHENTICATION_METHOD, + "serviceAuthenticationMethod", + ), + (&rfc4876::DEREFERENCE_ALIASES, "dereferenceAliases"), + (&rfc4876::PREFERRED_SERVER_LIST, "preferredServerList"), + (&rfc4876::SEARCH_TIME_LIMIT, "searchTimeLimit"), + (&rfc4876::BIND_TIME_LIMIT, "bindTimeLimit"), + (&rfc4876::FOLLOW_REFERRALS, "followReferrals"), + (&rfc4876::AUTHENTICATION_METHOD, "authenticationMethod"), + (&rfc4876::PROFILE_TTL, "profileTTL"), + (&rfc4876::ATTRIBUTE_MAP, "attributeMap"), + (&rfc4876::DUA_CONFIG_PROFILE, "DUAConfigProfile"), + (&rfc5020::ENTRY_DN, "entryDN"), + (&rfc5280::PKCS_9, "pkcs-9"), + (&rfc5280::ID_PKIX, "id-pkix"), + (&rfc5280::ID_PE, "id-pe"), + ( + &rfc5280::ID_PE_AUTHORITY_INFO_ACCESS, + "id-pe-authorityInfoAccess", + ), + ( + &rfc5280::ID_PE_SUBJECT_INFO_ACCESS, + "id-pe-subjectInfoAccess", + ), + (&rfc5280::ID_QT, "id-qt"), + (&rfc5280::ID_QT_CPS, "id-qt-cps"), + (&rfc5280::ID_QT_UNOTICE, "id-qt-unotice"), + (&rfc5280::ID_KP, "id-kp"), + (&rfc5280::ID_KP_SERVER_AUTH, "id-kp-serverAuth"), + (&rfc5280::ID_KP_CLIENT_AUTH, "id-kp-clientAuth"), + (&rfc5280::ID_KP_CODE_SIGNING, "id-kp-codeSigning"), + (&rfc5280::ID_KP_EMAIL_PROTECTION, "id-kp-emailProtection"), + (&rfc5280::ID_KP_TIME_STAMPING, "id-kp-timeStamping"), + (&rfc5280::ID_KP_OCSP_SIGNING, "id-kp-OCSPSigning"), + (&rfc5280::ID_AD, "id-ad"), + (&rfc5280::ID_AD_OCSP, "id-ad-ocsp"), + (&rfc5280::ID_AD_CA_ISSUERS, "id-ad-caIssuers"), + 
(&rfc5280::ID_AD_TIME_STAMPING, "id-ad-timeStamping"), + (&rfc5280::ID_AD_CA_REPOSITORY, "id-ad-caRepository"), + (&rfc5280::HOLD_INSTRUCTION, "holdInstruction"), + (&rfc5280::ID_HOLDINSTRUCTION_NONE, "id-holdinstruction-none"), + ( + &rfc5280::ID_HOLDINSTRUCTION_CALLISSUER, + "id-holdinstruction-callissuer", + ), + ( + &rfc5280::ID_HOLDINSTRUCTION_REJECT, + "id-holdinstruction-reject", + ), + (&rfc5280::ID_CE, "id-ce"), + ( + &rfc5280::ID_CE_SUBJECT_KEY_IDENTIFIER, + "id-ce-subjectKeyIdentifier", + ), + (&rfc5280::ID_CE_KEY_USAGE, "id-ce-keyUsage"), + ( + &rfc5280::ID_CE_PRIVATE_KEY_USAGE_PERIOD, + "id-ce-privateKeyUsagePeriod", + ), + (&rfc5280::ID_CE_SUBJECT_ALT_NAME, "id-ce-subjectAltName"), + (&rfc5280::ID_CE_ISSUER_ALT_NAME, "id-ce-issuerAltName"), + (&rfc5280::ID_CE_BASIC_CONSTRAINTS, "id-ce-basicConstraints"), + (&rfc5280::ID_CE_CRL_NUMBER, "id-ce-cRLNumber"), + (&rfc5280::ID_CE_CRL_REASONS, "id-ce-cRLReasons"), + ( + &rfc5280::ID_CE_HOLD_INSTRUCTION_CODE, + "id-ce-holdInstructionCode", + ), + (&rfc5280::ID_CE_INVALIDITY_DATE, "id-ce-invalidityDate"), + ( + &rfc5280::ID_CE_DELTA_CRL_INDICATOR, + "id-ce-deltaCRLIndicator", + ), + ( + &rfc5280::ID_CE_ISSUING_DISTRIBUTION_POINT, + "id-ce-issuingDistributionPoint", + ), + ( + &rfc5280::ID_CE_CERTIFICATE_ISSUER, + "id-ce-certificateIssuer", + ), + (&rfc5280::ID_CE_NAME_CONSTRAINTS, "id-ce-nameConstraints"), + ( + &rfc5280::ID_CE_CRL_DISTRIBUTION_POINTS, + "id-ce-cRLDistributionPoints", + ), + ( + &rfc5280::ID_CE_CERTIFICATE_POLICIES, + "id-ce-certificatePolicies", + ), + (&rfc5280::ANY_POLICY, "anyPolicy"), + (&rfc5280::ID_CE_POLICY_MAPPINGS, "id-ce-policyMappings"), + ( + &rfc5280::ID_CE_AUTHORITY_KEY_IDENTIFIER, + "id-ce-authorityKeyIdentifier", + ), + ( + &rfc5280::ID_CE_POLICY_CONSTRAINTS, + "id-ce-policyConstraints", + ), + (&rfc5280::ID_CE_EXT_KEY_USAGE, "id-ce-extKeyUsage"), + (&rfc5280::ANY_EXTENDED_KEY_USAGE, "anyExtendedKeyUsage"), + (&rfc5280::ID_CE_FRESHEST_CRL, "id-ce-freshestCRL"), + 
(&rfc5280::ID_CE_INHIBIT_ANY_POLICY, "id-ce-inhibitAnyPolicy"), + ( + &rfc5280::ID_CE_SUBJECT_DIRECTORY_ATTRIBUTES, + "id-ce-subjectDirectoryAttributes", + ), + (&rfc5280::ID_AT, "id-at"), + (&rfc5911::ID_PBKDF_2, "id-PBKDF2"), + (&rfc5911::ID_DATA, "id-data"), + (&rfc5911::ID_SIGNED_DATA, "id-signedData"), + (&rfc5911::ID_ENVELOPED_DATA, "id-envelopedData"), + (&rfc5911::ID_DIGESTED_DATA, "id-digestedData"), + (&rfc5911::ID_ENCRYPTED_DATA, "id-encryptedData"), + (&rfc5911::SMIME_CAPABILITIES, "smimeCapabilities"), + (&rfc5911::ID_SMIME, "id-smime"), + (&rfc5911::ID_CT_RECEIPT, "id-ct-receipt"), + (&rfc5911::ID_CT_FIRMWARE_PACKAGE, "id-ct-firmwarePackage"), + ( + &rfc5911::ID_CT_FIRMWARE_LOAD_RECEIPT, + "id-ct-firmwareLoadReceipt", + ), + ( + &rfc5911::ID_CT_FIRMWARE_LOAD_ERROR, + "id-ct-firmwareLoadError", + ), + (&rfc5911::ID_CT_AUTH_DATA, "id-ct-authData"), + ( + &rfc5911::ID_CT_AUTH_ENVELOPED_DATA, + "id-ct-authEnvelopedData", + ), + (&rfc5911::ID_CT_CONTENT_INFO, "id-ct-contentInfo"), + (&rfc5911::ID_CAP, "id-cap"), + ( + &rfc5911::ID_CAP_PREFER_BINARY_INSIDE, + "id-cap-preferBinaryInside", + ), + (&rfc5911::ID_AA, "id-aa"), + (&rfc5911::ID_AA_RECEIPT_REQUEST, "id-aa-receiptRequest"), + (&rfc5911::ID_AA_CONTENT_REFERENCE, "id-aa-contentReference"), + (&rfc5911::ID_AA_ENCRYP_KEY_PREF, "id-aa-encrypKeyPref"), + ( + &rfc5911::ID_AA_SIGNING_CERTIFICATE, + "id-aa-signingCertificate", + ), + (&rfc5911::ID_AA_SECURITY_LABEL, "id-aa-securityLabel"), + (&rfc5911::ID_AA_ML_EXPAND_HISTORY, "id-aa-mlExpandHistory"), + ( + &rfc5911::ID_AA_FIRMWARE_PACKAGE_ID, + "id-aa-firmwarePackageID", + ), + ( + &rfc5911::ID_AA_TARGET_HARDWARE_I_DS, + "id-aa-targetHardwareIDs", + ), + (&rfc5911::ID_AA_DECRYPT_KEY_ID, "id-aa-decryptKeyID"), + (&rfc5911::ID_AA_IMPL_CRYPTO_ALGS, "id-aa-implCryptoAlgs"), + ( + &rfc5911::ID_AA_WRAPPED_FIRMWARE_KEY, + "id-aa-wrappedFirmwareKey", + ), + (&rfc5911::ID_AA_CONTENT_HINT, "id-aa-contentHint"), + ( + &rfc5911::ID_AA_COMMUNITY_IDENTIFIERS, + 
"id-aa-communityIdentifiers", + ), + ( + &rfc5911::ID_AA_FIRMWARE_PACKAGE_INFO, + "id-aa-firmwarePackageInfo", + ), + (&rfc5911::ID_AA_IMPL_COMPRESS_ALGS, "id-aa-implCompressAlgs"), + ( + &rfc5911::ID_AA_SIGNING_CERTIFICATE_V_2, + "id-aa-signingCertificateV2", + ), + (&rfc5911::ID_AA_ER_INTERNAL, "id-aa-er-internal"), + (&rfc5911::ID_AA_MSG_SIG_DIGEST, "id-aa-msgSigDigest"), + (&rfc5911::ID_AA_ER_EXTERNAL, "id-aa-er-external"), + ( + &rfc5911::ID_AA_CONTENT_IDENTIFIER, + "id-aa-contentIdentifier", + ), + (&rfc5911::ID_AA_EQUIVALENT_LABELS, "id-aa-equivalentLabels"), + (&rfc5911::ID_ALG_SSDH, "id-alg-SSDH"), + (&rfc5911::ID_ALG_ESDH, "id-alg-ESDH"), + (&rfc5911::ID_ALG_CMS_3_DE_SWRAP, "id-alg-CMS3DESwrap"), + (&rfc5911::ID_ALG_CMSRC_2_WRAP, "id-alg-CMSRC2wrap"), + (&rfc5911::ID_SKD, "id-skd"), + (&rfc5911::ID_SKD_GL_USE_KEK, "id-skd-glUseKEK"), + (&rfc5911::ID_SKD_GLA_QUERY_REQUEST, "id-skd-glaQueryRequest"), + ( + &rfc5911::ID_SKD_GLA_QUERY_RESPONSE, + "id-skd-glaQueryResponse", + ), + (&rfc5911::ID_SKD_GL_PROVIDE_CERT, "id-skd-glProvideCert"), + (&rfc5911::ID_SKD_GL_MANAGE_CERT, "id-skd-glManageCert"), + (&rfc5911::ID_SKD_GL_KEY, "id-skd-glKey"), + (&rfc5911::ID_SKD_GL_DELETE, "id-skd-glDelete"), + (&rfc5911::ID_SKD_GL_ADD_MEMBER, "id-skd-glAddMember"), + (&rfc5911::ID_SKD_GL_DELETE_MEMBER, "id-skd-glDeleteMember"), + (&rfc5911::ID_SKD_GL_REKEY, "id-skd-glRekey"), + (&rfc5911::ID_SKD_GL_ADD_OWNER, "id-skd-glAddOwner"), + (&rfc5911::ID_SKD_GL_REMOVE_OWNER, "id-skd-glRemoveOwner"), + (&rfc5911::ID_SKD_GL_KEY_COMPROMISE, "id-skd-glKeyCompromise"), + (&rfc5911::ID_SKD_GLK_REFRESH, "id-skd-glkRefresh"), + (&rfc5911::ID_CONTENT_TYPE, "id-contentType"), + (&rfc5911::ID_MESSAGE_DIGEST, "id-messageDigest"), + (&rfc5911::ID_SIGNING_TIME, "id-signingTime"), + (&rfc5911::ID_COUNTERSIGNATURE, "id-countersignature"), + (&rfc5911::RC_2_CBC, "rc2-cbc"), + (&rfc5911::DES_EDE_3_CBC, "des-ede3-cbc"), + (&rfc5911::LTANS, "ltans"), + (&rfc5911::ID_CET_SKD_FAIL_INFO, 
"id-cet-skdFailInfo"), + (&rfc5911::ID_CMC_GLA_RR, "id-cmc-glaRR"), + ( + &rfc5911::ID_CMC_GLA_SKD_ALG_REQUEST, + "id-cmc-gla-skdAlgRequest", + ), + ( + &rfc5911::ID_CMC_GLA_SKD_ALG_RESPONSE, + "id-cmc-gla-skdAlgResponse", + ), + ( + &rfc5911::ID_ON_HARDWARE_MODULE_NAME, + "id-on-hardwareModuleName", + ), + (&rfc5911::HMAC_SHA_1, "hMAC-SHA1"), + (&rfc5911::AES, "aes"), + (&rfc5911::ID_AES_128_CBC, "id-aes128-CBC"), + (&rfc5911::ID_AES_192_CBC, "id-aes192-CBC"), + (&rfc5911::ID_AES_192_WRAP, "id-aes192-wrap"), + (&rfc5911::ID_AES_192_GCM, "id-aes192-GCM"), + (&rfc5911::ID_AES_192_CCM, "id-aes192-CCM"), + (&rfc5911::ID_AES_256_CBC, "id-aes256-CBC"), + (&rfc5911::ID_AES_256_WRAP, "id-aes256-wrap"), + (&rfc5911::ID_AES_256_GCM, "id-aes256-GCM"), + (&rfc5911::ID_AES_256_CCM, "id-aes256-CCM"), + (&rfc5911::ID_AES_128_WRAP, "id-aes128-wrap"), + (&rfc5911::ID_AES_128_GCM, "id-aes128-GCM"), + (&rfc5911::ID_AES_128_CCM, "id-aes128-CCM"), + (&rfc5912::ID_DSA, "id-dsa"), + (&rfc5912::DSA_WITH_SHA_1, "dsa-with-sha1"), + (&rfc5912::ID_EC_PUBLIC_KEY, "id-ecPublicKey"), + (&rfc5912::SECP_256_R_1, "secp256r1"), + (&rfc5912::ECDSA_WITH_SHA_224, "ecdsa-with-SHA224"), + (&rfc5912::ECDSA_WITH_SHA_256, "ecdsa-with-SHA256"), + (&rfc5912::ECDSA_WITH_SHA_384, "ecdsa-with-SHA384"), + (&rfc5912::ECDSA_WITH_SHA_512, "ecdsa-with-SHA512"), + (&rfc5912::DHPUBLICNUMBER, "dhpublicnumber"), + (&rfc5912::ID_PASSWORD_BASED_MAC, "id-PasswordBasedMac"), + (&rfc5912::ID_DH_BASED_MAC, "id-DHBasedMac"), + (&rfc5912::PKCS_1, "pkcs-1"), + (&rfc5912::RSA_ENCRYPTION, "rsaEncryption"), + (&rfc5912::ID_RSASSA_PSS, "id-RSASSA-PSS"), + ( + &rfc5912::SHA_256_WITH_RSA_ENCRYPTION, + "sha256WithRSAEncryption", + ), + ( + &rfc5912::SHA_384_WITH_RSA_ENCRYPTION, + "sha384WithRSAEncryption", + ), + ( + &rfc5912::SHA_512_WITH_RSA_ENCRYPTION, + "sha512WithRSAEncryption", + ), + ( + &rfc5912::SHA_224_WITH_RSA_ENCRYPTION, + "sha224WithRSAEncryption", + ), + (&rfc5912::MD_2_WITH_RSA_ENCRYPTION, "md2WithRSAEncryption"), + 
(&rfc5912::MD_5_WITH_RSA_ENCRYPTION, "md5WithRSAEncryption"), + (&rfc5912::SHA_1_WITH_RSA_ENCRYPTION, "sha1WithRSAEncryption"), + (&rfc5912::ID_RSAES_OAEP, "id-RSAES-OAEP"), + (&rfc5912::ID_MGF_1, "id-mgf1"), + (&rfc5912::ID_P_SPECIFIED, "id-pSpecified"), + (&rfc5912::PKCS_9, "pkcs-9"), + (&rfc5912::ID_EXTENSION_REQ, "id-ExtensionReq"), + (&rfc5912::ID_SMIME, "id-smime"), + (&rfc5912::ID_CT, "id-ct"), + ( + &rfc5912::ID_CT_SCVP_CERT_VAL_REQUEST, + "id-ct-scvp-certValRequest", + ), + ( + &rfc5912::ID_CT_SCVP_CERT_VAL_RESPONSE, + "id-ct-scvp-certValResponse", + ), + ( + &rfc5912::ID_CT_SCVP_VAL_POL_REQUEST, + "id-ct-scvp-valPolRequest", + ), + ( + &rfc5912::ID_CT_SCVP_VAL_POL_RESPONSE, + "id-ct-scvp-valPolResponse", + ), + (&rfc5912::ID_CT_ENC_KEY_WITH_ID, "id-ct-encKeyWithID"), + (&rfc5912::ID_AA, "id-aa"), + (&rfc5912::ID_AA_CMC_UNSIGNED_DATA, "id-aa-cmc-unsignedData"), + (&rfc5912::ID_MD_2, "id-md2"), + (&rfc5912::ID_MD_5, "id-md5"), + (&rfc5912::SECT_163_K_1, "sect163k1"), + (&rfc5912::SECT_163_R_2, "sect163r2"), + (&rfc5912::SECT_283_K_1, "sect283k1"), + (&rfc5912::SECT_283_R_1, "sect283r1"), + (&rfc5912::SECT_233_K_1, "sect233k1"), + (&rfc5912::SECT_233_R_1, "sect233r1"), + (&rfc5912::SECP_224_R_1, "secp224r1"), + (&rfc5912::SECP_384_R_1, "secp384r1"), + (&rfc5912::SECP_521_R_1, "secp521r1"), + (&rfc5912::SECT_409_K_1, "sect409k1"), + (&rfc5912::SECT_409_R_1, "sect409r1"), + (&rfc5912::SECT_571_K_1, "sect571k1"), + (&rfc5912::SECT_571_R_1, "sect571r1"), + (&rfc5912::ID_EC_DH, "id-ecDH"), + (&rfc5912::ID_EC_MQV, "id-ecMQV"), + (&rfc5912::ID_SHA_1, "id-sha1"), + (&rfc5912::ID_PKIX, "id-pkix"), + (&rfc5912::ID_PE, "id-pe"), + ( + &rfc5912::ID_PE_AUTHORITY_INFO_ACCESS, + "id-pe-authorityInfoAccess", + ), + (&rfc5912::ID_PE_AC_PROXYING, "id-pe-ac-proxying"), + ( + &rfc5912::ID_PE_SUBJECT_INFO_ACCESS, + "id-pe-subjectInfoAccess", + ), + (&rfc5912::ID_PE_AC_AUDIT_IDENTITY, "id-pe-ac-auditIdentity"), + (&rfc5912::ID_PE_AA_CONTROLS, "id-pe-aaControls"), + 
(&rfc5912::ID_ACA, "id-aca"), + ( + &rfc5912::ID_ACA_AUTHENTICATION_INFO, + "id-aca-authenticationInfo", + ), + (&rfc5912::ID_ACA_ACCESS_IDENTITY, "id-aca-accessIdentity"), + ( + &rfc5912::ID_ACA_CHARGING_IDENTITY, + "id-aca-chargingIdentity", + ), + (&rfc5912::ID_ACA_GROUP, "id-aca-group"), + (&rfc5912::ID_ACA_ENC_ATTRS, "id-aca-encAttrs"), + (&rfc5912::ID_CCT, "id-cct"), + (&rfc5912::ID_CCT_PKI_DATA, "id-cct-PKIData"), + (&rfc5912::ID_CCT_PKI_RESPONSE, "id-cct-PKIResponse"), + (&rfc5912::ID_STC, "id-stc"), + (&rfc5912::ID_STC_BUILD_PKC_PATH, "id-stc-build-pkc-path"), + ( + &rfc5912::ID_STC_BUILD_VALID_PKC_PATH, + "id-stc-build-valid-pkc-path", + ), + ( + &rfc5912::ID_STC_BUILD_STATUS_CHECKED_PKC_PATH, + "id-stc-build-status-checked-pkc-path", + ), + (&rfc5912::ID_STC_BUILD_AA_PATH, "id-stc-build-aa-path"), + ( + &rfc5912::ID_STC_BUILD_VALID_AA_PATH, + "id-stc-build-valid-aa-path", + ), + ( + &rfc5912::ID_STC_BUILD_STATUS_CHECKED_AA_PATH, + "id-stc-build-status-checked-aa-path", + ), + ( + &rfc5912::ID_STC_STATUS_CHECK_AC_AND_BUILD_STATUS_CHECKED_AA_PATH, + "id-stc-status-check-ac-and-build-status-checked-aa-path", + ), + (&rfc5912::ID_SWB, "id-swb"), + ( + &rfc5912::ID_SWB_PKC_BEST_CERT_PATH, + "id-swb-pkc-best-cert-path", + ), + (&rfc5912::ID_SWB_PKC_CERT, "id-swb-pkc-cert"), + (&rfc5912::ID_SWB_AC_CERT, "id-swb-ac-cert"), + ( + &rfc5912::ID_SWB_PKC_ALL_CERT_PATHS, + "id-swb-pkc-all-cert-paths", + ), + ( + &rfc5912::ID_SWB_PKC_EE_REVOCATION_INFO, + "id-swb-pkc-ee-revocation-info", + ), + ( + &rfc5912::ID_SWB_PKC_C_AS_REVOCATION_INFO, + "id-swb-pkc-CAs-revocation-info", + ), + ( + &rfc5912::ID_SWB_PKC_REVOCATION_INFO, + "id-swb-pkc-revocation-info", + ), + ( + &rfc5912::ID_SWB_PKC_PUBLIC_KEY_INFO, + "id-swb-pkc-public-key-info", + ), + (&rfc5912::ID_SWB_AA_CERT_PATH, "id-swb-aa-cert-path"), + ( + &rfc5912::ID_SWB_AA_REVOCATION_INFO, + "id-swb-aa-revocation-info", + ), + ( + &rfc5912::ID_SWB_AC_REVOCATION_INFO, + "id-swb-ac-revocation-info", + ), + ( + 
&rfc5912::ID_SWB_RELAYED_RESPONSES, + "id-swb-relayed-responses", + ), + (&rfc5912::ID_SVP, "id-svp"), + ( + &rfc5912::ID_SVP_DEFAULT_VAL_POLICY, + "id-svp-defaultValPolicy", + ), + (&rfc5912::ID_SVP_NAME_VAL_ALG, "id-svp-nameValAlg"), + (&rfc5912::ID_SVP_BASIC_VAL_ALG, "id-svp-basicValAlg"), + (&rfc5912::NAME_COMP_ALG_SET, "NameCompAlgSet"), + (&rfc5912::ID_NVA_DN_COMP_ALG, "id-nva-dnCompAlg"), + (&rfc5912::ID_QT, "id-qt"), + (&rfc5912::ID_QT_CPS, "id-qt-cps"), + (&rfc5912::ID_QT_UNOTICE, "id-qt-unotice"), + (&rfc5912::ID_KP, "id-kp"), + (&rfc5912::ID_KP_SERVER_AUTH, "id-kp-serverAuth"), + (&rfc5912::ID_KP_SCVP_SERVER, "id-kp-scvpServer"), + (&rfc5912::ID_KP_SCVP_CLIENT, "id-kp-scvpClient"), + (&rfc5912::ID_KP_CLIENT_AUTH, "id-kp-clientAuth"), + (&rfc5912::ID_KP_CODE_SIGNING, "id-kp-codeSigning"), + (&rfc5912::ID_KP_EMAIL_PROTECTION, "id-kp-emailProtection"), + (&rfc5912::ID_KP_TIME_STAMPING, "id-kp-timeStamping"), + (&rfc5912::ID_KP_OCSP_SIGNING, "id-kp-OCSPSigning"), + (&rfc5912::ID_IT, "id-it"), + (&rfc5912::ID_IT_CA_PROT_ENC_CERT, "id-it-caProtEncCert"), + (&rfc5912::ID_IT_KEY_PAIR_PARAM_REQ, "id-it-keyPairParamReq"), + (&rfc5912::ID_IT_KEY_PAIR_PARAM_REP, "id-it-keyPairParamRep"), + (&rfc5912::ID_IT_REV_PASSPHRASE, "id-it-revPassphrase"), + (&rfc5912::ID_IT_IMPLICIT_CONFIRM, "id-it-implicitConfirm"), + (&rfc5912::ID_IT_CONFIRM_WAIT_TIME, "id-it-confirmWaitTime"), + (&rfc5912::ID_IT_ORIG_PKI_MESSAGE, "id-it-origPKIMessage"), + (&rfc5912::ID_IT_SUPP_LANG_TAGS, "id-it-suppLangTags"), + ( + &rfc5912::ID_IT_SIGN_KEY_PAIR_TYPES, + "id-it-signKeyPairTypes", + ), + (&rfc5912::ID_IT_ENC_KEY_PAIR_TYPES, "id-it-encKeyPairTypes"), + (&rfc5912::ID_IT_PREFERRED_SYMM_ALG, "id-it-preferredSymmAlg"), + (&rfc5912::ID_IT_CA_KEY_UPDATE_INFO, "id-it-caKeyUpdateInfo"), + (&rfc5912::ID_IT_CURRENT_CRL, "id-it-currentCRL"), + (&rfc5912::ID_IT_UNSUPPORTED_OI_DS, "id-it-unsupportedOIDs"), + (&rfc5912::ID_AD, "id-ad"), + (&rfc5912::ID_AD_OCSP, "id-ad-ocsp"), + 
(&rfc5912::ID_AD_CA_ISSUERS, "id-ad-caIssuers"), + (&rfc5912::ID_AD_TIME_STAMPING, "id-ad-timeStamping"), + (&rfc5912::ID_AD_CA_REPOSITORY, "id-ad-caRepository"), + (&rfc5912::ID_PKIP, "id-pkip"), + (&rfc5912::ID_REG_CTRL, "id-regCtrl"), + (&rfc5912::ID_REG_CTRL_REG_TOKEN, "id-regCtrl-regToken"), + ( + &rfc5912::ID_REG_CTRL_AUTHENTICATOR, + "id-regCtrl-authenticator", + ), + ( + &rfc5912::ID_REG_CTRL_PKI_PUBLICATION_INFO, + "id-regCtrl-pkiPublicationInfo", + ), + ( + &rfc5912::ID_REG_CTRL_PKI_ARCHIVE_OPTIONS, + "id-regCtrl-pkiArchiveOptions", + ), + (&rfc5912::ID_REG_CTRL_OLD_CERT_ID, "id-regCtrl-oldCertID"), + ( + &rfc5912::ID_REG_CTRL_PROTOCOL_ENCR_KEY, + "id-regCtrl-protocolEncrKey", + ), + (&rfc5912::ID_REG_INFO, "id-regInfo"), + (&rfc5912::ID_REG_INFO_UTF_8_PAIRS, "id-regInfo-utf8Pairs"), + (&rfc5912::ID_REG_INFO_CERT_REQ, "id-regInfo-certReq"), + (&rfc5912::ID_ALG_NO_SIGNATURE, "id-alg-noSignature"), + (&rfc5912::ID_CMC, "id-cmc"), + (&rfc5912::ID_CMC_STATUS_INFO, "id-cmc-statusInfo"), + (&rfc5912::ID_CMC_DECRYPTED_POP, "id-cmc-decryptedPOP"), + (&rfc5912::ID_CMC_LRA_POP_WITNESS, "id-cmc-lraPOPWitness"), + (&rfc5912::ID_CMC_GET_CERT, "id-cmc-getCert"), + (&rfc5912::ID_CMC_GET_CRL, "id-cmc-getCRL"), + (&rfc5912::ID_CMC_REVOKE_REQUEST, "id-cmc-revokeRequest"), + (&rfc5912::ID_CMC_REG_INFO, "id-cmc-regInfo"), + (&rfc5912::ID_CMC_RESPONSE_INFO, "id-cmc-responseInfo"), + (&rfc5912::ID_CMC_IDENTIFICATION, "id-cmc-identification"), + (&rfc5912::ID_CMC_QUERY_PENDING, "id-cmc-queryPending"), + (&rfc5912::ID_CMC_POP_LINK_RANDOM, "id-cmc-popLinkRandom"), + (&rfc5912::ID_CMC_POP_LINK_WITNESS, "id-cmc-popLinkWitness"), + ( + &rfc5912::ID_CMC_CONFIRM_CERT_ACCEPTANCE, + "id-cmc-confirmCertAcceptance", + ), + (&rfc5912::ID_CMC_STATUS_INFO_V_2, "id-cmc-statusInfoV2"), + (&rfc5912::ID_CMC_TRUSTED_ANCHORS, "id-cmc-trustedAnchors"), + (&rfc5912::ID_CMC_AUTH_DATA, "id-cmc-authData"), + (&rfc5912::ID_CMC_BATCH_REQUESTS, "id-cmc-batchRequests"), + (&rfc5912::ID_CMC_BATCH_RESPONSES, 
"id-cmc-batchResponses"), + (&rfc5912::ID_CMC_IDENTITY_PROOF, "id-cmc-identityProof"), + (&rfc5912::ID_CMC_PUBLISH_CERT, "id-cmc-publishCert"), + (&rfc5912::ID_CMC_MOD_CERT_TEMPLATE, "id-cmc-modCertTemplate"), + ( + &rfc5912::ID_CMC_CONTROL_PROCESSED, + "id-cmc-controlProcessed", + ), + ( + &rfc5912::ID_CMC_IDENTITY_PROOF_V_2, + "id-cmc-identityProofV2", + ), + ( + &rfc5912::ID_CMC_POP_LINK_WITNESS_V_2, + "id-cmc-popLinkWitnessV2", + ), + (&rfc5912::ID_CMC_DATA_RETURN, "id-cmc-dataReturn"), + (&rfc5912::ID_CMC_TRANSACTION_ID, "id-cmc-transactionId"), + (&rfc5912::ID_CMC_SENDER_NONCE, "id-cmc-senderNonce"), + (&rfc5912::ID_CMC_RECIPIENT_NONCE, "id-cmc-recipientNonce"), + (&rfc5912::ID_CMC_ADD_EXTENSIONS, "id-cmc-addExtensions"), + (&rfc5912::ID_CMC_ENCRYPTED_POP, "id-cmc-encryptedPOP"), + ( + &rfc5912::ID_KEY_EXCHANGE_ALGORITHM, + "id-keyExchangeAlgorithm", + ), + (&rfc5912::ID_SHA_256, "id-sha256"), + (&rfc5912::ID_SHA_384, "id-sha384"), + (&rfc5912::ID_SHA_512, "id-sha512"), + (&rfc5912::ID_SHA_224, "id-sha224"), + (&rfc5912::DSA_WITH_SHA_224, "dsa-with-sha224"), + (&rfc5912::DSA_WITH_SHA_256, "dsa-with-sha256"), + (&rfc5912::HOLD_INSTRUCTION, "holdInstruction"), + (&rfc5912::ID_HOLDINSTRUCTION_NONE, "id-holdinstruction-none"), + ( + &rfc5912::ID_HOLDINSTRUCTION_CALLISSUER, + "id-holdinstruction-callissuer", + ), + ( + &rfc5912::ID_HOLDINSTRUCTION_REJECT, + "id-holdinstruction-reject", + ), + (&rfc5912::ID_CE, "id-ce"), + ( + &rfc5912::ID_CE_SUBJECT_KEY_IDENTIFIER, + "id-ce-subjectKeyIdentifier", + ), + (&rfc5912::ID_CE_KEY_USAGE, "id-ce-keyUsage"), + ( + &rfc5912::ID_CE_PRIVATE_KEY_USAGE_PERIOD, + "id-ce-privateKeyUsagePeriod", + ), + (&rfc5912::ID_CE_SUBJECT_ALT_NAME, "id-ce-subjectAltName"), + (&rfc5912::ID_CE_ISSUER_ALT_NAME, "id-ce-issuerAltName"), + (&rfc5912::ID_CE_BASIC_CONSTRAINTS, "id-ce-basicConstraints"), + (&rfc5912::ID_CE_CRL_NUMBER, "id-ce-cRLNumber"), + (&rfc5912::ID_CE_CRL_REASONS, "id-ce-cRLReasons"), + ( + &rfc5912::ID_CE_HOLD_INSTRUCTION_CODE, 
+ "id-ce-holdInstructionCode", + ), + (&rfc5912::ID_CE_INVALIDITY_DATE, "id-ce-invalidityDate"), + ( + &rfc5912::ID_CE_DELTA_CRL_INDICATOR, + "id-ce-deltaCRLIndicator", + ), + ( + &rfc5912::ID_CE_ISSUING_DISTRIBUTION_POINT, + "id-ce-issuingDistributionPoint", + ), + ( + &rfc5912::ID_CE_CERTIFICATE_ISSUER, + "id-ce-certificateIssuer", + ), + (&rfc5912::ID_CE_NAME_CONSTRAINTS, "id-ce-nameConstraints"), + ( + &rfc5912::ID_CE_CRL_DISTRIBUTION_POINTS, + "id-ce-cRLDistributionPoints", + ), + ( + &rfc5912::ID_CE_CERTIFICATE_POLICIES, + "id-ce-certificatePolicies", + ), + (&rfc5912::ID_CE_POLICY_MAPPINGS, "id-ce-policyMappings"), + ( + &rfc5912::ID_CE_AUTHORITY_KEY_IDENTIFIER, + "id-ce-authorityKeyIdentifier", + ), + ( + &rfc5912::ID_CE_POLICY_CONSTRAINTS, + "id-ce-policyConstraints", + ), + (&rfc5912::ID_CE_EXT_KEY_USAGE, "id-ce-extKeyUsage"), + (&rfc5912::ANY_EXTENDED_KEY_USAGE, "anyExtendedKeyUsage"), + (&rfc5912::ID_CE_FRESHEST_CRL, "id-ce-freshestCRL"), + (&rfc5912::ID_CE_INHIBIT_ANY_POLICY, "id-ce-inhibitAnyPolicy"), + ( + &rfc5912::ID_CE_TARGET_INFORMATION, + "id-ce-targetInformation", + ), + (&rfc5912::ID_CE_NO_REV_AVAIL, "id-ce-noRevAvail"), + ( + &rfc5912::ID_CE_SUBJECT_DIRECTORY_ATTRIBUTES, + "id-ce-subjectDirectoryAttributes", + ), + (&rfc5912::ID_AT, "id-at"), + (&rfc5912::ID_AT_ROLE, "id-at-role"), + (&rfc6109::LDIF_LOCATION_URL_OBJECT, "LDIFLocationURLObject"), + (&rfc6109::PROVIDER, "provider"), + ( + &rfc6109::PROVIDER_CERTIFICATE_HASH, + "providerCertificateHash", + ), + (&rfc6109::PROVIDER_CERTIFICATE, "providerCertificate"), + (&rfc6109::PROVIDER_NAME, "providerName"), + (&rfc6109::MAIL_RECEIPT, "mailReceipt"), + (&rfc6109::MANAGED_DOMAINS, "managedDomains"), + (&rfc6109::LDIF_LOCATION_URL, "LDIFLocationURL"), + (&rfc6109::PROVIDER_UNIT, "providerUnit"), + (&rfc6268::RSADSI, "rsadsi"), + (&rfc6268::ID_DATA, "id-data"), + (&rfc6268::ID_SIGNED_DATA, "id-signedData"), + (&rfc6268::ID_ENVELOPED_DATA, "id-envelopedData"), + (&rfc6268::ID_DIGESTED_DATA, 
"id-digestedData"), + (&rfc6268::ID_ENCRYPTED_DATA, "id-encryptedData"), + ( + &rfc6268::ID_CT_CONTENT_COLLECTION, + "id-ct-contentCollection", + ), + (&rfc6268::ID_CT_AUTH_DATA, "id-ct-authData"), + (&rfc6268::ID_CT_CONTENT_WITH_ATTRS, "id-ct-contentWithAttrs"), + ( + &rfc6268::ID_CT_AUTH_ENVELOPED_DATA, + "id-ct-authEnvelopedData", + ), + (&rfc6268::ID_CT_CONTENT_INFO, "id-ct-contentInfo"), + (&rfc6268::ID_CT_COMPRESSED_DATA, "id-ct-compressedData"), + ( + &rfc6268::ID_AA_BINARY_SIGNING_TIME, + "id-aa-binarySigningTime", + ), + (&rfc6268::ID_ALG_ZLIB_COMPRESS, "id-alg-zlibCompress"), + ( + &rfc6268::ID_AA_MULTIPLE_SIGNATURES, + "id-aa-multipleSignatures", + ), + (&rfc6268::ID_CONTENT_TYPE, "id-contentType"), + (&rfc6268::ID_MESSAGE_DIGEST, "id-messageDigest"), + (&rfc6268::ID_SIGNING_TIME, "id-signingTime"), + (&rfc6268::ID_COUNTERSIGNATURE, "id-countersignature"), + (&rfc6268::DIGEST_ALGORITHM, "digestAlgorithm"), + (&rfc6268::ID_HMAC_WITH_SHA_384, "id-hmacWithSHA384"), + (&rfc6268::ID_HMAC_WITH_SHA_512, "id-hmacWithSHA512"), + (&rfc6268::ID_HMAC_WITH_SHA_224, "id-hmacWithSHA224"), + (&rfc6268::ID_HMAC_WITH_SHA_256, "id-hmacWithSHA256"), + (&rfc6960::ID_PKIX_OCSP, "id-pkix-ocsp"), + (&rfc6960::ID_PKIX_OCSP_BASIC, "id-pkix-ocsp-basic"), + (&rfc6960::ID_PKIX_OCSP_NONCE, "id-pkix-ocsp-nonce"), + (&rfc6960::ID_PKIX_OCSP_CRL, "id-pkix-ocsp-crl"), + (&rfc6960::ID_PKIX_OCSP_RESPONSE, "id-pkix-ocsp-response"), + (&rfc6960::ID_PKIX_OCSP_NOCHECK, "id-pkix-ocsp-nocheck"), + ( + &rfc6960::ID_PKIX_OCSP_ARCHIVE_CUTOFF, + "id-pkix-ocsp-archive-cutoff", + ), + ( + &rfc6960::ID_PKIX_OCSP_SERVICE_LOCATOR, + "id-pkix-ocsp-service-locator", + ), + ( + &rfc6960::ID_PKIX_OCSP_PREF_SIG_ALGS, + "id-pkix-ocsp-pref-sig-algs", + ), + ( + &rfc6960::ID_PKIX_OCSP_EXTENDED_REVOKE, + "id-pkix-ocsp-extended-revoke", + ), + (&rfc6962::GOOGLE, "google"), + (&rfc6962::CT_PRECERT_SCTS, "ct-precert-scts"), + (&rfc6962::CT_PRECERT_POISON, "ct-precert-poison"), + (&rfc6962::CT_PRECERT_SIGNING_CERT, 
"ct-precert-signing-cert"), + (&rfc7107::ID_SMIME, "id-smime"), + (&rfc7107::ID_MOD, "id-mod"), + (&rfc7107::ID_CT, "id-ct"), + (&rfc7107::ID_EIT, "id-eit"), + (&rfc7107::ID_CAP, "id-cap"), + (&rfc7107::ID_PSKC, "id-pskc"), + (&rfc7107::ID_AA, "id-aa"), + (&rfc7107::ID_ALG, "id-alg"), + (&rfc7107::ID_CD, "id-cd"), + (&rfc7107::ID_SPQ, "id-spq"), + (&rfc7107::ID_CTI, "id-cti"), + (&rfc7107::ID_TSP, "id-tsp"), + (&rfc7107::ID_SKD, "id-skd"), + (&rfc7107::ID_STI, "id-sti"), + (&rfc7299::ID_PKIX, "id-pkix"), + (&rfc7299::ID_MOD, "id-mod"), + (&rfc7299::ID_PE, "id-pe"), + (&rfc7299::ID_ACA, "id-aca"), + (&rfc7299::ID_QCS, "id-qcs"), + (&rfc7299::ID_CCT, "id-cct"), + (&rfc7299::ID_TEST, "id-TEST"), + (&rfc7299::ID_CP, "id-cp"), + (&rfc7299::ID_CET, "id-cet"), + (&rfc7299::ID_RI, "id-ri"), + (&rfc7299::ID_SCT, "id-sct"), + (&rfc7299::ID_SWB, "id-swb"), + (&rfc7299::ID_SVP, "id-svp"), + (&rfc7299::ID_NVAE, "id-nvae"), + (&rfc7299::ID_BVAE, "id-bvae"), + (&rfc7299::ID_DNVAE, "id-dnvae"), + (&rfc7299::ID_QT, "id-qt"), + (&rfc7299::ID_LOGO, "id-logo"), + (&rfc7299::ID_PPL, "id-ppl"), + (&rfc7299::ID_MR, "id-mr"), + (&rfc7299::ID_SKIS, "id-skis"), + (&rfc7299::ID_KP, "id-kp"), + (&rfc7299::ID_IT, "id-it"), + (&rfc7299::ID_AD, "id-ad"), + (&rfc7299::ID_PKIX_OCSP, "id-pkix-ocsp"), + (&rfc7299::ID_PKIP, "id-pkip"), + (&rfc7299::ID_REG_CTRL, "id-regCtrl"), + (&rfc7299::ID_REG_INFO, "id-regInfo"), + (&rfc7299::ID_ALG, "id-alg"), + (&rfc7299::ID_CMC, "id-cmc"), + (&rfc7299::ID_CMC_GLA_RR, "id-cmc-glaRR"), + (&rfc7299::ID_ON, "id-on"), + (&rfc7299::ID_PDA, "id-pda"), + (&rfc7532::FEDFS_UUID, "fedfsUuid"), + (&rfc7532::FEDFS_FSL_PORT, "fedfsFslPort"), + (&rfc7532::FEDFS_NFS_PATH, "fedfsNfsPath"), + ( + &rfc7532::FEDFS_NSDB_CONTAINER_INFO, + "fedfsNsdbContainerInfo", + ), + (&rfc7532::FEDFS_FSN, "fedfsFsn"), + (&rfc7532::FEDFS_FSL, "fedfsFsl"), + (&rfc7532::FEDFS_NFS_FSL, "fedfsNfsFsl"), + (&rfc7532::FEDFS_NFS_MAJOR_VER, "fedfsNfsMajorVer"), + (&rfc7532::FEDFS_NFS_MINOR_VER, 
"fedfsNfsMinorVer"), + (&rfc7532::FEDFS_NFS_CURRENCY, "fedfsNfsCurrency"), + ( + &rfc7532::FEDFS_NFS_GEN_FLAG_WRITABLE, + "fedfsNfsGenFlagWritable", + ), + (&rfc7532::FEDFS_NFS_GEN_FLAG_GOING, "fedfsNfsGenFlagGoing"), + (&rfc7532::FEDFS_NFS_GEN_FLAG_SPLIT, "fedfsNfsGenFlagSplit"), + (&rfc7532::FEDFS_NFS_TRANS_FLAG_RDMA, "fedfsNfsTransFlagRdma"), + (&rfc7532::FEDFS_NFS_CLASS_SIMUL, "fedfsNfsClassSimul"), + (&rfc7532::FEDFS_NFS_CLASS_HANDLE, "fedfsNfsClassHandle"), + (&rfc7532::FEDFS_FSL_TTL, "fedfsFslTTL"), + (&rfc7532::FEDFS_NFS_CLASS_FILEID, "fedfsNfsClassFileid"), + (&rfc7532::FEDFS_NFS_CLASS_WRITEVER, "fedfsNfsClassWritever"), + (&rfc7532::FEDFS_NFS_CLASS_CHANGE, "fedfsNfsClassChange"), + (&rfc7532::FEDFS_NFS_CLASS_READDIR, "fedfsNfsClassReaddir"), + (&rfc7532::FEDFS_NFS_READ_RANK, "fedfsNfsReadRank"), + (&rfc7532::FEDFS_NFS_READ_ORDER, "fedfsNfsReadOrder"), + (&rfc7532::FEDFS_NFS_WRITE_RANK, "fedfsNfsWriteRank"), + (&rfc7532::FEDFS_NFS_WRITE_ORDER, "fedfsNfsWriteOrder"), + (&rfc7532::FEDFS_NFS_VAR_SUB, "fedfsNfsVarSub"), + (&rfc7532::FEDFS_NFS_VALID_FOR, "fedfsNfsValidFor"), + (&rfc7532::FEDFS_ANNOTATION, "fedfsAnnotation"), + (&rfc7532::FEDFS_NFS_URI, "fedfsNfsURI"), + (&rfc7532::FEDFS_DESCR, "fedfsDescr"), + (&rfc7532::FEDFS_NCE_DN, "fedfsNceDN"), + (&rfc7532::FEDFS_FSN_TTL, "fedfsFsnTTL"), + (&rfc7532::FEDFS_NET_ADDR, "fedfsNetAddr"), + (&rfc7532::FEDFS_NET_PORT, "fedfsNetPort"), + (&rfc7532::FEDFS_FSN_UUID, "fedfsFsnUuid"), + (&rfc7532::FEDFS_NSDB_NAME, "fedfsNsdbName"), + (&rfc7532::FEDFS_NSDB_PORT, "fedfsNsdbPort"), + (&rfc7532::FEDFS_NCE_PREFIX, "fedfsNcePrefix"), + (&rfc7532::FEDFS_FSL_UUID, "fedfsFslUuid"), + (&rfc7532::FEDFS_FSL_HOST, "fedfsFslHost"), + (&rfc7612::PRINTER_DEVICE_ID, "printer-device-id"), + ( + &rfc7612::PRINTER_DEVICE_SERVICE_COUNT, + "printer-device-service-count", + ), + (&rfc7612::PRINTER_UUID, "printer-uuid"), + (&rfc7612::PRINTER_CHARGE_INFO, "printer-charge-info"), + (&rfc7612::PRINTER_CHARGE_INFO_URI, 
"printer-charge-info-uri"), + (&rfc7612::PRINTER_GEO_LOCATION, "printer-geo-location"), + ( + &rfc7612::PRINTER_IPP_FEATURES_SUPPORTED, + "printer-ipp-features-supported", + ), + (&rfc8284::JID_OBJECT, "JIDObject"), + (&rfc8284::JID, "jid"), + (&rfc8410::ID_EDWARDS_CURVE_ALGS, "id-edwards-curve-algs"), + (&rfc8410::ID_X_25519, "id-X25519"), + (&rfc8410::ID_X_448, "id-X448"), + (&rfc8410::ID_ED_25519, "id-Ed25519"), + (&rfc8410::ID_ED_448, "id-Ed448"), + (&rfc8894::ID_VERI_SIGN, "id-VeriSign"), + (&rfc8894::ID_PKI, "id-pki"), + (&rfc8894::ID_ATTRIBUTES, "id-attributes"), + (&rfc8894::ID_MESSAGE_TYPE, "id-messageType"), + (&rfc8894::ID_PKI_STATUS, "id-pkiStatus"), + (&rfc8894::ID_FAIL_INFO, "id-failInfo"), + (&rfc8894::ID_SENDER_NONCE, "id-senderNonce"), + (&rfc8894::ID_RECIPIENT_NONCE, "id-recipientNonce"), + (&rfc8894::ID_TRANSACTION_ID, "id-transactionID"), +]); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/encoder.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/encoder.rs new file mode 100644 index 0000000000000000000000000000000000000000..4df3aab4507d3864e7a2ee4974207ddc523f3922 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/encoder.rs @@ -0,0 +1,165 @@ +//! OID encoder with `const` support. + +use crate::{ + arcs::{ARC_MAX_FIRST, ARC_MAX_SECOND}, + Arc, Error, ObjectIdentifier, Result, +}; + +/// BER/DER encoder +#[derive(Debug)] +pub(crate) struct Encoder { + /// Current state + state: State, + + /// Bytes of the OID being encoded in-progress + bytes: [u8; ObjectIdentifier::MAX_SIZE], + + /// Current position within the byte buffer + cursor: usize, +} + +/// Current state of the encoder +#[derive(Debug)] +enum State { + /// Initial state - no arcs yet encoded + Initial, + + /// First arc parsed + FirstArc(Arc), + + /// Encoding base 128 body of the OID + Body, +} + +impl Encoder { + /// Create a new encoder initialized to an empty default state. 
+ pub(crate) const fn new() -> Self { + Self { + state: State::Initial, + bytes: [0u8; ObjectIdentifier::MAX_SIZE], + cursor: 0, + } + } + + /// Extend an existing OID. + pub(crate) const fn extend(oid: ObjectIdentifier) -> Self { + Self { + state: State::Body, + bytes: oid.bytes, + cursor: oid.length as usize, + } + } + + /// Encode an [`Arc`] as base 128 into the internal buffer. + pub(crate) const fn arc(mut self, arc: Arc) -> Result { + match self.state { + State::Initial => { + if arc > ARC_MAX_FIRST { + return Err(Error::ArcInvalid { arc }); + } + + self.state = State::FirstArc(arc); + Ok(self) + } + // Ensured not to overflow by `ARC_MAX_SECOND` check + #[allow(clippy::integer_arithmetic)] + State::FirstArc(first_arc) => { + if arc > ARC_MAX_SECOND { + return Err(Error::ArcInvalid { arc }); + } + + self.state = State::Body; + self.bytes[0] = (first_arc * (ARC_MAX_SECOND + 1)) as u8 + arc as u8; + self.cursor = 1; + Ok(self) + } + // TODO(tarcieri): finer-grained overflow safety / checked arithmetic + #[allow(clippy::integer_arithmetic)] + State::Body => { + // Total number of bytes in encoded arc - 1 + let nbytes = base128_len(arc); + + // Shouldn't overflow on any 16-bit+ architectures + if self.cursor + nbytes + 1 >= ObjectIdentifier::MAX_SIZE { + return Err(Error::Length); + } + + let new_cursor = self.cursor + nbytes + 1; + + // TODO(tarcieri): use `?` when stable in `const fn` + match self.encode_base128_byte(arc, nbytes, false) { + Ok(mut encoder) => { + encoder.cursor = new_cursor; + Ok(encoder) + } + Err(err) => Err(err), + } + } + } + } + + /// Finish encoding an OID. + pub(crate) const fn finish(self) -> Result { + if self.cursor >= 2 { + Ok(ObjectIdentifier { + bytes: self.bytes, + length: self.cursor as u8, + }) + } else { + Err(Error::NotEnoughArcs) + } + } + + /// Encode a single byte of a Base 128 value. 
+ const fn encode_base128_byte(mut self, mut n: u32, i: usize, continued: bool) -> Result { + let mask = if continued { 0b10000000 } else { 0 }; + + // Underflow checked by branch + #[allow(clippy::integer_arithmetic)] + if n > 0x80 { + self.bytes[checked_add!(self.cursor, i)] = (n & 0b1111111) as u8 | mask; + n >>= 7; + + if i > 0 { + self.encode_base128_byte(n, i.saturating_sub(1), true) + } else { + Err(Error::Base128) + } + } else { + self.bytes[self.cursor] = n as u8 | mask; + Ok(self) + } + } +} + +/// Compute the length - 1 of an arc when encoded in base 128. +const fn base128_len(arc: Arc) -> usize { + match arc { + 0..=0x7f => 0, + 0x80..=0x3fff => 1, + 0x4000..=0x1fffff => 2, + 0x200000..=0x1fffffff => 3, + _ => 4, + } +} + +#[cfg(test)] +mod tests { + use super::Encoder; + use hex_literal::hex; + + /// OID `1.2.840.10045.2.1` encoded as ASN.1 BER/DER + const EXAMPLE_OID_BER: &[u8] = &hex!("2A8648CE3D0201"); + + #[test] + fn encode() { + let encoder = Encoder::new(); + let encoder = encoder.arc(1).unwrap(); + let encoder = encoder.arc(2).unwrap(); + let encoder = encoder.arc(840).unwrap(); + let encoder = encoder.arc(10045).unwrap(); + let encoder = encoder.arc(2).unwrap(); + let encoder = encoder.arc(1).unwrap(); + assert_eq!(&encoder.bytes[..encoder.cursor], EXAMPLE_OID_BER); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/error.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..528ce785c4d58ca1509c00de27658e6cb02d9a94 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/error.rs @@ -0,0 +1,83 @@ +//! Error types + +use crate::Arc; +use core::fmt; + +/// Result type +pub type Result = core::result::Result; + +/// OID errors. +#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord)] +pub enum Error { + /// Arc exceeds allowed range (i.e. 
for first or second OID) + ArcInvalid { + /// Arc value that is erroneous. + arc: Arc, + }, + + /// Arc is too big (exceeds 32-bit limits of this library). + /// + /// Technically the size of an arc is not constrained by X.660, however + /// this library has elected to use `u32` as the arc representation as + /// sufficient for PKIX/PKCS usages. + ArcTooBig, + + /// Base 128 encoding error (used in BER/DER serialization of arcs). + Base128, + + /// Expected a digit, but was provided something else. + DigitExpected { + /// What was found instead of a digit + actual: u8, + }, + + /// Input data is empty. + Empty, + + /// OID length is invalid (too short or too long). + Length, + + /// Minimum 3 arcs required. + NotEnoughArcs, + + /// Trailing `.` character at end of input. + TrailingDot, +} + +impl Error { + /// Escalate this error into a panic. + /// + /// This is a workaround until `Result::unwrap` is allowed in `const fn`. + #[allow(clippy::panic)] + pub(crate) const fn panic(self) -> ! { + match self { + Error::ArcInvalid { .. } | Error::ArcTooBig => panic!("OID contains invalid arc"), + Error::Base128 => panic!("OID contains arc with invalid base 128 encoding"), + Error::DigitExpected { .. 
} => panic!("OID expected to start with digit"), + Error::Empty => panic!("OID value is empty"), + Error::Length => panic!("OID length invalid"), + Error::NotEnoughArcs => panic!("OID requires minimum of 3 arcs"), + Error::TrailingDot => panic!("OID ends with invalid trailing '.'"), + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Error::ArcInvalid { arc } => write!(f, "OID contains out-of-range arc: {}", arc), + Error::ArcTooBig => f.write_str("OID contains arc which is larger than 32-bits"), + Error::Base128 => f.write_str("OID contains arc with invalid base 128 encoding"), + Error::DigitExpected { actual } => { + write!(f, "expected digit, got '{}'", char::from(actual)) + } + Error::Empty => f.write_str("OID value is empty"), + Error::Length => f.write_str("OID length invalid"), + Error::NotEnoughArcs => f.write_str("OID requires minimum of 3 arcs"), + Error::TrailingDot => f.write_str("OID ends with invalid trailing '.'"), + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for Error {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..5bdef085dfe3263ce02f997e850a292844e2a72e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/lib.rs @@ -0,0 +1,280 @@ +#![no_std] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![doc = include_str!("../README.md")] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg", + html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg" +)] +#![forbid(unsafe_code)] +#![warn( + clippy::integer_arithmetic, + clippy::panic, + clippy::panic_in_result_fn, + clippy::unwrap_used, + missing_docs, + rust_2018_idioms, + unused_lifetimes, + unused_qualifications +)] 
+ +#[cfg(feature = "std")] +extern crate std; + +#[macro_use] +mod checked; + +mod arcs; +mod encoder; +mod error; +mod parser; + +#[cfg(feature = "db")] +#[cfg_attr(docsrs, doc(cfg(feature = "db")))] +pub mod db; + +pub use crate::{ + arcs::{Arc, Arcs}, + error::{Error, Result}, +}; + +use crate::encoder::Encoder; +use core::{fmt, str::FromStr}; + +/// A trait which associates an OID with a type. +pub trait AssociatedOid { + /// The OID associated with this type. + const OID: ObjectIdentifier; +} + +/// A trait which associates a dynamic, `&self`-dependent OID with a type, +/// which may change depending on the type's value. +/// +/// This trait is object safe and auto-impl'd for any types which impl +/// [`AssociatedOid`]. +pub trait DynAssociatedOid { + /// Get the OID associated with this value. + fn oid(&self) -> ObjectIdentifier; +} + +impl DynAssociatedOid for T { + fn oid(&self) -> ObjectIdentifier { + T::OID + } +} + +/// Object identifier (OID). +/// +/// OIDs are hierarchical structures consisting of "arcs", i.e. integer +/// identifiers. +/// +/// # Validity +/// +/// In order for an OID to be considered valid by this library, it must meet +/// the following criteria: +/// +/// - The OID MUST have at least 3 arcs +/// - The first arc MUST be within the range 0-2 +/// - The second arc MUST be within the range 0-39 +/// - The BER/DER encoding of the OID MUST be shorter than +/// [`ObjectIdentifier::MAX_SIZE`] +#[derive(Copy, Clone, Eq, Hash, PartialEq, PartialOrd, Ord)] +pub struct ObjectIdentifier { + /// Length in bytes + length: u8, + + /// Array containing BER/DER-serialized bytes (no header) + bytes: [u8; Self::MAX_SIZE], +} + +#[allow(clippy::len_without_is_empty)] +impl ObjectIdentifier { + /// Maximum size of a BER/DER-encoded OID in bytes. + pub const MAX_SIZE: usize = 39; // makes `ObjectIdentifier` 40-bytes total w\ 1-byte length + + /// Parse an [`ObjectIdentifier`] from the dot-delimited string form, + /// panicking on parse errors. 
+ /// + /// This function exists as a workaround for `unwrap` not yet being + /// stable in `const fn` contexts, and is intended to allow the result to + /// be bound to a constant value: + /// + /// ``` + /// use const_oid::ObjectIdentifier; + /// + /// pub const MY_OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.1"); + /// ``` + /// + /// In future versions of Rust it should be possible to replace this with + /// `ObjectIdentifier::new(...).unwrap()`. + /// + /// Use [`ObjectIdentifier::new`] for fallible parsing. + // TODO(tarcieri): remove this when `Result::unwrap` is `const fn` + pub const fn new_unwrap(s: &str) -> Self { + match Self::new(s) { + Ok(oid) => oid, + Err(err) => err.panic(), + } + } + + /// Parse an [`ObjectIdentifier`] from the dot-delimited string form. + pub const fn new(s: &str) -> Result { + // TODO(tarcieri): use `?` when stable in `const fn` + match parser::Parser::parse(s) { + Ok(parser) => parser.finish(), + Err(err) => Err(err), + } + } + + /// Parse an OID from a slice of [`Arc`] values (i.e. integers). + pub fn from_arcs(arcs: impl IntoIterator) -> Result { + let mut encoder = Encoder::new(); + + for arc in arcs { + encoder = encoder.arc(arc)?; + } + + encoder.finish() + } + + /// Parse an OID from from its BER/DER encoding. + pub fn from_bytes(ber_bytes: &[u8]) -> Result { + let len = ber_bytes.len(); + + match len { + 0 => return Err(Error::Empty), + 3..=Self::MAX_SIZE => (), + _ => return Err(Error::NotEnoughArcs), + } + let mut bytes = [0u8; Self::MAX_SIZE]; + bytes[..len].copy_from_slice(ber_bytes); + + let oid = Self { + bytes, + length: len as u8, + }; + + // Ensure arcs are well-formed + let mut arcs = oid.arcs(); + while arcs.try_next()?.is_some() {} + + Ok(oid) + } + + /// Get the BER/DER serialization of this OID as bytes. + /// + /// Note that this encoding omits the tag/length, and only contains the + /// value portion of the encoded OID. 
+ pub fn as_bytes(&self) -> &[u8] { + &self.bytes[..self.length as usize] + } + + /// Return the arc with the given index, if it exists. + pub fn arc(&self, index: usize) -> Option { + self.arcs().nth(index) + } + + /// Iterate over the arcs (a.k.a. nodes) of an [`ObjectIdentifier`]. + /// + /// Returns [`Arcs`], an iterator over [`Arc`] values. + pub fn arcs(&self) -> Arcs<'_> { + Arcs::new(self) + } + + /// Get the length of this [`ObjectIdentifier`] in arcs. + pub fn len(&self) -> usize { + self.arcs().count() + } + + /// Get the parent OID of this one (if applicable). + pub fn parent(&self) -> Option { + let num_arcs = self.len().checked_sub(1)?; + Self::from_arcs(self.arcs().take(num_arcs)).ok() + } + + /// Push an additional arc onto this OID, returning the child OID. + pub const fn push_arc(self, arc: Arc) -> Result { + // TODO(tarcieri): use `?` when stable in `const fn` + match Encoder::extend(self).arc(arc) { + Ok(encoder) => encoder.finish(), + Err(err) => Err(err), + } + } +} + +impl AsRef<[u8]> for ObjectIdentifier { + fn as_ref(&self) -> &[u8] { + self.as_bytes() + } +} + +impl FromStr for ObjectIdentifier { + type Err = Error; + + fn from_str(string: &str) -> Result { + Self::new(string) + } +} + +impl TryFrom<&[u8]> for ObjectIdentifier { + type Error = Error; + + fn try_from(ber_bytes: &[u8]) -> Result { + Self::from_bytes(ber_bytes) + } +} + +impl From<&ObjectIdentifier> for ObjectIdentifier { + fn from(oid: &ObjectIdentifier) -> ObjectIdentifier { + *oid + } +} + +impl fmt::Debug for ObjectIdentifier { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ObjectIdentifier({})", self) + } +} + +impl fmt::Display for ObjectIdentifier { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let len = self.arcs().count(); + + for (i, arc) in self.arcs().enumerate() { + write!(f, "{}", arc)?; + + if let Some(j) = i.checked_add(1) { + if j < len { + write!(f, ".")?; + } + } + } + + Ok(()) + } +} + +// Implement by hand 
because the derive would create invalid values. +// Use the constructor to create a valid oid with at least 3 arcs. +#[cfg(feature = "arbitrary")] +impl<'a> arbitrary::Arbitrary<'a> for ObjectIdentifier { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + let first = u.int_in_range(0..=arcs::ARC_MAX_FIRST)?; + let second = u.int_in_range(0..=arcs::ARC_MAX_SECOND)?; + let third = u.arbitrary()?; + + let mut oid = Self::from_arcs([first, second, third]) + .map_err(|_| arbitrary::Error::IncorrectFormat)?; + + for arc in u.arbitrary_iter()? { + oid = oid + .push_arc(arc?) + .map_err(|_| arbitrary::Error::IncorrectFormat)?; + } + + Ok(oid) + } + + fn size_hint(depth: usize) -> (usize, Option) { + (Arc::size_hint(depth).0.saturating_mul(3), None) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/parser.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/parser.rs new file mode 100644 index 0000000000000000000000000000000000000000..6f875faaa65615e904d08dcfd4ba02056849499b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/src/parser.rs @@ -0,0 +1,112 @@ +//! OID string parser with `const` support. + +use crate::{encoder::Encoder, Arc, Error, ObjectIdentifier, Result}; + +/// Const-friendly OID string parser. +/// +/// Parses an OID from the dotted string representation. +#[derive(Debug)] +pub(crate) struct Parser { + /// Current arc in progress + current_arc: Arc, + + /// BER/DER encoder + encoder: Encoder, +} + +impl Parser { + /// Parse an OID from a dot-delimited string e.g. 
`1.2.840.113549.1.1.1` + pub(crate) const fn parse(s: &str) -> Result { + let bytes = s.as_bytes(); + + if bytes.is_empty() { + return Err(Error::Empty); + } + + match bytes[0] { + b'0'..=b'9' => Self { + current_arc: 0, + encoder: Encoder::new(), + } + .parse_bytes(bytes), + actual => Err(Error::DigitExpected { actual }), + } + } + + /// Finish parsing, returning the result + pub(crate) const fn finish(self) -> Result { + self.encoder.finish() + } + + /// Parse the remaining bytes + const fn parse_bytes(mut self, bytes: &[u8]) -> Result { + match bytes { + // TODO(tarcieri): use `?` when stable in `const fn` + [] => match self.encoder.arc(self.current_arc) { + Ok(encoder) => { + self.encoder = encoder; + Ok(self) + } + Err(err) => Err(err), + }, + // TODO(tarcieri): checked arithmetic + #[allow(clippy::integer_arithmetic)] + [byte @ b'0'..=b'9', remaining @ ..] => { + let digit = byte.saturating_sub(b'0'); + self.current_arc = self.current_arc * 10 + digit as Arc; + self.parse_bytes(remaining) + } + [b'.', remaining @ ..] => { + if remaining.is_empty() { + return Err(Error::TrailingDot); + } + + // TODO(tarcieri): use `?` when stable in `const fn` + match self.encoder.arc(self.current_arc) { + Ok(encoder) => { + self.encoder = encoder; + self.current_arc = 0; + self.parse_bytes(remaining) + } + Err(err) => Err(err), + } + } + [byte, ..] 
=> Err(Error::DigitExpected { actual: *byte }), + } + } +} + +#[cfg(test)] +mod tests { + use super::Parser; + use crate::Error; + + #[test] + fn parse() { + let oid = Parser::parse("1.23.456").unwrap().finish().unwrap(); + assert_eq!(oid, "1.23.456".parse().unwrap()); + } + + #[test] + fn reject_empty_string() { + assert_eq!(Parser::parse("").err().unwrap(), Error::Empty); + } + + #[test] + fn reject_non_digits() { + assert_eq!( + Parser::parse("X").err().unwrap(), + Error::DigitExpected { actual: b'X' } + ); + + assert_eq!( + Parser::parse("1.2.X").err().unwrap(), + Error::DigitExpected { actual: b'X' } + ); + } + + #[test] + fn reject_trailing_dot() { + assert_eq!(Parser::parse("1.23.").err().unwrap(), Error::TrailingDot); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/tests/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/tests/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..e91dfc6cae46293ef0934a2d72f79aa6d899093c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/const-oid-0.9.6/tests/lib.rs @@ -0,0 +1,209 @@ +//! `const-oid` crate tests + +// TODO(tarcieri): test full set of OID encoding constraints specified here: +// + +use const_oid::{Error, ObjectIdentifier}; +use hex_literal::hex; +use std::string::ToString; + +/// Example OID value with a root arc of `0` (and large arc). +const EXAMPLE_OID_0_STR: &str = "0.9.2342.19200300.100.1.1"; +const EXAMPLE_OID_0_BER: &[u8] = &hex!("0992268993F22C640101"); +const EXAMPLE_OID_0: ObjectIdentifier = ObjectIdentifier::new_unwrap(EXAMPLE_OID_0_STR); + +/// Example OID value with a root arc of `1`. +const EXAMPLE_OID_1_STR: &str = "1.2.840.10045.2.1"; +const EXAMPLE_OID_1_BER: &[u8] = &hex!("2A8648CE3D0201"); +const EXAMPLE_OID_1: ObjectIdentifier = ObjectIdentifier::new_unwrap(EXAMPLE_OID_1_STR); + +/// Example OID value with a root arc of `2`. 
+const EXAMPLE_OID_2_STR: &str = "2.16.840.1.101.3.4.1.42"; +const EXAMPLE_OID_2_BER: &[u8] = &hex!("60864801650304012A"); +const EXAMPLE_OID_2: ObjectIdentifier = ObjectIdentifier::new_unwrap(EXAMPLE_OID_2_STR); + +/// Example OID value with a large arc +const EXAMPLE_OID_LARGE_ARC_STR: &str = "0.9.2342.19200300.100.1.1"; +const EXAMPLE_OID_LARGE_ARC_BER: &[u8] = &hex!("0992268993F22C640101"); +const EXAMPLE_OID_LARGE_ARC: ObjectIdentifier = + ObjectIdentifier::new_unwrap("0.9.2342.19200300.100.1.1"); + +#[test] +fn from_bytes() { + let oid0 = ObjectIdentifier::from_bytes(EXAMPLE_OID_0_BER).unwrap(); + assert_eq!(oid0.arc(0).unwrap(), 0); + assert_eq!(oid0.arc(1).unwrap(), 9); + assert_eq!(oid0, EXAMPLE_OID_0); + + let oid1 = ObjectIdentifier::from_bytes(EXAMPLE_OID_1_BER).unwrap(); + assert_eq!(oid1.arc(0).unwrap(), 1); + assert_eq!(oid1.arc(1).unwrap(), 2); + assert_eq!(oid1, EXAMPLE_OID_1); + + let oid2 = ObjectIdentifier::from_bytes(EXAMPLE_OID_2_BER).unwrap(); + assert_eq!(oid2.arc(0).unwrap(), 2); + assert_eq!(oid2.arc(1).unwrap(), 16); + assert_eq!(oid2, EXAMPLE_OID_2); + + let oid3 = ObjectIdentifier::from_bytes(EXAMPLE_OID_LARGE_ARC_BER).unwrap(); + assert_eq!(oid3.arc(0).unwrap(), 0); + assert_eq!(oid3.arc(1).unwrap(), 9); + assert_eq!(oid3.arc(2).unwrap(), 2342); + assert_eq!(oid3.arc(3).unwrap(), 19200300); + assert_eq!(oid3.arc(4).unwrap(), 100); + assert_eq!(oid3.arc(5).unwrap(), 1); + assert_eq!(oid3.arc(6).unwrap(), 1); + assert_eq!(oid3, EXAMPLE_OID_LARGE_ARC); + + // Empty + assert_eq!(ObjectIdentifier::from_bytes(&[]), Err(Error::Empty)); + + // Truncated + assert_eq!( + ObjectIdentifier::from_bytes(&[42]), + Err(Error::NotEnoughArcs) + ); + assert_eq!( + ObjectIdentifier::from_bytes(&[42, 134]), + Err(Error::NotEnoughArcs) + ); +} + +#[test] +fn from_str() { + let oid0 = EXAMPLE_OID_0_STR.parse::().unwrap(); + assert_eq!(oid0.arc(0).unwrap(), 0); + assert_eq!(oid0.arc(1).unwrap(), 9); + assert_eq!(oid0, EXAMPLE_OID_0); + + let oid1 = 
EXAMPLE_OID_1_STR.parse::().unwrap(); + assert_eq!(oid1.arc(0).unwrap(), 1); + assert_eq!(oid1.arc(1).unwrap(), 2); + assert_eq!(oid1, EXAMPLE_OID_1); + + let oid2 = EXAMPLE_OID_2_STR.parse::().unwrap(); + assert_eq!(oid2.arc(0).unwrap(), 2); + assert_eq!(oid2.arc(1).unwrap(), 16); + assert_eq!(oid2, EXAMPLE_OID_2); + + let oid3 = EXAMPLE_OID_LARGE_ARC_STR + .parse::() + .unwrap(); + assert_eq!(oid3.arc(0).unwrap(), 0); + assert_eq!(oid3.arc(1).unwrap(), 9); + assert_eq!(oid3.arc(2).unwrap(), 2342); + assert_eq!(oid3.arc(3).unwrap(), 19200300); + assert_eq!(oid3.arc(4).unwrap(), 100); + assert_eq!(oid3.arc(5).unwrap(), 1); + assert_eq!(oid3.arc(6).unwrap(), 1); + assert_eq!(oid3, EXAMPLE_OID_LARGE_ARC); + + // Too short + assert_eq!("1.2".parse::(), Err(Error::NotEnoughArcs)); + + // Truncated + assert_eq!( + "1.2.840.10045.2.".parse::(), + Err(Error::TrailingDot) + ); + + // Invalid first arc + assert_eq!( + "3.2.840.10045.2.1".parse::(), + Err(Error::ArcInvalid { arc: 3 }) + ); + + // Invalid second arc + assert_eq!( + "1.40.840.10045.2.1".parse::(), + Err(Error::ArcInvalid { arc: 40 }) + ); +} + +#[test] +fn display() { + assert_eq!(EXAMPLE_OID_0.to_string(), EXAMPLE_OID_0_STR); + assert_eq!(EXAMPLE_OID_1.to_string(), EXAMPLE_OID_1_STR); + assert_eq!(EXAMPLE_OID_2.to_string(), EXAMPLE_OID_2_STR); + assert_eq!(EXAMPLE_OID_LARGE_ARC.to_string(), EXAMPLE_OID_LARGE_ARC_STR); +} + +#[test] +fn try_from_u32_slice() { + let oid1 = ObjectIdentifier::from_arcs([1, 2, 840, 10045, 2, 1]).unwrap(); + assert_eq!(oid1.arc(0).unwrap(), 1); + assert_eq!(oid1.arc(1).unwrap(), 2); + assert_eq!(EXAMPLE_OID_1, oid1); + + let oid2 = ObjectIdentifier::from_arcs([2, 16, 840, 1, 101, 3, 4, 1, 42]).unwrap(); + assert_eq!(oid2.arc(0).unwrap(), 2); + assert_eq!(oid2.arc(1).unwrap(), 16); + assert_eq!(EXAMPLE_OID_2, oid2); + + // Too short + assert_eq!( + ObjectIdentifier::from_arcs([1, 2]), + Err(Error::NotEnoughArcs) + ); + + // Invalid first arc + assert_eq!( + 
ObjectIdentifier::from_arcs([3, 2, 840, 10045, 3, 1, 7]), + Err(Error::ArcInvalid { arc: 3 }) + ); + + // Invalid second arc + assert_eq!( + ObjectIdentifier::from_arcs([1, 40, 840, 10045, 3, 1, 7]), + Err(Error::ArcInvalid { arc: 40 }) + ); +} + +#[test] +fn as_bytes() { + assert_eq!(EXAMPLE_OID_1.as_bytes(), EXAMPLE_OID_1_BER); + assert_eq!(EXAMPLE_OID_2.as_bytes(), EXAMPLE_OID_2_BER); +} + +#[test] +fn parse_empty() { + assert_eq!(ObjectIdentifier::new(""), Err(Error::Empty)); +} + +#[test] +fn parse_not_enough_arcs() { + assert_eq!(ObjectIdentifier::new("1.2"), Err(Error::NotEnoughArcs)); +} + +#[test] +fn parse_invalid_first_arc() { + assert_eq!( + ObjectIdentifier::new("3.2.840.10045.3.1.7"), + Err(Error::ArcInvalid { arc: 3 }) + ); +} + +#[test] +fn parse_invalid_second_arc() { + assert_eq!( + ObjectIdentifier::new("1.40.840.10045.3.1.7"), + Err(Error::ArcInvalid { arc: 40 }) + ); +} + +#[test] +fn parent() { + let oid = ObjectIdentifier::new("1.2.3.4").unwrap(); + let parent = oid.parent().unwrap(); + assert_eq!(parent, ObjectIdentifier::new("1.2.3").unwrap()); + assert_eq!(parent.parent(), None); +} + +#[test] +fn push_arc() { + let oid = ObjectIdentifier::new("1.2.3").unwrap(); + assert_eq!( + oid.push_arc(4).unwrap(), + ObjectIdentifier::new("1.2.3.4").unwrap() + ); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/.github/workflows/main.yml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/.github/workflows/main.yml new file mode 100644 index 
0000000000000000000000000000000000000000..b564dc22af63421384a6a8f1c9bf2ca5a5e8ca5e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/.github/workflows/main.yml @@ -0,0 +1,77 @@ +name: CI +on: [push, pull_request] + +jobs: + test: + name: Test + runs-on: ${{ matrix.os }} + strategy: + matrix: + build: [stable, beta, nightly, macos, win32, win64, mingw] + include: + - build: stable + os: ubuntu-latest + rust: stable + - build: beta + os: ubuntu-latest + rust: beta + - build: nightly + os: ubuntu-latest + rust: nightly + - build: macos + os: macos-latest + rust: stable + - build: win32 + os: windows-latest + rust: stable-i686 + - build: win64 + os: windows-latest + rust: stable-x86_64 + - build: mingw + os: windows-latest + rust: stable-x86_64-gnu + steps: + - uses: actions/checkout@master + - name: Install Rust (rustup) + run: rustup update ${{ matrix.rust }} --no-self-update && rustup default ${{ matrix.rust }} + shell: bash + - run: cargo test + + rustfmt: + name: Rustfmt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Install Rust + run: rustup update stable && rustup default stable && rustup component add rustfmt + - run: cargo fmt -- --check + + build: + name: Build for Android and Redox + runs-on: ubuntu-latest + strategy: + matrix: + target: [x86_64-linux-android, x86_64-unknown-redox] + steps: + - uses: actions/checkout@master + - name: Install Rust + run: rustup update stable && rustup default stable && rustup target add ${{ matrix.target }} + - run: cargo build --target ${{ matrix.target }} + + publish_docs: + name: Publish Documentation + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Install Rust + run: rustup update stable && rustup default stable + - name: Build documentation + run: cargo doc --no-deps --all-features + - name: Publish documentation + run: | + cd target/doc + git init + git add . 
+ git -c user.name='ci' -c user.email='ci' commit -m init + git push -f -q https://git:${{ secrets.github_token }}@github.com/${{ github.repository }} HEAD:gh-pages + if: github.event_name == 'push' && github.event.ref == 'refs/heads/master' diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..359ccd3171fc567f156560291c9a0475ff2b9e41 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/Cargo.lock @@ -0,0 +1,187 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "bitflags" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "fastrand" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" + +[[package]] +name = "filetime" +version = "0.2.27" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "tempfile", +] + +[[package]] +name = "libc" +version = "0.2.155" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" + +[[package]] +name = "libredox" +version = "0.1.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags", + "libc", + "redox_syscall", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "redox_syscall" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +dependencies = [ + "bitflags", +] + +[[package]] +name = "rustix" +version = "0.38.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "tempfile" +version = "3.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +dependencies = [ + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..cf6dc45ddaf789b106755b3fdf2588fac8f105d6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/Cargo.toml.orig @@ -0,0 +1,26 @@ +[package] +name = "filetime" +authors = ["Alex Crichton "] +version = "0.2.27" +license = "MIT/Apache-2.0" +readme = "README.md" +keywords = ["timestamp", "mtime"] +repository = "https://github.com/alexcrichton/filetime" +homepage = "https://github.com/alexcrichton/filetime" +documentation = "https://docs.rs/filetime" +description = """ +Platform-agnostic accessors of timestamps in File metadata +""" +edition = "2018" + +[dependencies] +cfg-if = "1.0.0" + +[target.'cfg(unix)'.dependencies] +libc = "0.2.27" + +[target.'cfg(target_os = "redox")'.dependencies] +libredox = "0.1.0" + +[dev-dependencies] +tempfile = "3" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..16fe87b06e802f094b3fbb0894b137bca2b16ef1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..39e0ed6602151f235148e6c08413aa7eda5b9038 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f85d9679b542b266e0b6901b4986e5eb75eca0c1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/README.md @@ -0,0 +1,37 @@ +# filetime + +[Documentation](https://docs.rs/filetime) + +A helper library for inspecting and setting the various timestamps of files in Rust. This +library takes into account cross-platform differences in terms of where the +timestamps are located, what they are called, and how to convert them into a +platform-independent representation. + +```toml +# Cargo.toml +[dependencies] +filetime = "0.2" +``` + +# Advantages over using `std::fs::Metadata` + +This library includes the ability to set this data, which std does not. + +This library, when built with `RUSTFLAGS=--cfg emulate_second_only_system` set, will return all times rounded down to the second. This emulates the behavior of some file systems, mostly [HFS](https://en.wikipedia.org/wiki/HFS_Plus), allowing debugging on other hardware. + +# License + +This project is licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Filetime by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..95ea5d4f7ae8a46b3fe4a19d1c1c7056b09c8e78 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/lib.rs @@ -0,0 +1,715 @@ +//! Timestamps for files in Rust +//! +//! This library provides platform-agnostic inspection of the various timestamps +//! present in the standard `fs::Metadata` structure. +//! +//! # Installation +//! +//! Add this to your `Cargo.toml`: +//! +//! ```toml +//! [dependencies] +//! filetime = "0.2" +//! ``` +//! +//! # Usage +//! +//! ```no_run +//! use std::fs; +//! use filetime::FileTime; +//! +//! let metadata = fs::metadata("foo.txt").unwrap(); +//! +//! let mtime = FileTime::from_last_modification_time(&metadata); +//! println!("{}", mtime); +//! +//! let atime = FileTime::from_last_access_time(&metadata); +//! assert!(mtime < atime); +//! +//! // Inspect values that can be interpreted across platforms +//! println!("{}", mtime.unix_seconds()); +//! println!("{}", mtime.nanoseconds()); +//! +//! // Print the platform-specific value of seconds +//! println!("{}", mtime.seconds()); +//! ``` + +use std::fmt; +use std::fs; +use std::io; +use std::path::Path; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +cfg_if::cfg_if! { + if #[cfg(target_os = "redox")] { + #[path = "redox.rs"] + mod imp; + } else if #[cfg(windows)] { + #[path = "windows.rs"] + mod imp; + } else if #[cfg(all(target_family = "wasm", not(target_os = "emscripten")))] { + #[path = "wasm.rs"] + mod imp; + } else { + #[path = "unix/mod.rs"] + mod imp; + } +} + +/// A helper structure to represent a timestamp for a file. 
+/// +/// The actual value contined within is platform-specific and does not have the +/// same meaning across platforms, but comparisons and stringification can be +/// significant among the same platform. +#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Copy, Clone, Hash)] +pub struct FileTime { + seconds: i64, + nanos: u32, +} + +impl FileTime { + /// Creates a new timestamp representing a 0 time. + /// + /// Useful for creating the base of a cmp::max chain of times. + pub const fn zero() -> FileTime { + FileTime { + seconds: 0, + nanos: 0, + } + } + + const fn emulate_second_only_system(self) -> FileTime { + if cfg!(emulate_second_only_system) { + FileTime { + seconds: self.seconds, + nanos: 0, + } + } else { + self + } + } + + /// Creates a new timestamp representing the current system time. + /// + /// ``` + /// # use filetime::FileTime; + /// # + /// # fn example() -> std::io::Result<()> { + /// # let path = ""; + /// # + /// filetime::set_file_mtime(path, FileTime::now())?; + /// # + /// # Ok(()) + /// # } + /// ``` + /// + /// Equivalent to `FileTime::from_system_time(SystemTime::now())`. + pub fn now() -> FileTime { + FileTime::from_system_time(SystemTime::now()) + } + + /// Creates a new instance of `FileTime` with a number of seconds and + /// nanoseconds relative to the Unix epoch, 1970-01-01T00:00:00Z. + /// + /// Negative seconds represent times before the Unix epoch, and positive + /// values represent times after it. Nanos always count forwards in time. + /// + /// Note that this is typically the relative point that Unix time stamps are + /// from, but on Windows the native time stamp is relative to January 1, + /// 1601 so the return value of `seconds` from the returned `FileTime` + /// instance may not be the same as that passed in. 
+ pub const fn from_unix_time(seconds: i64, nanos: u32) -> FileTime { + FileTime { + seconds: seconds + if cfg!(windows) { 11644473600 } else { 0 }, + nanos, + } + .emulate_second_only_system() + } + + /// Creates a new timestamp from the last modification time listed in the + /// specified metadata. + /// + /// The returned value corresponds to the `mtime` field of `stat` on Unix + /// platforms and the `ftLastWriteTime` field on Windows platforms. + pub fn from_last_modification_time(meta: &fs::Metadata) -> FileTime { + imp::from_last_modification_time(meta).emulate_second_only_system() + } + + /// Creates a new timestamp from the last access time listed in the + /// specified metadata. + /// + /// The returned value corresponds to the `atime` field of `stat` on Unix + /// platforms and the `ftLastAccessTime` field on Windows platforms. + pub fn from_last_access_time(meta: &fs::Metadata) -> FileTime { + imp::from_last_access_time(meta).emulate_second_only_system() + } + + /// Creates a new timestamp from the creation time listed in the specified + /// metadata. + /// + /// The returned value corresponds to the `birthtime` field of `stat` on + /// Unix platforms and the `ftCreationTime` field on Windows platforms. Note + /// that not all Unix platforms have this field available and may return + /// `None` in some circumstances. + pub fn from_creation_time(meta: &fs::Metadata) -> Option { + imp::from_creation_time(meta).map(|x| x.emulate_second_only_system()) + } + + /// Creates a new timestamp from the given SystemTime. + /// + /// Windows counts file times since 1601-01-01T00:00:00Z, and cannot + /// represent times before this, but it's possible to create a SystemTime + /// that does. This function will error if passed such a SystemTime. 
+ pub fn from_system_time(time: SystemTime) -> FileTime { + let epoch = if cfg!(windows) { + UNIX_EPOCH - Duration::from_secs(11644473600) + } else { + UNIX_EPOCH + }; + + time.duration_since(epoch) + .map(|d| FileTime { + seconds: d.as_secs() as i64, + nanos: d.subsec_nanos(), + }) + .unwrap_or_else(|e| { + let until_epoch = e.duration(); + let (sec_offset, nanos) = if until_epoch.subsec_nanos() == 0 { + (0, 0) + } else { + (-1, 1_000_000_000 - until_epoch.subsec_nanos()) + }; + + FileTime { + seconds: -1 * until_epoch.as_secs() as i64 + sec_offset, + nanos, + } + }) + .emulate_second_only_system() + } + + /// Returns the whole number of seconds represented by this timestamp. + /// + /// Note that this value's meaning is **platform specific**. On Unix + /// platform time stamps are typically relative to January 1, 1970, but on + /// Windows platforms time stamps are relative to January 1, 1601. + pub const fn seconds(&self) -> i64 { + self.seconds + } + + /// Returns the whole number of seconds represented by this timestamp, + /// relative to the Unix epoch start of January 1, 1970. + /// + /// Note that this does not return the same value as `seconds` for Windows + /// platforms as seconds are relative to a different date there. + pub const fn unix_seconds(&self) -> i64 { + self.seconds - if cfg!(windows) { 11644473600 } else { 0 } + } + + /// Returns the nanosecond precision of this timestamp. + /// + /// The returned value is always less than one billion and represents a + /// portion of a second forward from the seconds returned by the `seconds` + /// method. + pub const fn nanoseconds(&self) -> u32 { + self.nanos + } +} + +impl fmt::Display for FileTime { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}.{:09}s", self.seconds, self.nanos) + } +} + +impl From for FileTime { + fn from(time: SystemTime) -> FileTime { + FileTime::from_system_time(time) + } +} + +/// Set the last access and modification times for a file on the filesystem. 
+/// +/// This function will set the `atime` and `mtime` metadata fields for a file +/// on the local filesystem, returning any error encountered. +pub fn set_file_times

(p: P, atime: FileTime, mtime: FileTime) -> io::Result<()> +where + P: AsRef, +{ + imp::set_file_times(p.as_ref(), atime, mtime) +} + +/// Set the last access and modification times for a file handle. +/// +/// This function will either or both of the `atime` and `mtime` metadata +/// fields for a file handle , returning any error encountered. If `None` is +/// specified then the time won't be updated. If `None` is specified for both +/// options then no action is taken. +pub fn set_file_handle_times( + f: &fs::File, + atime: Option, + mtime: Option, +) -> io::Result<()> { + imp::set_file_handle_times(f, atime, mtime) +} + +/// Set the last access and modification times for a file on the filesystem. +/// This function does not follow symlink. +/// +/// This function will set the `atime` and `mtime` metadata fields for a file +/// on the local filesystem, returning any error encountered. +pub fn set_symlink_file_times

(p: P, atime: FileTime, mtime: FileTime) -> io::Result<()> +where + P: AsRef, +{ + imp::set_symlink_file_times(p.as_ref(), atime, mtime) +} + +/// Set the last modification time for a file on the filesystem. +/// +/// This function will set the `mtime` metadata field for a file on the local +/// filesystem, returning any error encountered. +/// +/// # Platform support +/// +/// Where supported this will attempt to issue just one syscall to update only +/// the `mtime`, but where not supported this may issue one syscall to learn the +/// existing `atime` so only the `mtime` can be configured. +pub fn set_file_mtime

(p: P, mtime: FileTime) -> io::Result<()> +where + P: AsRef, +{ + imp::set_file_mtime(p.as_ref(), mtime) +} + +/// Set the last access time for a file on the filesystem. +/// +/// This function will set the `atime` metadata field for a file on the local +/// filesystem, returning any error encountered. +/// +/// # Platform support +/// +/// Where supported this will attempt to issue just one syscall to update only +/// the `atime`, but where not supported this may issue one syscall to learn the +/// existing `mtime` so only the `atime` can be configured. +pub fn set_file_atime

(p: P, atime: FileTime) -> io::Result<()> +where + P: AsRef, +{ + imp::set_file_atime(p.as_ref(), atime) +} + +#[cfg(test)] +mod tests { + use super::{ + set_file_atime, set_file_handle_times, set_file_mtime, set_file_times, + set_symlink_file_times, FileTime, + }; + use std::fs::{self, File}; + use std::io; + use std::path::Path; + use std::time::{Duration, UNIX_EPOCH}; + use tempfile::Builder; + + #[cfg(unix)] + fn make_symlink_file(src: P, dst: Q) -> io::Result<()> + where + P: AsRef, + Q: AsRef, + { + use std::os::unix::fs::symlink; + symlink(src, dst) + } + + #[cfg(windows)] + fn make_symlink_file(src: P, dst: Q) -> io::Result<()> + where + P: AsRef, + Q: AsRef, + { + use std::os::windows::fs::symlink_file; + symlink_file(src, dst) + } + + #[cfg(unix)] + fn make_symlink_dir(src: P, dst: Q) -> io::Result<()> + where + P: AsRef, + Q: AsRef, + { + use std::os::unix::fs::symlink; + symlink(src, dst) + } + + #[cfg(windows)] + fn make_symlink_dir(src: P, dst: Q) -> io::Result<()> + where + P: AsRef, + Q: AsRef, + { + use std::os::windows::fs::symlink_dir; + symlink_dir(src, dst) + } + + #[test] + #[cfg(windows)] + fn from_unix_time_test() { + let time = FileTime::from_unix_time(10, 100_000_000); + assert_eq!(11644473610, time.seconds); + assert_eq!(100_000_000, time.nanos); + + let time = FileTime::from_unix_time(-10, 100_000_000); + assert_eq!(11644473590, time.seconds); + assert_eq!(100_000_000, time.nanos); + + let time = FileTime::from_unix_time(-12_000_000_000, 0); + assert_eq!(-355526400, time.seconds); + assert_eq!(0, time.nanos); + } + + #[test] + #[cfg(not(windows))] + fn from_unix_time_test() { + let time = FileTime::from_unix_time(10, 100_000_000); + assert_eq!(10, time.seconds); + assert_eq!(100_000_000, time.nanos); + + let time = FileTime::from_unix_time(-10, 100_000_000); + assert_eq!(-10, time.seconds); + assert_eq!(100_000_000, time.nanos); + + let time = FileTime::from_unix_time(-12_000_000_000, 0); + assert_eq!(-12_000_000_000, time.seconds); + 
assert_eq!(0, time.nanos); + } + + #[test] + #[cfg(windows)] + fn from_system_time_test() { + let time = FileTime::from_system_time(UNIX_EPOCH + Duration::from_secs(10)); + assert_eq!(11644473610, time.seconds); + assert_eq!(0, time.nanos); + + let time = FileTime::from_system_time(UNIX_EPOCH - Duration::from_secs(10)); + assert_eq!(11644473590, time.seconds); + assert_eq!(0, time.nanos); + + let time = FileTime::from_system_time(UNIX_EPOCH - Duration::from_millis(1100)); + assert_eq!(11644473598, time.seconds); + assert_eq!(900_000_000, time.nanos); + + let time = FileTime::from_system_time(UNIX_EPOCH - Duration::from_secs(12_000_000_000)); + assert_eq!(-355526400, time.seconds); + assert_eq!(0, time.nanos); + } + + #[test] + #[cfg(not(windows))] + fn from_system_time_test() { + let time = FileTime::from_system_time(UNIX_EPOCH + Duration::from_secs(10)); + assert_eq!(10, time.seconds); + assert_eq!(0, time.nanos); + + let time = FileTime::from_system_time(UNIX_EPOCH - Duration::from_secs(10)); + assert_eq!(-10, time.seconds); + assert_eq!(0, time.nanos); + + let time = FileTime::from_system_time(UNIX_EPOCH - Duration::from_millis(1100)); + assert_eq!(-2, time.seconds); + assert_eq!(900_000_000, time.nanos); + + let time = FileTime::from_system_time(UNIX_EPOCH - Duration::from_secs(12_000_000)); + assert_eq!(-12_000_000, time.seconds); + assert_eq!(0, time.nanos); + } + + #[test] + fn set_file_times_test() -> io::Result<()> { + let td = Builder::new().prefix("filetime").tempdir()?; + let path = td.path().join("foo.txt"); + let mut f = File::create(&path)?; + + let metadata = fs::metadata(&path)?; + let mtime = FileTime::from_last_modification_time(&metadata); + let atime = FileTime::from_last_access_time(&metadata); + set_file_times(&path, atime, mtime)?; + + let new_mtime = FileTime::from_unix_time(10_000, 0); + set_file_times(&path, atime, new_mtime)?; + + let metadata = fs::metadata(&path)?; + let mtime = FileTime::from_last_modification_time(&metadata); + 
assert_eq!(mtime, new_mtime, "modification should be updated"); + + // Update just mtime + let new_mtime = FileTime::from_unix_time(20_000, 0); + set_file_handle_times(&mut f, None, Some(new_mtime))?; + let metadata = f.metadata()?; + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime, "modification time should be updated"); + let new_atime = FileTime::from_last_access_time(&metadata); + assert_eq!(atime, new_atime, "accessed time should not be updated"); + + // Update just atime + let new_atime = FileTime::from_unix_time(30_000, 0); + set_file_handle_times(&mut f, Some(new_atime), None)?; + let metadata = f.metadata()?; + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime, "modification time should not be updated"); + let atime = FileTime::from_last_access_time(&metadata); + assert_eq!(atime, new_atime, "accessed time should be updated"); + + let spath = td.path().join("bar.txt"); + make_symlink_file(&path, &spath)?; + let metadata = fs::symlink_metadata(&spath)?; + let smtime = FileTime::from_last_modification_time(&metadata); + + set_file_times(&spath, atime, mtime)?; + + let metadata = fs::metadata(&path)?; + let cur_mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, cur_mtime); + + let metadata = fs::symlink_metadata(&spath)?; + let cur_mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(smtime, cur_mtime); + + set_file_times(&spath, atime, new_mtime)?; + + let metadata = fs::metadata(&path)?; + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + + let metadata = fs::symlink_metadata(&spath)?; + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, smtime); + Ok(()) + } + + #[test] + fn set_dir_times_test() -> io::Result<()> { + let td = Builder::new().prefix("filetime").tempdir()?; + let path = td.path().join("foo"); + fs::create_dir(&path)?; + + let metadata = 
fs::metadata(&path)?; + let mtime = FileTime::from_last_modification_time(&metadata); + let atime = FileTime::from_last_access_time(&metadata); + set_file_times(&path, atime, mtime)?; + + let new_mtime = FileTime::from_unix_time(10_000, 0); + set_file_times(&path, atime, new_mtime)?; + + let metadata = fs::metadata(&path)?; + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime, "modification should be updated"); + + // Update just mtime + let new_mtime = FileTime::from_unix_time(20_000, 0); + set_file_mtime(&path, new_mtime)?; + let metadata = fs::metadata(&path)?; + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime, "modification time should be updated"); + let new_atime = FileTime::from_last_access_time(&metadata); + assert_eq!(atime, new_atime, "accessed time should not be updated"); + + // Update just atime + let new_atime = FileTime::from_unix_time(30_000, 0); + set_file_atime(&path, new_atime)?; + let metadata = fs::metadata(&path)?; + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime, "modification time should not be updated"); + let atime = FileTime::from_last_access_time(&metadata); + assert_eq!(atime, new_atime, "accessed time should be updated"); + + let spath = td.path().join("bar"); + make_symlink_dir(&path, &spath)?; + let metadata = fs::symlink_metadata(&spath)?; + let smtime = FileTime::from_last_modification_time(&metadata); + + set_file_times(&spath, atime, mtime)?; + + let metadata = fs::metadata(&path)?; + let cur_mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, cur_mtime); + + let metadata = fs::symlink_metadata(&spath)?; + let cur_mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(smtime, cur_mtime); + + set_file_times(&spath, atime, new_mtime)?; + + let metadata = fs::metadata(&path)?; + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, 
new_mtime); + + let metadata = fs::symlink_metadata(&spath)?; + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, smtime); + Ok(()) + } + + #[test] + fn set_file_times_pre_unix_epoch_test() { + let td = Builder::new().prefix("filetime").tempdir().unwrap(); + let path = td.path().join("foo.txt"); + File::create(&path).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + let atime = FileTime::from_last_access_time(&metadata); + set_file_times(&path, atime, mtime).unwrap(); + + let new_mtime = FileTime::from_unix_time(-10_000, 0); + if cfg!(target_os = "aix") { + // On AIX, os checks if the unix timestamp is valid. + let result = set_file_times(&path, atime, new_mtime); + assert!(result.is_err()); + assert!(result.err().unwrap().kind() == std::io::ErrorKind::InvalidInput); + } else { + set_file_times(&path, atime, new_mtime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + } + } + + #[test] + #[cfg(windows)] + fn set_file_times_pre_windows_epoch_test() { + let td = Builder::new().prefix("filetime").tempdir().unwrap(); + let path = td.path().join("foo.txt"); + File::create(&path).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + let atime = FileTime::from_last_access_time(&metadata); + set_file_times(&path, atime, mtime).unwrap(); + + let new_mtime = FileTime::from_unix_time(-12_000_000_000, 0); + assert!(set_file_times(&path, atime, new_mtime).is_err()); + } + + #[test] + fn set_symlink_file_times_test() { + let td = Builder::new().prefix("filetime").tempdir().unwrap(); + let path = td.path().join("foo.txt"); + File::create(&path).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + let atime = 
FileTime::from_last_access_time(&metadata); + set_symlink_file_times(&path, atime, mtime).unwrap(); + + let new_mtime = FileTime::from_unix_time(10_000, 0); + set_symlink_file_times(&path, atime, new_mtime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + + let spath = td.path().join("bar.txt"); + make_symlink_file(&path, &spath).unwrap(); + + let metadata = fs::symlink_metadata(&spath).unwrap(); + let smtime = FileTime::from_last_modification_time(&metadata); + let satime = FileTime::from_last_access_time(&metadata); + set_symlink_file_times(&spath, smtime, satime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + + let new_smtime = FileTime::from_unix_time(20_000, 0); + set_symlink_file_times(&spath, atime, new_smtime).unwrap(); + + let metadata = fs::metadata(&spath).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + + let metadata = fs::symlink_metadata(&spath).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_smtime); + } + + #[test] + fn set_symlink_dir_times_test() { + let td = Builder::new().prefix("filetime").tempdir().unwrap(); + let path = td.path().join("foo"); + fs::create_dir(&path).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + let atime = FileTime::from_last_access_time(&metadata); + set_symlink_file_times(&path, atime, mtime).unwrap(); + + let new_mtime = FileTime::from_unix_time(10_000, 0); + set_symlink_file_times(&path, atime, new_mtime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + + let spath = td.path().join("bar"); + make_symlink_dir(&path, 
&spath).unwrap(); + + let metadata = fs::symlink_metadata(&spath).unwrap(); + let smtime = FileTime::from_last_modification_time(&metadata); + let satime = FileTime::from_last_access_time(&metadata); + set_symlink_file_times(&spath, smtime, satime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + + let new_smtime = FileTime::from_unix_time(20_000, 0); + set_symlink_file_times(&spath, atime, new_smtime).unwrap(); + + let metadata = fs::metadata(&spath).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + + let metadata = fs::symlink_metadata(&spath).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_smtime); + } + + #[test] + fn set_single_time_test() { + use super::{set_file_atime, set_file_mtime}; + + let td = Builder::new().prefix("filetime").tempdir().unwrap(); + let path = td.path().join("foo.txt"); + File::create(&path).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + let atime = FileTime::from_last_access_time(&metadata); + set_file_times(&path, atime, mtime).unwrap(); + + let new_mtime = FileTime::from_unix_time(10_000, 0); + set_file_mtime(&path, new_mtime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime, "modification time should be updated"); + assert_eq!( + atime, + FileTime::from_last_access_time(&metadata), + "access time should not be updated", + ); + + let new_atime = FileTime::from_unix_time(20_000, 0); + set_file_atime(&path, new_atime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let atime = FileTime::from_last_access_time(&metadata); + assert_eq!(atime, new_atime, "access time should be updated"); + assert_eq!( + mtime, + 
FileTime::from_last_modification_time(&metadata), + "modification time should not be updated" + ); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/redox.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/redox.rs new file mode 100644 index 0000000000000000000000000000000000000000..7d1a6eac5850249c23a195288dc23f90bd6c997d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/redox.rs @@ -0,0 +1,113 @@ +use crate::FileTime; +use std::fs::{self, File}; +use std::io; +use std::os::unix::prelude::*; +use std::path::Path; + +use libredox::{ + call, errno, + error::{Error, Result}, + flag, Fd, +}; + +pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + let fd = open_redox(p, 0)?; + set_file_times_redox(fd.raw(), atime, mtime) +} + +pub fn set_file_mtime(p: &Path, mtime: FileTime) -> io::Result<()> { + let fd = open_redox(p, 0)?; + let st = fd.stat()?; + + set_file_times_redox( + fd.raw(), + FileTime { + seconds: st.st_atime as i64, + nanos: st.st_atime_nsec as u32, + }, + mtime, + )?; + Ok(()) +} + +pub fn set_file_atime(p: &Path, atime: FileTime) -> io::Result<()> { + let fd = open_redox(p, 0)?; + let st = fd.stat()?; + + set_file_times_redox( + fd.raw(), + atime, + FileTime { + seconds: st.st_mtime as i64, + nanos: st.st_mtime_nsec as u32, + }, + )?; + Ok(()) +} + +pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + let fd = open_redox(p, flag::O_NOFOLLOW)?; + set_file_times_redox(fd.raw(), atime, mtime)?; + Ok(()) +} + +pub fn set_file_handle_times( + f: &File, + atime: Option, + mtime: Option, +) -> io::Result<()> { + let (atime1, mtime1) = match (atime, mtime) { + (Some(a), Some(b)) => (a, b), + (None, None) => return Ok(()), + (Some(a), None) => { + let meta = f.metadata()?; + (a, FileTime::from_last_modification_time(&meta)) + } + (None, Some(b)) => { + let meta = f.metadata()?; 
+ (FileTime::from_last_access_time(&meta), b) + } + }; + set_file_times_redox(f.as_raw_fd() as usize, atime1, mtime1) +} + +fn open_redox(path: &Path, flags: i32) -> Result { + match path.to_str() { + Some(string) => Fd::open(string, flags, 0), + None => Err(Error::new(errno::EINVAL)), + } +} + +fn set_file_times_redox(fd: usize, atime: FileTime, mtime: FileTime) -> io::Result<()> { + use libredox::data::TimeSpec; + + fn to_timespec(ft: &FileTime) -> TimeSpec { + TimeSpec { + tv_sec: ft.seconds(), + tv_nsec: ft.nanoseconds() as _, + } + } + + let times = [to_timespec(&atime), to_timespec(&mtime)]; + + call::futimens(fd, ×)?; + Ok(()) +} + +pub fn from_last_modification_time(meta: &fs::Metadata) -> FileTime { + FileTime { + seconds: meta.mtime(), + nanos: meta.mtime_nsec() as u32, + } +} + +pub fn from_last_access_time(meta: &fs::Metadata) -> FileTime { + FileTime { + seconds: meta.atime(), + nanos: meta.atime_nsec() as u32, + } +} + +pub fn from_creation_time(_meta: &fs::Metadata) -> Option { + None +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/android.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/android.rs new file mode 100644 index 0000000000000000000000000000000000000000..afcd448662e87d61d4ed37f02da05a7e3ef054c3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/android.rs @@ -0,0 +1,63 @@ +use crate::FileTime; +use std::ffi::CString; +use std::fs::File; +use std::io; +use std::os::unix::prelude::*; +use std::path::Path; + +pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), false) +} + +pub fn set_file_mtime(p: &Path, mtime: FileTime) -> io::Result<()> { + set_times(p, None, Some(mtime), false) +} + +pub fn set_file_atime(p: &Path, atime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), None, false) +} + +pub fn set_file_handle_times( + f: &File, + 
atime: Option, + mtime: Option, +) -> io::Result<()> { + let times = [super::to_timespec(&atime), super::to_timespec(&mtime)]; + + // On Android NDK before version 19, `futimens` is not available. + // + // For better compatibility, we reimplement `futimens` using `utimensat`, + // the same way as bionic libc uses it to implement `futimens`. + let rc = unsafe { libc::utimensat(f.as_raw_fd(), core::ptr::null(), times.as_ptr(), 0) }; + if rc == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } +} + +pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), true) +} + +fn set_times( + p: &Path, + atime: Option, + mtime: Option, + symlink: bool, +) -> io::Result<()> { + let flags = if symlink { + libc::AT_SYMLINK_NOFOLLOW + } else { + 0 + }; + + let p = CString::new(p.as_os_str().as_bytes())?; + let times = [super::to_timespec(&atime), super::to_timespec(&mtime)]; + let rc = unsafe { libc::utimensat(libc::AT_FDCWD, p.as_ptr(), times.as_ptr(), flags) }; + if rc == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/linux.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/linux.rs new file mode 100644 index 0000000000000000000000000000000000000000..255fcfb612d57fc44e61c0cf6a088d501e53303f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/linux.rs @@ -0,0 +1,117 @@ +//! On Linux we try to use the more accurate `utimensat` syscall but this isn't +//! always available so we also fall back to `utimes` if we couldn't find +//! `utimensat` at runtime. 
+ +use crate::FileTime; +use std::ffi::CString; +use std::fs; +use std::io; +use std::os::unix::prelude::*; +use std::path::Path; +use std::ptr; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering::SeqCst; + +pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), false) +} + +pub fn set_file_mtime(p: &Path, mtime: FileTime) -> io::Result<()> { + set_times(p, None, Some(mtime), false) +} + +pub fn set_file_atime(p: &Path, atime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), None, false) +} + +pub fn set_file_handle_times( + f: &fs::File, + atime: Option, + mtime: Option, +) -> io::Result<()> { + // Attempt to use the `utimensat` syscall, but if it's not supported by the + // current kernel then fall back to an older syscall. + static INVALID: AtomicBool = AtomicBool::new(false); + if !INVALID.load(SeqCst) { + let times = [super::to_timespec(&atime), super::to_timespec(&mtime)]; + + // We normally use a syscall because the `utimensat` function is documented + // as not accepting a file descriptor in the first argument (even though, on + // Linux, the syscall itself can accept a file descriptor there). + #[cfg(not(target_env = "musl"))] + let rc = unsafe { + libc::syscall( + libc::SYS_utimensat, + f.as_raw_fd(), + ptr::null::(), + times.as_ptr(), + 0, + ) + }; + // However, on musl, we call the musl libc function instead. This is because + // on newer musl versions starting with musl 1.2, `timespec` is always a 64-bit + // value even on 32-bit targets. As a result, musl internally converts their + // `timespec` values to the correct ABI before invoking the syscall. Since we + // use `timespec` from the libc crate, it matches musl's definition and not + // the Linux kernel's version (for some platforms) so we must use musl's + // `utimensat` function to properly convert the value. 
musl's `utimensat` + // function allows file descriptors in the path argument so this is fine. + #[cfg(target_env = "musl")] + let rc = unsafe { + libc::utimensat( + f.as_raw_fd(), + ptr::null::(), + times.as_ptr(), + 0, + ) + }; + + if rc == 0 { + return Ok(()); + } + let err = io::Error::last_os_error(); + if err.raw_os_error() == Some(libc::ENOSYS) { + INVALID.store(true, SeqCst); + } else { + return Err(err); + } + } + + super::utimes::set_file_handle_times(f, atime, mtime) +} + +pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), true) +} + +fn set_times( + p: &Path, + atime: Option, + mtime: Option, + symlink: bool, +) -> io::Result<()> { + let flags = if symlink { + libc::AT_SYMLINK_NOFOLLOW + } else { + 0 + }; + + // Same as the `if` statement above. + static INVALID: AtomicBool = AtomicBool::new(false); + if !INVALID.load(SeqCst) { + let p = CString::new(p.as_os_str().as_bytes())?; + let times = [super::to_timespec(&atime), super::to_timespec(&mtime)]; + let rc = unsafe { libc::utimensat(libc::AT_FDCWD, p.as_ptr(), times.as_ptr(), flags) }; + if rc == 0 { + return Ok(()); + } + let err = io::Error::last_os_error(); + if err.raw_os_error() == Some(libc::ENOSYS) { + INVALID.store(true, SeqCst); + } else { + return Err(err); + } + } + + super::utimes::set_times(p, atime, mtime, symlink) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/macos.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/macos.rs new file mode 100644 index 0000000000000000000000000000000000000000..efe92d4aed786be9ec63b159d48fd504b4bb4258 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/macos.rs @@ -0,0 +1,108 @@ +//! Beginning with macOS 10.13, `utimensat` is supported by the OS, so here, we check if the symbol exists +//! and if not, we fallback to `utimes`. 
+use crate::FileTime; +use libc::{c_char, c_int, timespec}; +use std::ffi::{CStr, CString}; +use std::fs::File; +use std::os::unix::prelude::*; +use std::path::Path; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; +use std::{io, mem}; + +pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), false) +} + +pub fn set_file_mtime(p: &Path, mtime: FileTime) -> io::Result<()> { + set_times(p, None, Some(mtime), false) +} + +pub fn set_file_atime(p: &Path, atime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), None, false) +} + +pub fn set_file_handle_times( + f: &File, + atime: Option, + mtime: Option, +) -> io::Result<()> { + // Attempt to use the `futimens` syscall, but if it's not supported by the + // current kernel then fall back to an older syscall. + if let Some(func) = futimens() { + let times = [super::to_timespec(&atime), super::to_timespec(&mtime)]; + let rc = unsafe { func(f.as_raw_fd(), times.as_ptr()) }; + if rc == 0 { + return Ok(()); + } else { + return Err(io::Error::last_os_error()); + } + } + + super::utimes::set_file_handle_times(f, atime, mtime) +} + +pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), true) +} + +fn set_times( + p: &Path, + atime: Option, + mtime: Option, + symlink: bool, +) -> io::Result<()> { + // Attempt to use the `utimensat` syscall, but if it's not supported by the + // current kernel then fall back to an older syscall. 
+ if let Some(func) = utimensat() { + let flags = if symlink { + libc::AT_SYMLINK_NOFOLLOW + } else { + 0 + }; + + let p = CString::new(p.as_os_str().as_bytes())?; + let times = [super::to_timespec(&atime), super::to_timespec(&mtime)]; + let rc = unsafe { func(libc::AT_FDCWD, p.as_ptr(), times.as_ptr(), flags) }; + if rc == 0 { + return Ok(()); + } else { + return Err(io::Error::last_os_error()); + } + } + + super::utimes::set_times(p, atime, mtime, symlink) +} + +fn utimensat() -> Option c_int> +{ + static ADDR: AtomicUsize = AtomicUsize::new(0); + unsafe { + fetch(&ADDR, CStr::from_bytes_with_nul_unchecked(b"utimensat\0")) + .map(|sym| mem::transmute(sym)) + } +} + +fn futimens() -> Option c_int> { + static ADDR: AtomicUsize = AtomicUsize::new(0); + unsafe { + fetch(&ADDR, CStr::from_bytes_with_nul_unchecked(b"futimens\0")) + .map(|sym| mem::transmute(sym)) + } +} + +fn fetch(cache: &AtomicUsize, name: &CStr) -> Option { + match cache.load(SeqCst) { + 0 => {} + 1 => return None, + n => return Some(n), + } + let sym = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr() as *const _) }; + let (val, ret) = if sym.is_null() { + (1, None) + } else { + (sym as usize, Some(sym as usize)) + }; + cache.store(val, SeqCst); + return ret; +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..f5a48b29d252783498e5a0376424a1f874a8eb7e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/mod.rs @@ -0,0 +1,101 @@ +use crate::FileTime; +use libc::{time_t, timespec}; +use std::fs; +use std::os::unix::prelude::*; + +cfg_if::cfg_if! 
{ + if #[cfg(target_os = "linux")] { + mod utimes; + mod linux; + pub use self::linux::*; + } else if #[cfg(target_os = "android")] { + mod android; + pub use self::android::*; + } else if #[cfg(target_os = "macos")] { + mod utimes; + mod macos; + pub use self::macos::*; + } else if #[cfg(any(target_os = "aix", + target_os = "solaris", + target_os = "illumos", + target_os = "emscripten", + target_os = "freebsd", + target_os = "netbsd", + target_os = "openbsd", + target_os = "haiku"))] { + mod utimensat; + pub use self::utimensat::*; + } else { + mod utimes; + pub use self::utimes::*; + } +} + +#[allow(dead_code)] +fn to_timespec(ft: &Option) -> timespec { + cfg_if::cfg_if! { + if #[cfg(any(target_os = "macos", + target_os = "illumos", + target_os = "freebsd"))] { + // https://github.com/apple/darwin-xnu/blob/a449c6a3b8014d9406c2ddbdc81795da24aa7443/bsd/sys/stat.h#L541 + // https://github.com/illumos/illumos-gate/blob/master/usr/src/boot/sys/sys/stat.h#L312 + // https://svnweb.freebsd.org/base/head/sys/sys/stat.h?view=markup#l359 + const UTIME_OMIT: i64 = -2; + } else if #[cfg(target_os = "openbsd")] { + // https://github.com/openbsd/src/blob/master/sys/sys/stat.h#L189 + const UTIME_OMIT: i64 = -1; + } else if #[cfg(target_os = "haiku")] { + // https://git.haiku-os.org/haiku/tree/headers/posix/sys/stat.h?#n106 + const UTIME_OMIT: i64 = 1000000001; + } else if #[cfg(target_os = "aix")] { + // AIX hasn't disclosed system header files yet. 
+ // https://github.com/golang/go/blob/master/src/cmd/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go#L1007 + const UTIME_OMIT: i64 = -3; + } else { + // http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/sys/stat.h?annotate=1.68.30.1 + // https://github.com/emscripten-core/emscripten/blob/master/system/include/libc/sys/stat.h#L71 + const UTIME_OMIT: i64 = 1_073_741_822; + } + } + + let mut ts: timespec = unsafe { std::mem::zeroed() }; + if let &Some(ft) = ft { + ts.tv_sec = ft.seconds() as time_t; + ts.tv_nsec = ft.nanoseconds() as _; + } else { + ts.tv_sec = 0; + ts.tv_nsec = UTIME_OMIT as _; + } + + ts +} + +pub fn from_last_modification_time(meta: &fs::Metadata) -> FileTime { + FileTime { + seconds: meta.mtime(), + nanos: meta.mtime_nsec() as u32, + } +} + +pub fn from_last_access_time(meta: &fs::Metadata) -> FileTime { + FileTime { + seconds: meta.atime(), + nanos: meta.atime_nsec() as u32, + } +} + +pub fn from_creation_time(meta: &fs::Metadata) -> Option { + #[cfg(target_os = "bitrig")] + { + use std::os::bitrig::fs::MetadataExt; + Some(FileTime { + seconds: meta.st_birthtime(), + nanos: meta.st_birthtime_nsec() as u32, + }) + } + + #[cfg(not(target_os = "bitrig"))] + { + meta.created().map(|i| i.into()).ok() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/utimensat.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/utimensat.rs new file mode 100644 index 0000000000000000000000000000000000000000..c9153a67fa3e0d0023bbf095f583d71d3ddee8eb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/utimensat.rs @@ -0,0 +1,64 @@ +use crate::FileTime; +use std::ffi::CString; +use std::fs::File; +use std::io; +use std::os::unix::prelude::*; +use std::path::Path; + +pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), false) +} + +pub fn set_file_mtime(p: &Path, mtime: FileTime) 
-> io::Result<()> { + set_times(p, None, Some(mtime), false) +} + +pub fn set_file_atime(p: &Path, atime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), None, false) +} + +pub fn set_file_handle_times( + f: &File, + atime: Option, + mtime: Option, +) -> io::Result<()> { + let times = [super::to_timespec(&atime), super::to_timespec(&mtime)]; + let rc = unsafe { libc::futimens(f.as_raw_fd(), times.as_ptr()) }; + if rc == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } +} + +pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), true) +} + +fn set_times( + p: &Path, + atime: Option, + mtime: Option, + symlink: bool, +) -> io::Result<()> { + let flags = if symlink { + if cfg!(target_os = "emscripten") { + return Err(io::Error::new( + io::ErrorKind::Other, + "emscripten does not support utimensat for symlinks", + )); + } + libc::AT_SYMLINK_NOFOLLOW + } else { + 0 + }; + + let p = CString::new(p.as_os_str().as_bytes())?; + let times = [super::to_timespec(&atime), super::to_timespec(&mtime)]; + let rc = unsafe { libc::utimensat(libc::AT_FDCWD, p.as_ptr(), times.as_ptr(), flags) }; + if rc == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/utimes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/utimes.rs new file mode 100644 index 0000000000000000000000000000000000000000..34bb882a2e78c9030e0221556c5bd5b34cfe3115 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/unix/utimes.rs @@ -0,0 +1,130 @@ +use crate::FileTime; +use std::ffi::CString; +use std::fs; +use std::io; +use std::os::unix::prelude::*; +use std::path::Path; + +#[allow(dead_code)] +pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), false) +} + 
+#[allow(dead_code)] +pub fn set_file_mtime(p: &Path, mtime: FileTime) -> io::Result<()> { + set_times(p, None, Some(mtime), false) +} + +#[allow(dead_code)] +pub fn set_file_atime(p: &Path, atime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), None, false) +} + +#[cfg(not(target_env = "uclibc"))] +#[allow(dead_code)] +pub fn set_file_handle_times( + f: &fs::File, + atime: Option, + mtime: Option, +) -> io::Result<()> { + let (atime, mtime) = match get_times(atime, mtime, || f.metadata())? { + Some(pair) => pair, + None => return Ok(()), + }; + let times = [to_timeval(&atime), to_timeval(&mtime)]; + let rc = unsafe { libc::futimes(f.as_raw_fd(), times.as_ptr()) }; + return if rc == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + }; +} + +#[cfg(target_env = "uclibc")] +#[allow(dead_code)] +pub fn set_file_handle_times( + f: &fs::File, + atime: Option, + mtime: Option, +) -> io::Result<()> { + let (atime, mtime) = match get_times(atime, mtime, || f.metadata())? { + Some(pair) => pair, + None => return Ok(()), + }; + let times = [to_timespec(&atime), to_timespec(&mtime)]; + let rc = unsafe { libc::futimens(f.as_raw_fd(), times.as_ptr()) }; + return if rc == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + }; +} + +fn get_times( + atime: Option, + mtime: Option, + current: impl FnOnce() -> io::Result, +) -> io::Result> { + let pair = match (atime, mtime) { + (Some(a), Some(b)) => (a, b), + (None, None) => return Ok(None), + (Some(a), None) => { + let meta = current()?; + (a, FileTime::from_last_modification_time(&meta)) + } + (None, Some(b)) => { + let meta = current()?; + (FileTime::from_last_access_time(&meta), b) + } + }; + Ok(Some(pair)) +} + +#[allow(dead_code)] +pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + set_times(p, Some(atime), Some(mtime), true) +} + +pub fn set_times( + p: &Path, + atime: Option, + mtime: Option, + symlink: bool, +) -> io::Result<()> { + let (atime, mtime) 
= match get_times(atime, mtime, || p.metadata())? { + Some(pair) => pair, + None => return Ok(()), + }; + let p = CString::new(p.as_os_str().as_bytes())?; + let times = [to_timeval(&atime), to_timeval(&mtime)]; + let rc = unsafe { + if symlink { + libc::lutimes(p.as_ptr(), times.as_ptr()) + } else { + libc::utimes(p.as_ptr(), times.as_ptr()) + } + }; + return if rc == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + }; +} + +fn to_timeval(ft: &FileTime) -> libc::timeval { + libc::timeval { + tv_sec: ft.seconds() as libc::time_t, + tv_usec: (ft.nanoseconds() / 1000) as libc::suseconds_t, + } +} + +#[cfg(target_env = "uclibc")] +fn to_timespec(ft: &FileTime) -> libc::timespec { + libc::timespec { + tv_sec: ft.seconds() as libc::time_t, + #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] + tv_nsec: (ft.nanoseconds()) as i64, + #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] + tv_nsec: (ft.nanoseconds()) as libc::c_long, + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/wasm.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/wasm.rs new file mode 100644 index 0000000000000000000000000000000000000000..4a7a5bbb790e466cf8375b664e48135ec8299fb2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/wasm.rs @@ -0,0 +1,40 @@ +use crate::FileTime; +use std::fs::{self, File}; +use std::io; +use std::path::Path; + +pub fn set_file_times(_p: &Path, _atime: FileTime, _mtime: FileTime) -> io::Result<()> { + Err(io::Error::new(io::ErrorKind::Other, "Wasm not implemented")) +} + +pub fn set_symlink_file_times(_p: &Path, _atime: FileTime, _mtime: FileTime) -> io::Result<()> { + Err(io::Error::new(io::ErrorKind::Other, "Wasm not implemented")) +} + +pub fn set_file_mtime(_p: &Path, _mtime: FileTime) -> io::Result<()> { + Err(io::Error::new(io::ErrorKind::Other, "Wasm not implemented")) +} + +pub fn set_file_atime(_p: &Path, _atime: 
FileTime) -> io::Result<()> { + Err(io::Error::new(io::ErrorKind::Other, "Wasm not implemented")) +} + +pub fn from_last_modification_time(_meta: &fs::Metadata) -> FileTime { + unimplemented!() +} + +pub fn from_last_access_time(_meta: &fs::Metadata) -> FileTime { + unimplemented!() +} + +pub fn from_creation_time(_meta: &fs::Metadata) -> Option { + unimplemented!() +} + +pub fn set_file_handle_times( + _f: &File, + _atime: Option, + _mtime: Option, +) -> io::Result<()> { + Err(io::Error::new(io::ErrorKind::Other, "Wasm not implemented")) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/windows.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/windows.rs new file mode 100644 index 0000000000000000000000000000000000000000..5db8a2bb076963a08f2e5a8b08982e84b1456019 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/filetime-0.2.27/src/windows.rs @@ -0,0 +1,118 @@ +use crate::FileTime; +use std::fs::{self, File, OpenOptions}; +use std::io; +use std::os::windows::prelude::*; +use std::path::Path; +use std::ptr; + +#[repr(C)] +#[allow(non_snake_case)] +struct FILETIME { + pub dwLowDateTime: u32, + pub dwHighDateTime: u32, +} + +type HANDLE = *mut core::ffi::c_void; +type BOOL = i32; +const FILE_FLAG_BACKUP_SEMANTICS: u32 = 0x2000000; +const FILE_FLAG_OPEN_REPARSE_POINT: u32 = 0x200000; + +extern "system" { + fn SetFileTime( + hfile: HANDLE, + lpcreationtime: *const FILETIME, + lplastaccesstime: *const FILETIME, + lplastwritetime: *const FILETIME, + ) -> BOOL; +} + +pub fn set_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + let f = OpenOptions::new() + .write(true) + .custom_flags(FILE_FLAG_BACKUP_SEMANTICS) + .open(p)?; + set_file_handle_times(&f, Some(atime), Some(mtime)) +} + +pub fn set_file_mtime(p: &Path, mtime: FileTime) -> io::Result<()> { + let f = OpenOptions::new() + .write(true) + .custom_flags(FILE_FLAG_BACKUP_SEMANTICS) + .open(p)?; + 
set_file_handle_times(&f, None, Some(mtime)) +} + +pub fn set_file_atime(p: &Path, atime: FileTime) -> io::Result<()> { + let f = OpenOptions::new() + .write(true) + .custom_flags(FILE_FLAG_BACKUP_SEMANTICS) + .open(p)?; + set_file_handle_times(&f, Some(atime), None) +} + +pub fn set_file_handle_times( + f: &File, + atime: Option, + mtime: Option, +) -> io::Result<()> { + let atime = atime.map(to_filetime); + let mtime = mtime.map(to_filetime); + return unsafe { + let ret = SetFileTime( + f.as_raw_handle() as HANDLE, + ptr::null(), + atime + .as_ref() + .map(|p| p as *const FILETIME) + .unwrap_or(ptr::null()), + mtime + .as_ref() + .map(|p| p as *const FILETIME) + .unwrap_or(ptr::null()), + ); + if ret != 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } + }; + + fn to_filetime(ft: FileTime) -> FILETIME { + let intervals = ft.seconds() * (1_000_000_000 / 100) + ((ft.nanoseconds() as i64) / 100); + FILETIME { + dwLowDateTime: intervals as u32, + dwHighDateTime: (intervals >> 32) as u32, + } + } +} + +pub fn set_symlink_file_times(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + use std::os::windows::fs::OpenOptionsExt; + + let f = OpenOptions::new() + .write(true) + .custom_flags(FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS) + .open(p)?; + set_file_handle_times(&f, Some(atime), Some(mtime)) +} + +pub fn from_last_modification_time(meta: &fs::Metadata) -> FileTime { + from_intervals(meta.last_write_time()) +} + +pub fn from_last_access_time(meta: &fs::Metadata) -> FileTime { + from_intervals(meta.last_access_time()) +} + +pub fn from_creation_time(meta: &fs::Metadata) -> Option { + Some(from_intervals(meta.creation_time())) +} + +fn from_intervals(ticks: u64) -> FileTime { + // Windows write times are in 100ns intervals, so do a little math to + // get it into the right representation. 
+ FileTime { + seconds: (ticks / (1_000_000_000 / 100)) as i64, + nanos: ((ticks % (1_000_000_000 / 100)) * 100) as u32, + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..c7c58dea24715f32c7f48f2663b057d20fa1354b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "d9bba94c239daa1175a5bb2958f37a5c72db3f6a" + }, + "path_in_vcs": "futures-sink" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..2207374ad04119809d151ff86a7c906fc7946421 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/Cargo.lock @@ -0,0 +1,6 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "futures-sink" +version = "0.3.32" + diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..147716bad0e5d21dfdd5599af2cbef9e15204910 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/Cargo.toml @@ -0,0 +1,54 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.36" +name = "futures-sink" +version = "0.3.32" +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +The asynchronous `Sink` trait for the futures-rs library. 
+""" +homepage = "https://rust-lang.github.io/futures-rs" +readme = "README.md" +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/futures-rs" + +[package.metadata.docs.rs] +all-features = true + +[features] +alloc = [] +default = ["std"] +std = ["alloc"] + +[lib] +name = "futures_sink" +path = "src/lib.rs" + +[dependencies] + +[lints.rust] +missing_debug_implementations = "warn" +rust_2018_idioms = "warn" +single_use_lifetimes = "warn" +unreachable_pub = "warn" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = ["cfg(futures_sanitizer)"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..3613f0bb67ad0d03c83a69576492695742b02173 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/Cargo.toml.orig @@ -0,0 +1,25 @@ +[package] +name = "futures-sink" +version = "0.3.32" +edition = "2018" +# NB: Sync with "Usage" section in README.md and core-msrv job in .github/workflows/ci.yml +rust-version = "1.36" +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/futures-rs" +homepage = "https://rust-lang.github.io/futures-rs" +description = """ +The asynchronous `Sink` trait for the futures-rs library. 
+""" + +[features] +default = ["std"] +std = ["alloc"] +alloc = [] + +[dependencies] + +[package.metadata.docs.rs] +all-features = true + +[lints] +workspace = true diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..9eb0b097f5d0b5e147323e043b3adf35949b9f5a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/LICENSE-APACHE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright (c) 2016 Alex Crichton +Copyright (c) 2017 The Tokio Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..8ad082ec4f93b748959a7634b12d25da2000c59e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/LICENSE-MIT @@ -0,0 +1,26 @@ +Copyright (c) 2016 Alex Crichton +Copyright (c) 2017 The Tokio Authors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1d683e95b56e1b114886b41189bc97aa318fe0ef --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/README.md @@ -0,0 +1,23 @@ +# futures-sink + +The asynchronous `Sink` trait for the futures-rs library. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +futures-sink = "0.3" +``` + +The current `futures-sink` requires Rust 1.36 or later. + +## License + +Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or +[MIT license](LICENSE-MIT) at your option. + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..52394e8c2af5e38d74c59070ddb4ed73a4d6289a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/futures-sink-0.3.32/src/lib.rs @@ -0,0 +1,240 @@ +//! Asynchronous sinks +//! +//! This crate contains the `Sink` trait which allows values to be sent +//! asynchronously. 
+ +#![no_std] +#![doc(test( + no_crate_inject, + attr( + deny(warnings, rust_2018_idioms, single_use_lifetimes), + allow(dead_code, unused_assignments, unused_variables) + ) +))] +#![warn(missing_docs, /* unsafe_op_in_unsafe_fn */)] // unsafe_op_in_unsafe_fn requires Rust 1.52 + +#[cfg(feature = "alloc")] +extern crate alloc; +#[cfg(feature = "std")] +extern crate std; + +use core::ops::DerefMut; +use core::pin::Pin; +use core::task::{Context, Poll}; + +/// A `Sink` is a value into which other values can be sent, asynchronously. +/// +/// Basic examples of sinks include the sending side of: +/// +/// - Channels +/// - Sockets +/// - Pipes +/// +/// In addition to such "primitive" sinks, it's typical to layer additional +/// functionality, such as buffering, on top of an existing sink. +/// +/// Sending to a sink is "asynchronous" in the sense that the value may not be +/// sent in its entirety immediately. Instead, values are sent in a two-phase +/// way: first by initiating a send, and then by polling for completion. This +/// two-phase setup is analogous to buffered writing in synchronous code, where +/// writes often succeed immediately, but internally are buffered and are +/// *actually* written only upon flushing. +/// +/// In addition, the `Sink` may be *full*, in which case it is not even possible +/// to start the sending process. +/// +/// As with `Future` and `Stream`, the `Sink` trait is built from a few core +/// required methods, and a host of default methods for working in a +/// higher-level way. The `Sink::send_all` combinator is of particular +/// importance: you can use it to send an entire stream to a sink, which is +/// the simplest way to ultimately consume a stream. +#[must_use = "sinks do nothing unless polled"] +pub trait Sink { + /// The type of value produced by the sink when an error occurs. + type Error; + + /// Attempts to prepare the `Sink` to receive a value. 
+ /// + /// This method must be called and return `Poll::Ready(Ok(()))` prior to + /// each call to `start_send`. + /// + /// This method returns `Poll::Ready` once the underlying sink is ready to + /// receive data. If this method returns `Poll::Pending`, the current task + /// is registered to be notified (via `cx.waker().wake_by_ref()`) when `poll_ready` + /// should be called again. + /// + /// In most cases, if the sink encounters an error, the sink will + /// permanently be unable to receive items. + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; + + /// Begin the process of sending a value to the sink. + /// Each call to this function must be preceded by a successful call to + /// `poll_ready` which returned `Poll::Ready(Ok(()))`. + /// + /// As the name suggests, this method only *begins* the process of sending + /// the item. If the sink employs buffering, the item isn't fully processed + /// until the buffer is fully flushed. Since sinks are designed to work with + /// asynchronous I/O, the process of actually writing out the data to an + /// underlying object takes place asynchronously. **You *must* use + /// `poll_flush` or `poll_close` in order to guarantee completion of a + /// send**. + /// + /// Implementations of `poll_ready` and `start_send` will usually involve + /// flushing behind the scenes in order to make room for new messages. + /// It is only necessary to call `poll_flush` if you need to guarantee that + /// *all* of the items placed into the `Sink` have been sent. + /// + /// In most cases, if the sink encounters an error, the sink will + /// permanently be unable to receive items. + fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error>; + + /// Flush any remaining output from this sink. + /// + /// Returns `Poll::Ready(Ok(()))` when no buffered items remain. If this + /// value is returned then it is guaranteed that all previous values sent + /// via `start_send` have been flushed. 
+ /// + /// Returns `Poll::Pending` if there is more work left to do, in which + /// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when + /// `poll_flush` should be called again. + /// + /// In most cases, if the sink encounters an error, the sink will + /// permanently be unable to receive items. + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; + + /// Flush any remaining output and close this sink, if necessary. + /// + /// Returns `Poll::Ready(Ok(()))` when no buffered items remain and the sink + /// has been successfully closed. + /// + /// Returns `Poll::Pending` if there is more work left to do, in which + /// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when + /// `poll_close` should be called again. + /// + /// If this function encounters an error, the sink should be considered to + /// have failed permanently, and no more `Sink` methods should be called. + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; +} + +impl + Unpin, Item> Sink for &mut S { + type Error = S::Error; + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut **self).poll_ready(cx) + } + + fn start_send(mut self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { + Pin::new(&mut **self).start_send(item) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut **self).poll_flush(cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut **self).poll_close(cx) + } +} + +impl Sink for Pin

+where + P: DerefMut + Unpin, + P::Target: Sink, +{ + type Error = >::Error; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.get_mut().as_mut().poll_ready(cx) + } + + fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { + self.get_mut().as_mut().start_send(item) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.get_mut().as_mut().poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.get_mut().as_mut().poll_close(cx) + } +} + +#[cfg(feature = "alloc")] +mod if_alloc { + use super::*; + use core::convert::Infallible as Never; + + impl Sink for alloc::vec::Vec { + type Error = Never; + + fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { + // TODO: impl Unpin for Vec {} + unsafe { self.get_unchecked_mut() }.push(item); + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + } + + impl Sink for alloc::collections::VecDeque { + type Error = Never; + + fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { + // TODO: impl Unpin for Vec {} + unsafe { self.get_unchecked_mut() }.push_back(item); + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + } + + impl + Unpin, Item> Sink for alloc::boxed::Box { + type Error = S::Error; + + fn poll_ready( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + Pin::new(&mut **self).poll_ready(cx) + } + + fn start_send(mut self: Pin<&mut Self>, item: 
Item) -> Result<(), Self::Error> { + Pin::new(&mut **self).start_send(item) + } + + fn poll_flush( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + Pin::new(&mut **self).poll_flush(cx) + } + + fn poll_close( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + Pin::new(&mut **self).poll_close(cx) + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/benches/buffer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/benches/buffer.rs new file mode 100644 index 0000000000000000000000000000000000000000..01f610d497b11cc28f5db6046403b140b11212b8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/benches/buffer.rs @@ -0,0 +1,123 @@ +//! Basic benchmarks +#![feature(test, maybe_uninit_uninit_array_transpose)] +extern crate test; + +use std::{ + mem::{MaybeUninit, size_of}, + slice, +}; + +// Call getrandom on a zero-initialized stack buffer +#[inline(always)] +fn bench_fill() { + let mut buf = [0u8; N]; + getrandom::fill(&mut buf).unwrap(); + test::black_box(&buf[..]); +} + +// Call fill_uninit on an uninitialized stack buffer +#[inline(always)] +fn bench_fill_uninit() { + let mut uninit = [MaybeUninit::uninit(); N]; + let buf: &[u8] = getrandom::fill_uninit(&mut uninit).unwrap(); + test::black_box(buf); +} + +#[bench] +fn bench_u32(b: &mut test::Bencher) { + #[inline(never)] + fn inner() -> u32 { + getrandom::u32().unwrap() + } + b.bytes = 4; + b.iter(inner); +} + +#[bench] +fn bench_u32_via_fill(b: &mut test::Bencher) { + #[inline(never)] + fn inner() -> u32 { + let mut res = MaybeUninit::::uninit(); + let dst: &mut [MaybeUninit] = + unsafe { slice::from_raw_parts_mut(res.as_mut_ptr().cast(), size_of::()) }; + getrandom::fill_uninit(dst).unwrap(); + unsafe { res.assume_init() } + } + b.bytes = 4; + b.iter(inner); +} + +#[bench] +fn bench_u64(b: &mut test::Bencher) { + #[inline(never)] + fn inner() -> u64 { + getrandom::u64().unwrap() + } 
+ b.bytes = 8; + b.iter(inner); +} + +#[bench] +fn bench_u64_via_fill(b: &mut test::Bencher) { + #[inline(never)] + fn inner() -> u64 { + let mut res = MaybeUninit::::uninit(); + let dst: &mut [MaybeUninit] = + unsafe { slice::from_raw_parts_mut(res.as_mut_ptr().cast(), size_of::()) }; + getrandom::fill_uninit(dst).unwrap(); + unsafe { res.assume_init() } + } + b.bytes = 8; + b.iter(inner); +} + +// We benchmark using #[inline(never)] "inner" functions for two reasons: +// - Avoiding inlining reduces a source of variance when running benchmarks. +// - It is _much_ easier to get the assembly or IR for the inner loop. +// +// For example, using cargo-show-asm (https://github.com/pacak/cargo-show-asm), +// we can get the assembly for a particular benchmark's inner loop by running: +// cargo asm --bench buffer --release buffer::p384::bench_getrandom::inner +macro_rules! bench { + ( $name:ident, $size:expr ) => { + mod $name { + #[bench] + fn bench_fill(b: &mut test::Bencher) { + #[inline(never)] + fn inner() { + super::bench_fill::<{ $size }>() + } + + b.bytes = $size as u64; + b.iter(inner); + } + #[bench] + fn bench_fill_uninit(b: &mut test::Bencher) { + #[inline(never)] + fn inner() { + super::bench_fill_uninit::<{ $size }>() + } + + b.bytes = $size as u64; + b.iter(inner); + } + } + }; +} + +// 16 bytes (128 bits) is the size of an 128-bit AES key/nonce. +bench!(aes128, 128 / 8); + +// 32 bytes (256 bits) is the seed sized used for rand::thread_rng +// and the `random` value in a ClientHello/ServerHello for TLS. +// This is also the size of a 256-bit AES/HMAC/P-256/Curve25519 key +// and/or nonce. +bench!(p256, 256 / 8); + +// A P-384/HMAC-384 key and/or nonce. +bench!(p384, 384 / 8); + +// Initializing larger buffers is not the primary use case of this library, as +// this should normally be done by a userspace CSPRNG. However, we have a test +// here to see the effects of a lower (amortized) syscall overhead. 
+bench!(page, 4096); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends.rs new file mode 100644 index 0000000000000000000000000000000000000000..95547d9d3eda0f8d5cc9aa1e3aae0580c2f5ff64 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends.rs @@ -0,0 +1,190 @@ +//! System-specific implementations. +//! +//! This module should provide `fill_inner` with the signature +//! `fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error>`. +//! The function MUST fully initialize `dest` when `Ok(())` is returned; +//! the function may need to use `sanitizer::unpoison` as well. +//! The function MUST NOT ever write uninitialized bytes into `dest`, +//! regardless of what value it returns. + +cfg_if! { + if #[cfg(getrandom_backend = "custom")] { + mod custom; + pub use custom::*; + } else if #[cfg(getrandom_backend = "linux_getrandom")] { + mod getrandom; + pub use getrandom::*; + } else if #[cfg(getrandom_backend = "linux_raw")] { + mod linux_raw; + pub use linux_raw::*; + } else if #[cfg(getrandom_backend = "rdrand")] { + mod rdrand; + pub use rdrand::*; + } else if #[cfg(getrandom_backend = "rndr")] { + mod rndr; + pub use rndr::*; + } else if #[cfg(getrandom_backend = "efi_rng")] { + mod efi_rng; + pub use efi_rng::*; + } else if #[cfg(getrandom_backend = "windows_legacy")] { + mod windows_legacy; + pub use windows_legacy::*; + } else if #[cfg(getrandom_backend = "unsupported")] { + mod unsupported; + pub use unsupported::*; + } else if #[cfg(getrandom_backend = "extern_impl")] { + pub(crate) mod extern_impl; + pub use extern_impl::*; + } else if #[cfg(all(target_os = "linux", target_env = ""))] { + mod linux_raw; + pub use linux_raw::*; + } else if #[cfg(target_os = "espidf")] { + mod esp_idf; + pub use esp_idf::*; + } else if #[cfg(any( + target_os = "haiku", + target_os = "redox", + target_os 
= "nto", + target_os = "aix", + ))] { + mod use_file; + pub use use_file::*; + } else if #[cfg(any( + target_os = "macos", + target_os = "openbsd", + target_os = "vita", + target_os = "emscripten", + ))] { + mod getentropy; + pub use getentropy::*; + } else if #[cfg(any( + // Rust supports Android API level 19 (KitKat) [0] and the next upgrade targets + // level 21 (Lollipop) [1], while `getrandom(2)` was added only in + // level 23 (Marshmallow). Note that it applies only to the "old" `target_arch`es, + // RISC-V Android targets sufficiently new API level, same will apply for potential + // new Android `target_arch`es. + // [0]: https://blog.rust-lang.org/2023/01/09/android-ndk-update-r25.html + // [1]: https://github.com/rust-lang/rust/pull/120593 + all( + target_os = "android", + any( + target_arch = "aarch64", + target_arch = "arm", + target_arch = "x86", + target_arch = "x86_64", + ), + ), + // Only on these `target_arch`es Rust supports Linux kernel versions (3.2+) + // that precede the version (3.17) in which `getrandom(2)` was added: + // https://doc.rust-lang.org/stable/rustc/platform-support.html + all( + target_os = "linux", + any( + target_arch = "aarch64", + target_arch = "arm", + target_arch = "powerpc", + target_arch = "powerpc64", + target_arch = "s390x", + target_arch = "x86", + target_arch = "x86_64", + // Minimum supported Linux kernel version for MUSL targets + // is not specified explicitly (as of Rust 1.77) and they + // are used in practice to target pre-3.17 kernels. + all( + target_env = "musl", + not( + any( + target_arch = "riscv64", + target_arch = "riscv32", + ), + ), + ), + ), + ) + ))] { + mod use_file; + mod linux_android_with_fallback; + pub use linux_android_with_fallback::*; + } else if #[cfg(any( + target_os = "android", + target_os = "linux", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "hurd", + target_os = "illumos", + target_os = "cygwin", + // Check for target_arch = "arm" to only include the 3DS. 
Does not + // include the Nintendo Switch (which is target_arch = "aarch64"). + all(target_os = "horizon", target_arch = "arm"), + ))] { + mod getrandom; + pub use getrandom::*; + } else if #[cfg(target_os = "solaris")] { + mod solaris; + pub use solaris::*; + } else if #[cfg(target_os = "netbsd")] { + mod netbsd; + pub use netbsd::*; + } else if #[cfg(target_os = "fuchsia")] { + mod fuchsia; + pub use fuchsia::*; + } else if #[cfg(any( + target_os = "ios", + target_os = "visionos", + target_os = "watchos", + target_os = "tvos", + ))] { + mod apple_other; + pub use apple_other::*; + } else if #[cfg(all(target_arch = "wasm32", target_os = "wasi"))] { + cfg_if! { + if #[cfg(target_env = "p1")] { + mod wasi_p1; + pub use wasi_p1::*; + } else { + mod wasi_p2_3; + pub use wasi_p2_3::*; + } + } + } else if #[cfg(target_os = "hermit")] { + mod hermit; + pub use hermit::*; + } else if #[cfg(all(target_arch = "x86_64", target_os = "motor"))] { + mod rdrand; + pub use rdrand::*; + } else if #[cfg(target_os = "vxworks")] { + mod vxworks; + pub use vxworks::*; + } else if #[cfg(target_os = "solid_asp3")] { + mod solid; + pub use solid::*; + } else if #[cfg(all(windows, target_vendor = "win7"))] { + mod windows_legacy; + pub use windows_legacy::*; + } else if #[cfg(windows)] { + mod windows; + pub use windows::*; + } else if #[cfg(all(target_arch = "x86_64", target_env = "sgx"))] { + mod rdrand; + pub use rdrand::*; + } else if #[cfg(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "none")))] { + cfg_if! { + if #[cfg(feature = "wasm_js")] { + mod wasm_js; + pub use wasm_js::*; + } else { + compile_error!(concat!( + "The wasm32-unknown-unknown targets are not supported by default; \ + you may need to enable the \"wasm_js\" crate feature. \ + For more information see: \ + https://docs.rs/getrandom/", env!("CARGO_PKG_VERSION"), "/#webassembly-support" + )); + } + } + } else { + compile_error!(concat!( + "target is not supported. 
You may need to define a custom backend see: \ + https://docs.rs/getrandom/", env!("CARGO_PKG_VERSION"), "/#custom-backend" + )); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/apple_other.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/apple_other.rs new file mode 100644 index 0000000000000000000000000000000000000000..c7b51c0e01341a3857036aae6c665f1263b4529b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/apple_other.rs @@ -0,0 +1,21 @@ +//! Implementation for iOS, tvOS, and watchOS where `getentropy` is unavailable. +use crate::Error; +use core::{ffi::c_void, mem::MaybeUninit}; + +pub use crate::util::{inner_u32, inner_u64}; + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + let dst_ptr = dest.as_mut_ptr().cast::(); + let ret = unsafe { libc::CCRandomGenerateBytes(dst_ptr, dest.len()) }; + if ret == libc::kCCSuccess { + Ok(()) + } else { + Err(Error::IOS_RANDOM_GEN) + } +} + +impl Error { + /// Call to `CCRandomGenerateBytes` failed. + pub(crate) const IOS_RANDOM_GEN: Error = Self::new_internal(10); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/custom.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/custom.rs new file mode 100644 index 0000000000000000000000000000000000000000..ea22de7a57d66fc86662f71396e709e267dccb98 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/custom.rs @@ -0,0 +1,13 @@ +//! An implementation which calls out to an externally defined function. 
+use crate::Error; +use core::mem::MaybeUninit; + +pub use crate::util::{inner_u32, inner_u64}; + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + unsafe extern "Rust" { + fn __getrandom_v03_custom(dest: *mut u8, len: usize) -> Result<(), Error>; + } + unsafe { __getrandom_v03_custom(dest.as_mut_ptr().cast(), dest.len()) } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/efi_rng.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/efi_rng.rs new file mode 100644 index 0000000000000000000000000000000000000000..9d5888402b729eccccf0e6ac62610d9dd1350d26 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/efi_rng.rs @@ -0,0 +1,124 @@ +//! Implementation for UEFI using EFI_RNG_PROTOCOL +use crate::Error; +use core::{ + mem::MaybeUninit, + ptr::{self, NonNull, null_mut}, + sync::atomic::{AtomicPtr, Ordering::Relaxed}, +}; +use r_efi::{ + efi::{BootServices, Handle}, + protocols::rng, +}; + +extern crate std; + +pub use crate::util::{inner_u32, inner_u64}; + +#[cfg(not(target_os = "uefi"))] +compile_error!("`efi_rng` backend can be enabled only for UEFI targets!"); + +static RNG_PROTOCOL: AtomicPtr = AtomicPtr::new(null_mut()); + +#[cold] +#[inline(never)] +fn init() -> Result, Error> { + const HANDLE_SIZE: usize = size_of::(); + + let boot_services = std::os::uefi::env::boot_services() + .ok_or(Error::BOOT_SERVICES_UNAVAILABLE)? 
+ .cast::(); + + let mut handles = [ptr::null_mut(); 16]; + // `locate_handle` operates with length in bytes + let mut buf_size = handles.len() * HANDLE_SIZE; + let mut guid = rng::PROTOCOL_GUID; + let ret = unsafe { + ((*boot_services.as_ptr()).locate_handle)( + r_efi::efi::BY_PROTOCOL, + &mut guid, + null_mut(), + &mut buf_size, + handles.as_mut_ptr(), + ) + }; + + if ret.is_error() { + return Err(Error::from_uefi_code(ret.as_usize())); + } + + let handles_len = buf_size / HANDLE_SIZE; + let handles = handles.get(..handles_len).ok_or(Error::UNEXPECTED)?; + + let system_handle = std::os::uefi::env::image_handle(); + for &handle in handles { + let mut protocol: MaybeUninit<*mut rng::Protocol> = MaybeUninit::uninit(); + + let mut protocol_guid = rng::PROTOCOL_GUID; + let ret = unsafe { + ((*boot_services.as_ptr()).open_protocol)( + handle, + &mut protocol_guid, + protocol.as_mut_ptr().cast(), + system_handle.as_ptr(), + ptr::null_mut(), + r_efi::system::OPEN_PROTOCOL_GET_PROTOCOL, + ) + }; + + let protocol = if ret.is_error() { + continue; + } else { + let protocol = unsafe { protocol.assume_init() }; + NonNull::new(protocol).ok_or(Error::UNEXPECTED)? 
+ }; + + // Try to use the acquired protocol handle + let mut buf = [0u8; 8]; + let mut alg_guid = rng::ALGORITHM_RAW; + let ret = unsafe { + ((*protocol.as_ptr()).get_rng)( + protocol.as_ptr(), + &mut alg_guid, + buf.len(), + buf.as_mut_ptr(), + ) + }; + + if ret.is_error() { + continue; + } + + RNG_PROTOCOL.store(protocol.as_ptr(), Relaxed); + return Ok(protocol); + } + Err(Error::NO_RNG_HANDLE) +} + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + let protocol = match NonNull::new(RNG_PROTOCOL.load(Relaxed)) { + Some(p) => p, + None => init()?, + }; + + let mut alg_guid = rng::ALGORITHM_RAW; + let ret = unsafe { + ((*protocol.as_ptr()).get_rng)( + protocol.as_ptr(), + &mut alg_guid, + dest.len(), + dest.as_mut_ptr().cast::(), + ) + }; + + if ret.is_error() { + Err(Error::from_uefi_code(ret.as_usize())) + } else { + Ok(()) + } +} + +impl Error { + pub(crate) const BOOT_SERVICES_UNAVAILABLE: Error = Self::new_internal(10); + pub(crate) const NO_RNG_HANDLE: Error = Self::new_internal(11); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/esp_idf.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/esp_idf.rs new file mode 100644 index 0000000000000000000000000000000000000000..6e3039df852353a6009163e8db08aaaefd47e8d8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/esp_idf.rs @@ -0,0 +1,21 @@ +//! 
Implementation for ESP-IDF +use crate::Error; +use core::{ffi::c_void, mem::MaybeUninit}; + +pub use crate::util::{inner_u32, inner_u64}; + +unsafe extern "C" { + fn esp_fill_random(buf: *mut c_void, len: usize) -> u32; +} + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + // Not that NOT enabling WiFi, BT, or the voltage noise entropy source (via `bootloader_random_enable`) + // will cause ESP-IDF to return pseudo-random numbers based on the voltage noise entropy, after the initial boot process: + // https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/system/random.html + // + // However tracking if some of these entropy sources is enabled is way too difficult to implement here + unsafe { esp_fill_random(dest.as_mut_ptr().cast(), dest.len()) }; + + Ok(()) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/extern_impl.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/extern_impl.rs new file mode 100644 index 0000000000000000000000000000000000000000..f08d4fd06bb801b4c68b3dcc3cfeb56b5b10e0a9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/extern_impl.rs @@ -0,0 +1,19 @@ +//! An implementation which calls out to an externally defined function. +use crate::Error; +use core::mem::MaybeUninit; + +/// Declares this function as an external implementation of [`fill_uninit`](crate::fill_uninit). +#[eii(fill_uninit)] +pub(crate) fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error>; + +/// Declares this function as an external implementation of [`u32`](crate::u32). +#[eii(u32)] +pub(crate) fn inner_u32() -> Result { + crate::util::inner_u32() +} + +/// Declares this function as an external implementation of [`u64`](crate::u64). 
+#[eii(u64)] +pub(crate) fn inner_u64() -> Result { + crate::util::inner_u64() +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/fuchsia.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/fuchsia.rs new file mode 100644 index 0000000000000000000000000000000000000000..f8abfbef3ac45c38b174f8be722aeb05932e63da --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/fuchsia.rs @@ -0,0 +1,16 @@ +//! Implementation for Fuchsia Zircon +use crate::Error; +use core::mem::MaybeUninit; + +pub use crate::util::{inner_u32, inner_u64}; + +#[link(name = "zircon")] +unsafe extern "C" { + fn zx_cprng_draw(buffer: *mut u8, length: usize); +} + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + unsafe { zx_cprng_draw(dest.as_mut_ptr().cast::(), dest.len()) } + Ok(()) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/getentropy.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/getentropy.rs new file mode 100644 index 0000000000000000000000000000000000000000..463c463be8be6b8f316c2d8b87e7a51ffa158907 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/getentropy.rs @@ -0,0 +1,28 @@ +//! Implementation using getentropy(2) +//! +//! Available since: +//! - macOS 10.12 +//! - OpenBSD 5.6 +//! - Emscripten 2.0.5 +//! - vita newlib since Dec 2021 +//! +//! For these targets, we use getentropy(2) because getrandom(2) doesn't exist. 
+use crate::Error; +use core::{ffi::c_void, mem::MaybeUninit}; + +pub use crate::util::{inner_u32, inner_u64}; + +#[path = "../utils/get_errno.rs"] +mod utils; + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + for chunk in dest.chunks_mut(256) { + let ret = unsafe { libc::getentropy(chunk.as_mut_ptr().cast::(), chunk.len()) }; + if ret != 0 { + let errno = utils::get_errno(); + return Err(Error::from_errno(errno)); + } + } + Ok(()) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/getrandom.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/getrandom.rs new file mode 100644 index 0000000000000000000000000000000000000000..927dc85c51391041b6163aa95b7dfb5832939fb5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/getrandom.rs @@ -0,0 +1,31 @@ +//! Implementation using getrandom(2). +//! +//! Available since: +//! - Linux Kernel 3.17, Glibc 2.25, Musl 1.1.20 +//! - Android API level 23 (Marshmallow) +//! - NetBSD 10.0 +//! - FreeBSD 12.0 +//! - illumos since Dec 2018 +//! - DragonFly 5.7 +//! - Hurd Glibc 2.31 +//! - shim-3ds since Feb 2022 +//! +//! For these platforms, we always use the default pool and never set the +//! GRND_RANDOM flag to use the /dev/random pool. On Linux/Android/Hurd, using +//! GRND_RANDOM is not recommended. On NetBSD/FreeBSD/Dragonfly/3ds, it does +//! nothing. On illumos, the default pool is used to implement getentropy(2), +//! so we assume it is acceptable here. 
+use crate::Error; +use core::mem::MaybeUninit; + +pub use crate::util::{inner_u32, inner_u64}; + +#[path = "../utils/sys_fill_exact.rs"] +mod utils; + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + utils::sys_fill_exact(dest, |buf| unsafe { + libc::getrandom(buf.as_mut_ptr().cast(), buf.len(), 0) + }) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/hermit.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/hermit.rs new file mode 100644 index 0000000000000000000000000000000000000000..aab301212a68dc493e840fa40882ec96d65a2144 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/hermit.rs @@ -0,0 +1,53 @@ +//! Implementation for Hermit +use crate::Error; +use core::mem::MaybeUninit; + +unsafe extern "C" { + fn sys_read_entropy(buffer: *mut u8, length: usize, flags: u32) -> isize; + // Note that `sys_secure_rand32/64` are implemented using `sys_read_entropy`: + // https://github.com/hermit-os/kernel/blob/430da84/src/syscalls/entropy.rs#L62-L104 + // But this may change in future and can depend on compilation target, + // so to future-proof we use these "syscalls". 
+ fn sys_secure_rand32(value: *mut u32) -> i32; + fn sys_secure_rand64(value: *mut u64) -> i32; +} + +#[inline] +pub fn inner_u32() -> Result { + let mut res = MaybeUninit::uninit(); + let ret = unsafe { sys_secure_rand32(res.as_mut_ptr()) }; + match ret { + 0 => Ok(unsafe { res.assume_init() }), + -1 => Err(Error::UNSUPPORTED), + _ => Err(Error::UNEXPECTED), + } +} + +#[inline] +pub fn inner_u64() -> Result { + let mut res = MaybeUninit::uninit(); + let ret = unsafe { sys_secure_rand64(res.as_mut_ptr()) }; + match ret { + 0 => Ok(unsafe { res.assume_init() }), + -1 => Err(Error::UNSUPPORTED), + _ => Err(Error::UNEXPECTED), + } +} + +#[inline] +pub fn fill_inner(mut dest: &mut [MaybeUninit]) -> Result<(), Error> { + while !dest.is_empty() { + let res = unsafe { sys_read_entropy(dest.as_mut_ptr().cast::(), dest.len(), 0) }; + match res { + res if res > 0 => { + let len = usize::try_from(res).map_err(|_| Error::UNEXPECTED)?; + dest = dest.get_mut(len..).ok_or(Error::UNEXPECTED)?; + } + code => { + let code = i32::try_from(code).map_err(|_| Error::UNEXPECTED)?; + return Err(Error::from_neg_error_code(code)); + } + } + } + Ok(()) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/linux_android_with_fallback.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/linux_android_with_fallback.rs new file mode 100644 index 0000000000000000000000000000000000000000..48ce628860edaf37c460a7d04a1a65227076b3f2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/linux_android_with_fallback.rs @@ -0,0 +1,101 @@ +//! 
Implementation for Linux / Android with `/dev/urandom` fallback +use super::use_file; +use crate::Error; +use core::{ + ffi::c_void, + mem::{MaybeUninit, transmute}, + ptr::NonNull, + sync::atomic::{AtomicPtr, Ordering}, +}; +use use_file::utils; + +pub use crate::util::{inner_u32, inner_u64}; + +type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) -> libc::ssize_t; + +/// Sentinel value which indicates that `libc::getrandom` either not available, +/// or not supported by kernel. +const NOT_AVAILABLE: NonNull = unsafe { NonNull::new_unchecked(usize::MAX as *mut c_void) }; + +static GETRANDOM_FN: AtomicPtr = AtomicPtr::new(core::ptr::null_mut()); + +#[cold] +#[inline(never)] +fn init() -> NonNull { + // Use static linking to `libc::getrandom` on MUSL targets and `dlsym` everywhere else + #[cfg(not(target_env = "musl"))] + let raw_ptr = { + static NAME: &[u8] = b"getrandom\0"; + let name_ptr = NAME.as_ptr().cast::(); + unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) } + }; + #[cfg(target_env = "musl")] + let raw_ptr = { + let fptr: GetRandomFn = libc::getrandom; + unsafe { transmute::(fptr) } + }; + + let res_ptr = match NonNull::new(raw_ptr) { + Some(fptr) => { + let getrandom_fn = unsafe { transmute::, GetRandomFn>(fptr) }; + let dangling_ptr = NonNull::dangling().as_ptr(); + // Check that `getrandom` syscall is supported by kernel + let res = unsafe { getrandom_fn(dangling_ptr, 0, 0) }; + if cfg!(getrandom_test_linux_fallback) { + NOT_AVAILABLE + } else if res.is_negative() { + match utils::get_errno() { + libc::ENOSYS => NOT_AVAILABLE, // No kernel support + // The fallback on EPERM is intentionally not done on Android since this workaround + // seems to be needed only for specific Linux-based products that aren't based + // on Android. See https://github.com/rust-random/getrandom/issues/229. 
+ #[cfg(target_os = "linux")] + libc::EPERM => NOT_AVAILABLE, // Blocked by seccomp + _ => fptr, + } + } else { + fptr + } + } + None => NOT_AVAILABLE, + }; + + #[cfg(getrandom_test_linux_without_fallback)] + if res_ptr == NOT_AVAILABLE { + panic!("Fallback is triggered with enabled `getrandom_test_linux_without_fallback`") + } + + GETRANDOM_FN.store(res_ptr.as_ptr(), Ordering::Release); + res_ptr +} + +// Prevent inlining of the fallback implementation +#[inline(never)] +fn use_file_fallback(dest: &mut [MaybeUninit]) -> Result<(), Error> { + use_file::fill_inner(dest) +} + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + // Despite being only a single atomic variable, we still cannot always use + // Ordering::Relaxed, as we need to make sure a successful call to `init` + // is "ordered before" any data read through the returned pointer (which + // occurs when the function is called). Our implementation mirrors that of + // the one in libstd, meaning that the use of non-Relaxed operations is + // probably unnecessary. + let raw_ptr = GETRANDOM_FN.load(Ordering::Acquire); + let fptr = match NonNull::new(raw_ptr) { + Some(p) => p, + None => init(), + }; + + if fptr == NOT_AVAILABLE { + use_file_fallback(dest) + } else { + // note: `transmute` is currently the only way to convert a pointer into a function reference + let getrandom_fn = unsafe { transmute::, GetRandomFn>(fptr) }; + utils::sys_fill_exact(dest, |buf| unsafe { + getrandom_fn(buf.as_mut_ptr().cast(), buf.len(), 0) + }) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/linux_raw.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/linux_raw.rs new file mode 100644 index 0000000000000000000000000000000000000000..31cddfc8aa0ebecd427715a6e4f66d5a421b47f3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/linux_raw.rs @@ -0,0 +1,169 @@ +//! 
Implementation for Linux / Android using `asm!`-based syscalls. +pub use crate::util::{inner_u32, inner_u64}; +use crate::{Error, MaybeUninit}; + +#[cfg(not(any(target_os = "android", target_os = "linux")))] +compile_error!("`linux_raw` backend can be enabled only for Linux/Android targets!"); + +#[path = "../utils/sanitizer.rs"] +mod utils; + +#[allow(non_upper_case_globals)] +unsafe fn getrandom_syscall(buf: *mut u8, buflen: usize, flags: u32) -> isize { + let r0; + + // Based on `rustix` and `linux-raw-sys` code. + cfg_if! { + if #[cfg(all( + target_arch = "arm", + any(target_abi = "eabi", target_abi = "eabihf"), + ))] { + const __NR_getrandom: u32 = 384; + // In thumb-mode, r7 is the frame pointer and is not permitted to be used in + // an inline asm operand, so we have to use a different register and copy it + // into r7 inside the inline asm. + // Theoretically, we could detect thumb mode in the build script, but several + // register moves are cheap enough compared to the syscall cost, so we do not + // bother with it. + unsafe { + core::arch::asm!( + "mov {tmp}, r7", + "mov r7, {nr}", + "svc 0", + "mov r7, {tmp}", + tmp = out(reg) _, + nr = const __NR_getrandom, + inlateout("r0") buf => r0, + in("r1") buflen, + in("r2") flags, + options(nostack, preserves_flags) + ); + } + } else if #[cfg(all( + target_arch = "aarch64", + any(target_abi = "", target_abi = "ilp32"), + ))] { + // According to the ILP32 patch for the kernel that hasn't yet + // been merged into the mainline, "AARCH64/ILP32 ABI uses standard + // syscall table [...] with the exceptions listed below," where + // getrandom is not mentioned as an exception. 
+ const __NR_getrandom: u32 = 278; + unsafe { + core::arch::asm!( + "svc 0", + in("x8") __NR_getrandom, + inlateout("x0") buf => r0, + in("x1") buflen, + in("x2") flags, + options(nostack, preserves_flags) + ); + } + } else if #[cfg(all( + target_arch = "loongarch64", + any(target_abi = "", target_abi = "ilp32"), + ))] { + const __NR_getrandom: u32 = 278; + unsafe { + core::arch::asm!( + "syscall 0", + in("$a7") __NR_getrandom, + inlateout("$a0") buf => r0, + in("$a1") buflen, + in("$a2") flags, + options(nostack, preserves_flags) + ); + } + } else if #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] { + const __NR_getrandom: u32 = 278; + unsafe { + core::arch::asm!( + "ecall", + in("a7") __NR_getrandom, + inlateout("a0") buf => r0, + in("a1") buflen, + in("a2") flags, + options(nostack, preserves_flags) + ); + } + } else if #[cfg(target_arch = "s390x")] { + const __NR_getrandom: u32 = 349; + unsafe { + core::arch::asm!( + "svc 0", + in("r1") __NR_getrandom, + inlateout("r2") buf => r0, + in("r3") buflen, + in("r4") flags, + options(nostack, preserves_flags) + ); + } + } else if #[cfg(target_arch = "x86")] { + const __NR_getrandom: u32 = 355; + // `int 0x80` is famously slow, but implementing vDSO is too complex + // and `sysenter`/`syscall` have their own portability issues, + // so we use the simple "legacy" way of doing syscalls. 
+ unsafe { + core::arch::asm!( + "int $$0x80", + in("eax") __NR_getrandom, + in("ebx") buf, + in("ecx") buflen, + in("edx") flags, + lateout("eax") r0, + options(nostack, preserves_flags) + ); + } + } else if #[cfg(all( + target_arch = "x86_64", + any(target_abi = "", target_abi = "x32"), + ))] { + const __X32_SYSCALL_BIT: u32 = 0x40000000; + const OFFSET: u32 = if cfg!(target_pointer_width = "32") { __X32_SYSCALL_BIT } else { 0 }; + const __NR_getrandom: u32 = OFFSET + 318; + + unsafe { + core::arch::asm!( + "syscall", + in("rax") __NR_getrandom, + in("rdi") buf, + in("rsi") buflen, + in("rdx") flags, + lateout("rax") r0, + lateout("rcx") _, + lateout("r11") _, + options(nostack, preserves_flags) + ); + } + } else { + compile_error!("`linux_raw` backend does not support this target arch"); + } + } + + r0 +} + +#[inline] +pub fn fill_inner(mut dest: &mut [MaybeUninit]) -> Result<(), Error> { + // Value of this error code is stable across all target arches. + const EINTR: isize = -4; + + loop { + let ret = unsafe { getrandom_syscall(dest.as_mut_ptr().cast(), dest.len(), 0) }; + match usize::try_from(ret) { + Ok(0) => return Err(Error::UNEXPECTED), + Ok(len) => { + let (l, r) = dest.split_at_mut_checked(len).ok_or(Error::UNEXPECTED)?; + unsafe { utils::unpoison(l) }; + dest = r; + if dest.is_empty() { + return Ok(()); + } + } + Err(_) if ret == EINTR => continue, + Err(_) => { + let code = i32::try_from(ret).map_err(|_| Error::UNEXPECTED)?; + return Err(Error::from_neg_error_code(code)); + } + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/netbsd.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/netbsd.rs new file mode 100644 index 0000000000000000000000000000000000000000..0da0d492ce6596d38a99260a88aa8fdc931fa61e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/netbsd.rs @@ -0,0 +1,78 @@ +//! Implementation for NetBSD +//! 
+//! `getrandom(2)` was introduced in NetBSD 10. To support older versions we +//! implement our own weak linkage to it, and provide a fallback based on the +//! KERN_ARND sysctl. +use crate::Error; +use core::{ + cmp, + ffi::c_void, + mem::{self, MaybeUninit}, + ptr, + sync::atomic::{AtomicPtr, Ordering}, +}; + +pub use crate::util::{inner_u32, inner_u64}; + +#[path = "../utils/sys_fill_exact.rs"] +mod utils; + +unsafe extern "C" fn polyfill_using_kern_arand( + buf: *mut c_void, + buflen: libc::size_t, + flags: libc::c_uint, +) -> libc::ssize_t { + debug_assert_eq!(flags, 0); + + const MIB_LEN: libc::c_uint = 2; + static MIB: [libc::c_int; MIB_LEN as usize] = [libc::CTL_KERN, libc::KERN_ARND]; + + // NetBSD will only return up to 256 bytes at a time, and + // older NetBSD kernels will fail on longer buffers. + let mut len = cmp::min(buflen, 256); + let ret = unsafe { libc::sysctl(MIB.as_ptr(), MIB_LEN, buf, &mut len, ptr::null(), 0) }; + + match ret { + 0 if len <= 256 => libc::ssize_t::try_from(len).expect("len is in the range of 0..=256"), + -1 => -1, + // Zero return result will be converted into `Error::UNEXPECTED` by `sys_fill_exact` + _ => 0, + } +} + +type GetRandomFn = unsafe extern "C" fn(*mut c_void, libc::size_t, libc::c_uint) -> libc::ssize_t; + +static GETRANDOM: AtomicPtr = AtomicPtr::new(ptr::null_mut()); + +#[cold] +#[inline(never)] +fn init() -> *mut c_void { + static NAME: &[u8] = b"getrandom\0"; + let name_ptr = NAME.as_ptr().cast::(); + let mut ptr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) }; + if ptr.is_null() || cfg!(getrandom_test_netbsd_fallback) { + // Verify `polyfill_using_kern_arand` has the right signature. 
+ const POLYFILL: GetRandomFn = polyfill_using_kern_arand; + ptr = POLYFILL as *mut c_void; + } + GETRANDOM.store(ptr, Ordering::Release); + ptr +} + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + // Despite being only a single atomic variable, we still cannot always use + // Ordering::Relaxed, as we need to make sure a successful call to `init` + // is "ordered before" any data read through the returned pointer (which + // occurs when the function is called). Our implementation mirrors that of + // the one in libstd, meaning that the use of non-Relaxed operations is + // probably unnecessary. + let mut fptr = GETRANDOM.load(Ordering::Acquire); + if fptr.is_null() { + fptr = init(); + } + let fptr = unsafe { mem::transmute::<*mut c_void, GetRandomFn>(fptr) }; + utils::sys_fill_exact(dest, |buf| unsafe { + fptr(buf.as_mut_ptr().cast::(), buf.len(), 0) + }) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/rdrand.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/rdrand.rs new file mode 100644 index 0000000000000000000000000000000000000000..d39c31512a4b460732311e38ddce67aaad9e2c9c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/rdrand.rs @@ -0,0 +1,195 @@ +//! RDRAND backend for x86(-64) targets +use crate::{Error, util::slice_as_uninit}; +use core::mem::{MaybeUninit, size_of}; + +#[path = "../utils/lazy.rs"] +mod lazy; + +#[cfg(not(any(target_arch = "x86_64", target_arch = "x86")))] +compile_error!("`rdrand` backend can be enabled only for x86 and x86-64 targets!"); + +cfg_if! 
{ + if #[cfg(target_arch = "x86_64")] { + use core::arch::x86_64 as arch; + use arch::_rdrand64_step as rdrand_step; + type Word = u64; + } else if #[cfg(target_arch = "x86")] { + use core::arch::x86 as arch; + use arch::_rdrand32_step as rdrand_step; + type Word = u32; + } +} + +static RDRAND_GOOD: lazy::LazyBool = lazy::LazyBool::new(); + +// Recommendation from "Intel® Digital Random Number Generator (DRNG) Software +// Implementation Guide" - Section 5.2.1 and "Intel® 64 and IA-32 Architectures +// Software Developer’s Manual" - Volume 1 - Section 7.3.17.1. +const RETRY_LIMIT: usize = 10; + +#[target_feature(enable = "rdrand")] +fn rdrand() -> Option { + for _ in 0..RETRY_LIMIT { + let mut val = 0; + // SAFETY: this function is safe to call from a `[target_feature(enable + // = "rdrand")]` context (it itself is annotated with + // `target_feature(enable = "rdrand")`) but was marked unsafe until + // https://github.com/rust-lang/stdarch/commit/59864cd which was pulled + // in via https://github.com/rust-lang/rust/commit/f2eb88b which is + // expected to be included in 1.93.0. Since our MSRV is 1.85, we need to + // use unsafe here and suppress the lint. + // + // TODO(MSRV 1.93): remove allow(unused_unsafe) and the unsafe block. + #[allow(unused_unsafe)] + if unsafe { rdrand_step(&mut val) } == 1 { + return Some(val); + } + } + None +} + +// "rdrand" target feature requires "+rdrand" flag, see https://github.com/rust-lang/rust/issues/49653. +#[cfg(all(target_env = "sgx", not(target_feature = "rdrand")))] +compile_error!( + "SGX targets require 'rdrand' target feature. Enable by using -C target-feature=+rdrand." +); + +// Run a small self-test to make sure we aren't repeating values +// Adapted from Linux's test in arch/x86/kernel/cpu/rdrand.c +// Fails with probability < 2^(-90) on 32-bit systems +#[target_feature(enable = "rdrand")] +fn self_test() -> bool { + // On AMD, RDRAND returns 0xFF...FF on failure, count it as a collision. 
+ let mut prev = Word::MAX; + let mut fails = 0; + for _ in 0..8 { + match rdrand() { + Some(val) if val == prev => fails += 1, + Some(val) => prev = val, + None => return false, + }; + } + fails <= 2 +} + +fn is_rdrand_good() -> bool { + #[cfg(not(target_feature = "rdrand"))] + { + // SAFETY: All Rust x86 targets are new enough to have CPUID, and we + // check that leaf 1 is supported before using it. + // + // TODO(MSRV 1.94): remove allow(unused_unsafe) and the unsafe blocks for `__cpuid`. + #[allow(unused_unsafe)] + let cpuid0 = unsafe { arch::__cpuid(0) }; + if cpuid0.eax < 1 { + return false; + } + #[allow(unused_unsafe)] + let cpuid1 = unsafe { arch::__cpuid(1) }; + + let vendor_id = [ + cpuid0.ebx.to_le_bytes(), + cpuid0.edx.to_le_bytes(), + cpuid0.ecx.to_le_bytes(), + ]; + if vendor_id == [*b"Auth", *b"enti", *b"cAMD"] { + let mut family = (cpuid1.eax >> 8) & 0xF; + if family == 0xF { + family += (cpuid1.eax >> 20) & 0xFF; + } + // AMD CPUs families before 17h (Zen) sometimes fail to set CF when + // RDRAND fails after suspend. Don't use RDRAND on those families. + // See https://bugzilla.redhat.com/show_bug.cgi?id=1150286 + if family < 0x17 { + return false; + } + } + + const RDRAND_FLAG: u32 = 1 << 30; + if cpuid1.ecx & RDRAND_FLAG == 0 { + return false; + } + } + + // SAFETY: We have already checked that rdrand is available. + unsafe { self_test() } +} + +#[target_feature(enable = "rdrand")] +fn rdrand_exact(dest: &mut [MaybeUninit]) -> Option<()> { + // We use chunks_exact_mut instead of chunks_mut as it allows almost all + // calls to memcpy to be elided by the compiler. 
+ let mut chunks = dest.chunks_exact_mut(size_of::()); + for chunk in chunks.by_ref() { + let src = rdrand()?.to_ne_bytes(); + chunk.copy_from_slice(slice_as_uninit(&src)); + } + + let tail = chunks.into_remainder(); + let n = tail.len(); + if n > 0 { + let src = rdrand()?.to_ne_bytes(); + tail.copy_from_slice(slice_as_uninit(&src[..n])); + } + Some(()) +} + +#[cfg(target_arch = "x86_64")] +#[target_feature(enable = "rdrand")] +fn rdrand_u32() -> Option { + rdrand().map(crate::util::truncate) +} + +#[cfg(target_arch = "x86_64")] +#[target_feature(enable = "rdrand")] +fn rdrand_u64() -> Option { + rdrand() +} + +#[cfg(target_arch = "x86")] +#[target_feature(enable = "rdrand")] +fn rdrand_u32() -> Option { + rdrand() +} + +#[cfg(target_arch = "x86")] +#[target_feature(enable = "rdrand")] +fn rdrand_u64() -> Option { + let a = rdrand()?; + let b = rdrand()?; + Some((u64::from(a) << 32) | u64::from(b)) +} + +#[inline] +pub fn inner_u32() -> Result { + if !RDRAND_GOOD.unsync_init(is_rdrand_good) { + return Err(Error::NO_RDRAND); + } + // SAFETY: After this point, we know rdrand is supported. + unsafe { rdrand_u32() }.ok_or(Error::FAILED_RDRAND) +} + +#[inline] +pub fn inner_u64() -> Result { + if !RDRAND_GOOD.unsync_init(is_rdrand_good) { + return Err(Error::NO_RDRAND); + } + // SAFETY: After this point, we know rdrand is supported. + unsafe { rdrand_u64() }.ok_or(Error::FAILED_RDRAND) +} + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + if !RDRAND_GOOD.unsync_init(is_rdrand_good) { + return Err(Error::NO_RDRAND); + } + // SAFETY: After this point, we know rdrand is supported. + unsafe { rdrand_exact(dest) }.ok_or(Error::FAILED_RDRAND) +} + +impl Error { + /// RDRAND instruction failed due to a hardware issue. + pub(crate) const FAILED_RDRAND: Error = Self::new_internal(10); + /// RDRAND instruction unsupported on this target. 
+ pub(crate) const NO_RDRAND: Error = Self::new_internal(11); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/rndr.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/rndr.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a24b49f7fed91b0e3512a1b41a390dfbf002f5a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/rndr.rs @@ -0,0 +1,147 @@ +//! RNDR register backend for aarch64 targets +//! +//! Arm Architecture Reference Manual for A-profile architecture: +//! ARM DDI 0487K.a, ID032224, D23.2.147 RNDR, Random Number +use crate::{ + Error, + util::{slice_as_uninit, truncate}, +}; +use core::arch::asm; +use core::mem::{MaybeUninit, size_of}; + +#[cfg(not(target_arch = "aarch64"))] +compile_error!("the `rndr` backend can be enabled only for AArch64 targets!"); + +const RETRY_LIMIT: usize = 5; + +/// Read a random number from the aarch64 RNDR register +/// +/// Callers must ensure that FEAT_RNG is available on the system +/// The function assumes that the RNDR register is available +/// If it fails to read a random number, it will retry up to 5 times +/// After 5 failed reads the function will return `None` +#[target_feature(enable = "rand")] +unsafe fn rndr() -> Option { + for _ in 0..RETRY_LIMIT { + let mut x: u64; + let mut nzcv: u64; + + // AArch64 RNDR register is accessible by s3_3_c2_c4_0 + unsafe { + asm!( + "mrs {x}, RNDR", + "mrs {nzcv}, NZCV", + x = out(reg) x, + nzcv = out(reg) nzcv, + ); + } + + // If the hardware returns a genuine random number, PSTATE.NZCV is set to 0b0000 + if nzcv == 0 { + return Some(x); + } + } + + None +} + +#[target_feature(enable = "rand")] +unsafe fn rndr_fill(dest: &mut [MaybeUninit]) -> Option<()> { + let mut chunks = dest.chunks_exact_mut(size_of::()); + for chunk in chunks.by_ref() { + let src = unsafe { rndr() }?.to_ne_bytes(); + 
chunk.copy_from_slice(slice_as_uninit(&src)); + } + + let tail = chunks.into_remainder(); + let n = tail.len(); + if n > 0 { + let src = unsafe { rndr() }?.to_ne_bytes(); + tail.copy_from_slice(slice_as_uninit(&src[..n])); + } + Some(()) +} + +#[cfg(target_feature = "rand")] +fn is_rndr_available() -> bool { + true +} + +#[cfg(not(target_feature = "rand"))] +fn is_rndr_available() -> bool { + #[path = "../utils/lazy.rs"] + mod lazy; + static RNDR_GOOD: lazy::LazyBool = lazy::LazyBool::new(); + + cfg_if::cfg_if! { + if #[cfg(feature = "std")] { + extern crate std; + RNDR_GOOD.unsync_init(|| std::arch::is_aarch64_feature_detected!("rand")) + } else if #[cfg(target_os = "linux")] { + /// Check whether FEAT_RNG is available on the system + /// + /// Requires the caller either be running in EL1 or be on a system supporting MRS + /// emulation. Due to the above, the implementation is currently restricted to Linux. + /// + /// Relying on runtime detection bumps minimum supported Linux kernel version to 4.11. + fn mrs_check() -> bool { + let mut id_aa64isar0: u64; + + // If FEAT_RNG is implemented, ID_AA64ISAR0_EL1.RNDR (bits 60-63) are 0b0001 + // This is okay to do from EL0 in Linux because Linux will emulate MRS as per + // https://docs.kernel.org/arch/arm64/cpu-feature-registers.html + unsafe { + asm!( + "mrs {id}, ID_AA64ISAR0_EL1", + id = out(reg) id_aa64isar0, + ); + } + + (id_aa64isar0 >> 60) & 0xf >= 1 + } + + RNDR_GOOD.unsync_init(mrs_check) + } else { + compile_error!( + "RNDR `no_std` runtime detection is currently supported only on Linux targets. \ + Either enable the `std` crate feature, or `rand` target feature at compile time." 
+ ); + } + } +} + +#[inline] +pub fn inner_u32() -> Result { + if !is_rndr_available() { + return Err(Error::RNDR_NOT_AVAILABLE); + } + // SAFETY: after this point, we know the `rand` target feature is enabled + let res = unsafe { rndr() }; + res.map(truncate).ok_or(Error::RNDR_FAILURE) +} + +#[inline] +pub fn inner_u64() -> Result { + if !is_rndr_available() { + return Err(Error::RNDR_NOT_AVAILABLE); + } + // SAFETY: after this point, we know the `rand` target feature is enabled + let res = unsafe { rndr() }; + res.ok_or(Error::RNDR_FAILURE) +} + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + if !is_rndr_available() { + return Err(Error::RNDR_NOT_AVAILABLE); + } + // SAFETY: after this point, we know the `rand` target feature is enabled + unsafe { rndr_fill(dest).ok_or(Error::RNDR_FAILURE) } +} + +impl Error { + /// RNDR register read failed due to a hardware issue. + pub(crate) const RNDR_FAILURE: Error = Self::new_internal(10); + /// RNDR register is not supported on this target. + pub(crate) const RNDR_NOT_AVAILABLE: Error = Self::new_internal(11); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/solaris.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/solaris.rs new file mode 100644 index 0000000000000000000000000000000000000000..f3e22de058cea264a5b7e61c366db1faea8a8864 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/solaris.rs @@ -0,0 +1,43 @@ +//! Solaris implementation using getrandom(2). +//! +//! While getrandom(2) has been available since Solaris 11.3, it has a few +//! quirks not present on other OSes. First, on Solaris 11.3, calls will always +//! fail if bufsz > 1024. Second, it will always either fail or completely fill +//! the buffer (returning bufsz). Third, error is indicated by returning 0, +//! rather than by returning -1. Finally, "if GRND_RANDOM is not specified +//! 
then getrandom(2) is always a non blocking call". This _might_ imply that +//! in early-boot scenarios with low entropy, getrandom(2) will not properly +//! block. To be safe, we set GRND_RANDOM, mirroring the man page examples. +//! +//! For more information, see the man page linked in lib.rs and this blog post: +//! https://blogs.oracle.com/solaris/post/solaris-new-system-calls-getentropy2-and-getrandom2 +//! which also explains why this crate should not use getentropy(2). +use crate::Error; +use core::{ffi::c_void, mem::MaybeUninit}; +use libc::___errno as errno_location; + +pub use crate::util::{inner_u32, inner_u64}; + +const MAX_BYTES: usize = 1024; + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + for chunk in dest.chunks_mut(MAX_BYTES) { + let ptr = chunk.as_mut_ptr().cast::(); + let ret = unsafe { libc::getrandom(ptr, chunk.len(), libc::GRND_RANDOM) }; + // In case the man page has a typo, we also check for negative ret. + // If getrandom(2) succeeds, it should have completely filled chunk. + match usize::try_from(ret) { + // Good. Keep going. + Ok(ret) if ret == chunk.len() => {} + // The syscall failed. + Ok(0) => { + let errno = unsafe { core::ptr::read(errno_location()) }; + return Err(Error::from_errno(errno)); + } + // All other cases should be impossible. + _ => return Err(Error::UNEXPECTED), + } + } + Ok(()) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/solid.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/solid.rs new file mode 100644 index 0000000000000000000000000000000000000000..cd3335b4ab3a2a46914b4176b2e9a052cd045fbf --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/solid.rs @@ -0,0 +1,19 @@ +//! 
Implementation for SOLID +use crate::Error; +use core::mem::MaybeUninit; + +pub use crate::util::{inner_u32, inner_u64}; + +unsafe extern "C" { + pub fn SOLID_RNG_SampleRandomBytes(buffer: *mut u8, length: usize) -> i32; +} + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + let ret = unsafe { SOLID_RNG_SampleRandomBytes(dest.as_mut_ptr().cast::(), dest.len()) }; + if ret >= 0 { + Ok(()) + } else { + Err(Error::from_neg_error_code(ret)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/unsupported.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/unsupported.rs new file mode 100644 index 0000000000000000000000000000000000000000..4ea381fc40a4c894c51671e8ebaceba246d7505a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/unsupported.rs @@ -0,0 +1,9 @@ +//! Implementation that errors at runtime. +use crate::Error; +use core::mem::MaybeUninit; + +pub use crate::util::{inner_u32, inner_u64}; + +pub fn fill_inner(_dest: &mut [MaybeUninit]) -> Result<(), Error> { + Err(Error::UNSUPPORTED) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/use_file.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/use_file.rs new file mode 100644 index 0000000000000000000000000000000000000000..a1d372a047a1b4d1ffb9da24645276eb9e229719 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/use_file.rs @@ -0,0 +1,223 @@ +//! 
Implementations that just need to read from a file +use crate::Error; +use core::{ + ffi::{CStr, c_void}, + mem::MaybeUninit, + sync::atomic::{AtomicI32, Ordering}, +}; + +#[cfg(not(any(target_os = "android", target_os = "linux")))] +pub use crate::util::{inner_u32, inner_u64}; + +#[path = "../utils/sys_fill_exact.rs"] +pub(super) mod utils; + +/// For all platforms, we use `/dev/urandom` rather than `/dev/random`. +/// For more information see the linked man pages in lib.rs. +/// - On Linux, "/dev/urandom is preferred and sufficient in all use cases". +/// - On Redox, only /dev/urandom is provided. +/// - On AIX, /dev/urandom will "provide cryptographically secure output". +/// - On Haiku and QNX Neutrino they are identical. +const FILE_PATH: &CStr = c"/dev/urandom"; + +// File descriptor is a "nonnegative integer", so we can safely use negative sentinel values. +const FD_UNINIT: libc::c_int = -1; +const FD_ONGOING_INIT: libc::c_int = -2; + +// In theory `libc::c_int` could be something other than `i32`, but for the +// targets we currently support that use `use_file`, it is always `i32`. +// If/when we add support for a target where that isn't the case, we may +// need to use a different atomic type or make other accommodations. The +// compiler will let us know if/when that is the case, because the +// `FD.store(fd)` would fail to compile. +// +// The opening of the file, by libc/libstd/etc. may write some unknown +// state into in-process memory. (Such state may include some sanitizer +// bookkeeping, or we might be operating in a unikernal-like environment +// where all the "kernel" file descriptor bookkeeping is done in our +// process.) `get_fd_locked` stores into FD using `Ordering::Release` to +// ensure any such state is synchronized. `get_fd` loads from `FD` with +// `Ordering::Acquire` to synchronize with it. 
+static FD: AtomicI32 = AtomicI32::new(FD_UNINIT); + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + let mut fd = FD.load(Ordering::Acquire); + if fd == FD_UNINIT || fd == FD_ONGOING_INIT { + fd = open_or_wait()?; + } + utils::sys_fill_exact(dest, |buf| unsafe { + libc::read(fd, buf.as_mut_ptr().cast::(), buf.len()) + }) +} + +/// Open a file in read-only mode. +fn open_readonly(path: &CStr) -> Result { + loop { + let fd = unsafe { libc::open(path.as_ptr(), libc::O_RDONLY | libc::O_CLOEXEC) }; + if fd >= 0 { + return Ok(fd); + } + let errno = utils::get_errno(); + // We should try again if open() was interrupted. + if errno != libc::EINTR { + return Err(Error::from_errno(errno)); + } + } +} + +#[cold] +#[inline(never)] +fn open_or_wait() -> Result { + loop { + match FD.load(Ordering::Acquire) { + FD_UNINIT => { + let res = FD.compare_exchange_weak( + FD_UNINIT, + FD_ONGOING_INIT, + Ordering::AcqRel, + Ordering::Relaxed, + ); + if res.is_ok() { + break; + } + } + FD_ONGOING_INIT => sync::wait(), + fd => return Ok(fd), + } + } + + let res = open_fd(); + let val = match res { + Ok(fd) => fd, + Err(_) => FD_UNINIT, + }; + FD.store(val, Ordering::Release); + + // On non-Linux targets `wait` is just 1 ms sleep, + // so we don't need any explicit wake up in addition + // to updating value of `FD`. + #[cfg(any(target_os = "android", target_os = "linux"))] + sync::wake(); + + res +} + +fn open_fd() -> Result { + #[cfg(any(target_os = "android", target_os = "linux"))] + sync::wait_until_rng_ready()?; + let fd = open_readonly(FILE_PATH)?; + debug_assert!(fd >= 0); + Ok(fd) +} + +#[cfg(not(any(target_os = "android", target_os = "linux")))] +mod sync { + /// Sleep 1 ms before checking `FD` again. + /// + /// On non-Linux targets the critical section only opens file, + /// which should not block, so in the unlikely contended case, + /// we can sleep-wait for the opening operation to finish. 
+ pub(super) fn wait() { + let rqtp = libc::timespec { + tv_sec: 0, + tv_nsec: 1_000_000, + }; + let mut rmtp = libc::timespec { + tv_sec: 0, + tv_nsec: 0, + }; + // We do not care if sleep gets interrupted, so the return value is ignored + unsafe { + libc::nanosleep(&rqtp, &mut rmtp); + } + } +} + +#[cfg(any(target_os = "android", target_os = "linux"))] +mod sync { + use super::{Error, FD, FD_ONGOING_INIT, open_readonly, utils}; + + /// Wait for atomic `FD` to change value from `FD_ONGOING_INIT` to something else. + /// + /// Futex syscall with `FUTEX_WAIT` op puts the current thread to sleep + /// until futex syscall with `FUTEX_WAKE` op gets executed for `FD`. + /// + /// For more information read: https://www.man7.org/linux/man-pages/man2/futex.2.html + pub(super) fn wait() { + let op = libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG; + let timeout_ptr = core::ptr::null::(); + let ret = unsafe { libc::syscall(libc::SYS_futex, &FD, op, FD_ONGOING_INIT, timeout_ptr) }; + // FUTEX_WAIT should return either 0 or EAGAIN error + debug_assert!({ + match ret { + 0 => true, + -1 => utils::get_errno() == libc::EAGAIN, + _ => false, + } + }); + } + + /// Wake up all threads which wait for value of atomic `FD` to change. + pub(super) fn wake() { + let op = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG; + let ret = unsafe { libc::syscall(libc::SYS_futex, &FD, op, libc::INT_MAX) }; + debug_assert!(ret >= 0); + } + + // Polls /dev/random to make sure it is ok to read from /dev/urandom. + // + // Polling avoids draining the estimated entropy from /dev/random; + // short-lived processes reading even a single byte from /dev/random could + // be problematic if they are being executed faster than entropy is being + // collected. + // + // OTOH, reading a byte instead of polling is more compatible with + // sandboxes that disallow `poll()` but which allow reading /dev/random, + // e.g. sandboxes that assume that `poll()` is for network I/O. 
This way, + // fewer applications will have to insert pre-sandbox-initialization logic. + // Often (blocking) file I/O is not allowed in such early phases of an + // application for performance and/or security reasons. + // + // It is hard to write a sandbox policy to support `libc::poll()` because + // it may invoke the `poll`, `ppoll`, `ppoll_time64` (since Linux 5.1, with + // newer versions of glibc), and/or (rarely, and probably only on ancient + // systems) `select`. depending on the libc implementation (e.g. glibc vs + // musl), libc version, potentially the kernel version at runtime, and/or + // the target architecture. + // + // BoringSSL and libstd don't try to protect against insecure output from + // `/dev/urandom'; they don't open `/dev/random` at all. + // + // OpenSSL uses `libc::select()` unless the `dev/random` file descriptor + // is too large; if it is too large then it does what we do here. + // + // libsodium uses `libc::poll` similarly to this. + pub(super) fn wait_until_rng_ready() -> Result<(), Error> { + let fd = open_readonly(c"/dev/random")?; + let mut pfd = libc::pollfd { + fd, + events: libc::POLLIN, + revents: 0, + }; + + let res = loop { + // A negative timeout means an infinite timeout. + let res = unsafe { libc::poll(&mut pfd, 1, -1) }; + if res >= 0 { + // We only used one fd, and cannot timeout. + debug_assert_eq!(res, 1); + break Ok(()); + } + let errno = utils::get_errno(); + // Assuming that `poll` is called correctly, + // on Linux it can return only EINTR and ENOMEM errors. 
+ match errno { + libc::EINTR => continue, + _ => break Err(Error::from_errno(errno)), + } + }; + unsafe { libc::close(fd) }; + res + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/vxworks.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/vxworks.rs new file mode 100644 index 0000000000000000000000000000000000000000..10d2e81bed2ce2ab1fc995b89efded27da27c586 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/vxworks.rs @@ -0,0 +1,52 @@ +//! Implementation for VxWorks +use crate::Error; +use core::{ + cmp::Ordering::{Equal, Greater, Less}, + mem::MaybeUninit, + sync::atomic::{AtomicBool, Ordering::Relaxed}, +}; + +pub use crate::util::{inner_u32, inner_u64}; + +static RNG_INIT: AtomicBool = AtomicBool::new(false); + +#[cold] +fn init() -> Result<(), Error> { + let ret = unsafe { libc::randSecure() }; + match ret.cmp(&0) { + Greater => RNG_INIT.store(true, Relaxed), + Equal => unsafe { + libc::usleep(10); + }, + Less => return Err(Error::VXWORKS_RAND_SECURE), + } + Ok(()) +} + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + while !RNG_INIT.load(Relaxed) { + init()?; + } + + // Prevent overflow of i32 + let chunk_size = usize::try_from(i32::MAX).expect("VxWorks does not support 16-bit targets"); + for chunk in dest.chunks_mut(chunk_size) { + let chunk_len: libc::c_int = chunk + .len() + .try_into() + .expect("chunk size is bounded by i32::MAX"); + let p: *mut libc::c_uchar = chunk.as_mut_ptr().cast(); + let ret = unsafe { libc::randABytes(p, chunk_len) }; + if ret != 0 { + let errno = unsafe { libc::errnoGet() }; + return Err(Error::from_errno(errno)); + } + } + Ok(()) +} + +impl Error { + /// On VxWorks, call to `randSecure` failed (random number generator is not yet initialized). 
+ pub(crate) const VXWORKS_RAND_SECURE: Error = Self::new_internal(10); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/wasi_p1.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/wasi_p1.rs new file mode 100644 index 0000000000000000000000000000000000000000..9e8a2a6bd418d320491804f4df88f418d1b4a8d3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/wasi_p1.rs @@ -0,0 +1,32 @@ +//! Implementation for WASI Preview 1 +use crate::Error; +use core::mem::MaybeUninit; + +pub use crate::util::{inner_u32, inner_u64}; + +// This linking is vendored from the wasi crate: +// https://docs.rs/wasi/0.11.0+wasi-snapshot-preview1/src/wasi/lib_generated.rs.html#2344-2350 +#[link(wasm_import_module = "wasi_snapshot_preview1")] +unsafe extern "C" { + fn random_get(arg0: i32, arg1: i32) -> i32; +} + +/// WASI p1 uses `u16` for error codes in its witx definitions: +/// https://github.com/WebAssembly/WASI/blob/38454e9e/legacy/preview1/witx/typenames.witx#L34-L39 +const MAX_ERROR_CODE: i32 = u16::MAX as i32; + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + // Based on the wasi code: + // https://docs.rs/wasi/0.11.0+wasi-snapshot-preview1/src/wasi/lib_generated.rs.html#2046-2062 + // Note that size of an allocated object can not be bigger than isize::MAX bytes. + // WASI 0.1 supports only 32-bit WASM, so casting length to `i32` is safe. 
+ #[allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap)] + let ret = unsafe { random_get(dest.as_mut_ptr() as i32, dest.len() as i32) }; + match ret { + 0 => Ok(()), + // WASI functions should return positive error codes which are smaller than `MAX_ERROR_CODE` + code if code <= MAX_ERROR_CODE => Err(Error::from_neg_error_code(-code)), + _ => Err(Error::UNEXPECTED), + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/wasi_p2_3.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/wasi_p2_3.rs new file mode 100644 index 0000000000000000000000000000000000000000..3b8ee968c0999b3d58f6177b82396527363436f9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/wasi_p2_3.rs @@ -0,0 +1,61 @@ +//! Implementation for WASIp2 and WASIp3. +use crate::Error; +use core::{mem::MaybeUninit, ptr::copy_nonoverlapping}; + +#[cfg(target_env = "p2")] +use wasip2 as wasi; + +// Workaround to silence `unexpected_cfgs` warning +// on Rust version between 1.85 and 1.91 +#[cfg(not(target_env = "p2"))] +#[cfg(target_env = "p3")] +use wasip3 as wasi; + +#[cfg(not(target_env = "p2"))] +#[cfg(not(target_env = "p3"))] +compile_error!("Unknown version of WASI (only previews 1, 2 and 3 are supported)"); + +use wasi::random::random::get_random_u64; + +#[inline] +pub fn inner_u32() -> Result { + let val = get_random_u64(); + Ok(crate::util::truncate(val)) +} + +#[inline] +pub fn inner_u64() -> Result { + Ok(get_random_u64()) +} + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + let (prefix, chunks, suffix) = unsafe { dest.align_to_mut::>() }; + + // We use `get_random_u64` instead of `get_random_bytes` because the latter creates + // an allocation due to the Wit IDL [restrictions][0]. This should be fine since + // the main use case of `getrandom` is seed generation. 
+ // + // [0]: https://github.com/WebAssembly/wasi-random/issues/27 + if !prefix.is_empty() { + let val = get_random_u64(); + let src = (&val as *const u64).cast(); + unsafe { + copy_nonoverlapping(src, prefix.as_mut_ptr(), prefix.len()); + } + } + + for dst in chunks { + dst.write(get_random_u64()); + } + + if !suffix.is_empty() { + let val = get_random_u64(); + let src = (&val as *const u64).cast(); + unsafe { + copy_nonoverlapping(src, suffix.as_mut_ptr(), suffix.len()); + } + } + + Ok(()) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/wasm_js.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/wasm_js.rs new file mode 100644 index 0000000000000000000000000000000000000000..ce49bec31a68a0b9a5be2e4cc82c7fdd282b33f5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/wasm_js.rs @@ -0,0 +1,72 @@ +//! Implementation for WASM based on Web and Node.js +use crate::Error; +use core::mem::MaybeUninit; + +pub use crate::util::{inner_u32, inner_u64}; + +#[cfg(not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "none"))))] +compile_error!("`wasm_js` backend can be enabled only for OS-less WASM targets!"); + +use wasm_bindgen::{prelude::wasm_bindgen, JsValue}; + +// Maximum buffer size allowed in `Crypto.getRandomValuesSize` is 65536 bytes. 
+// See https://developer.mozilla.org/en-US/docs/Web/API/Crypto/getRandomValues +const MAX_BUFFER_SIZE: usize = 65536; + +#[cfg(not(target_feature = "atomics"))] +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + for chunk in dest.chunks_mut(MAX_BUFFER_SIZE) { + if get_random_values(chunk).is_err() { + return Err(Error::WEB_CRYPTO); + } + } + Ok(()) +} + +#[cfg(target_feature = "atomics")] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + // getRandomValues does not work with all types of WASM memory, + // so we initially write to browser memory to avoid exceptions. + let buf_len = usize::min(dest.len(), MAX_BUFFER_SIZE); + let buf_len_u32 = buf_len + .try_into() + .expect("buffer length is bounded by MAX_BUFFER_SIZE"); + let buf = js_sys::Uint8Array::new_with_length(buf_len_u32); + for chunk in dest.chunks_mut(buf_len) { + let chunk_len = chunk + .len() + .try_into() + .expect("chunk length is bounded by MAX_BUFFER_SIZE"); + // The chunk can be smaller than buf's length, so we call to + // JS to create a smaller view of buf without allocation. + let sub_buf = if chunk_len == buf_len_u32 { + &buf + } else { + &buf.subarray(0, chunk_len) + }; + + if get_random_values(sub_buf).is_err() { + return Err(Error::WEB_CRYPTO); + } + + sub_buf.copy_to_uninit(chunk); + } + Ok(()) +} + +#[wasm_bindgen] +unsafe extern "C" { + // Crypto.getRandomValues() + #[cfg(not(target_feature = "atomics"))] + #[wasm_bindgen(js_namespace = ["globalThis", "crypto"], js_name = getRandomValues, catch)] + fn get_random_values(buf: &mut [MaybeUninit]) -> Result<(), JsValue>; + #[cfg(target_feature = "atomics")] + #[wasm_bindgen(js_namespace = ["globalThis", "crypto"], js_name = getRandomValues, catch)] + fn get_random_values(buf: &js_sys::Uint8Array) -> Result<(), JsValue>; +} + +impl Error { + /// The environment does not support the Web Crypto API. 
+ pub(crate) const WEB_CRYPTO: Error = Self::new_internal(10); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/windows.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/windows.rs new file mode 100644 index 0000000000000000000000000000000000000000..b6ea763455d083bbf83e21fb99e9bdb52f5fe898 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/windows.rs @@ -0,0 +1,57 @@ +//! Implementation for Windows 10 and later +//! +//! On Windows 10 and later, ProcessPrng "is the primary interface to the +//! user-mode per-processor PRNGs" and only requires bcryptprimitives.dll, +//! making it a better option than the other Windows RNG APIs: +//! - BCryptGenRandom: https://learn.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom +//! - Requires bcrypt.dll (which loads bcryptprimitives.dll anyway) +//! - Can cause crashes/hangs as BCrypt accesses the Windows Registry: +//! https://github.com/rust-lang/rust/issues/99341 +//! - Causes issues inside sandboxed code: +//! https://issues.chromium.org/issues/40277768 +//! - CryptGenRandom: https://learn.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-cryptgenrandom +//! - Deprecated and not available on UWP targets +//! - Requires advapi32.lib/advapi32.dll (in addition to bcryptprimitives.dll) +//! - Thin wrapper around ProcessPrng +//! - RtlGenRandom: https://learn.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom +//! - Deprecated and not available on UWP targets +//! - Requires advapi32.dll (in addition to bcryptprimitives.dll) +//! - Requires using name "SystemFunction036" +//! - Thin wrapper around ProcessPrng +//! +//! 
For more information see the Windows RNG Whitepaper: https://aka.ms/win10rng +use crate::Error; +use core::mem::MaybeUninit; + +pub use crate::util::{inner_u32, inner_u64}; + +// Binding to the Windows.Win32.Security.Cryptography.ProcessPrng API. As +// bcryptprimitives.dll lacks an import library, we use "raw-dylib". +#[cfg_attr( + target_arch = "x86", + link( + name = "bcryptprimitives", + kind = "raw-dylib", + import_name_type = "undecorated" + ) +)] +#[cfg_attr( + not(target_arch = "x86"), + link(name = "bcryptprimitives", kind = "raw-dylib") +)] +unsafe extern "system" { + fn ProcessPrng(pbdata: *mut u8, cbdata: usize) -> BOOL; +} +#[expect(clippy::upper_case_acronyms)] +type BOOL = core::ffi::c_int; +const TRUE: BOOL = 1; + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + let result = unsafe { ProcessPrng(dest.as_mut_ptr().cast::(), dest.len()) }; + // `ProcessPrng` is documented to always return TRUE. All potential errors are handled + // during loading of `BCryptPrimitive.dll`. See the "Process base PRNG" section + // in the aforementioned Windows RNG whitepaper for more information. + debug_assert!(result == TRUE); + Ok(()) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/windows_legacy.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/windows_legacy.rs new file mode 100644 index 0000000000000000000000000000000000000000..0e917015d76a5f4ddecd3a556d974835103b30ca --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/backends/windows_legacy.rs @@ -0,0 +1,48 @@ +//! Legacy implementation for Windows XP and later +//! +//! For targets where we cannot use ProcessPrng (added in Windows 10), we use +//! RtlGenRandom. See windows.rs for a more detailed discussion of the Windows +//! RNG APIs (and why we don't use BCryptGenRandom). On versions prior to +//! Windows 10, this implementation is secure. 
On Windows 10 and later, this +//! implementation behaves identically to the windows.rs implementation, except +//! that it forces the loading of an additional DLL (advapi32.dll). +//! +//! This implementation will not work on UWP targets (which lack advapi32.dll), +//! but such targets require Windows 10, so can use the standard implementation. +use crate::Error; +use core::{ffi::c_void, mem::MaybeUninit}; + +pub use crate::util::{inner_u32, inner_u64}; + +#[cfg(not(windows))] +compile_error!("`windows_legacy` backend can be enabled only for Windows targets!"); + +// Binding to the Windows.Win32.Security.Authentication.Identity.RtlGenRandom +// API. Don't use windows-targets as it doesn't support Windows 7 targets. +#[link(name = "advapi32")] +unsafe extern "system" { + #[link_name = "SystemFunction036"] + fn RtlGenRandom(randombuffer: *mut c_void, randombufferlength: u32) -> BOOLEAN; +} +#[allow(clippy::upper_case_acronyms)] +type BOOLEAN = u8; +const TRUE: BOOLEAN = 1u8; + +#[inline] +pub fn fill_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + // Prevent overflow of u32 + let chunk_size = usize::try_from(i32::MAX).expect("Windows does not support 16-bit targets"); + for chunk in dest.chunks_mut(chunk_size) { + let chunk_len = u32::try_from(chunk.len()).expect("chunk size is bounded by i32::MAX"); + let ret = unsafe { RtlGenRandom(chunk.as_mut_ptr().cast::(), chunk_len) }; + if ret != TRUE { + return Err(Error::WINDOWS_RTL_GEN_RANDOM); + } + } + Ok(()) +} + +impl Error { + /// Call to Windows [`RtlGenRandom`](https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom) failed. 
+ pub(crate) const WINDOWS_RTL_GEN_RANDOM: Error = Self::new_internal(10); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/error.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..73a827431396271a5e4f0ee7eff2ac12c598ab4c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/error.rs @@ -0,0 +1,245 @@ +#[cfg(feature = "std")] +extern crate std; + +use core::fmt; + +cfg_if::cfg_if!( + if #[cfg(target_os = "uefi")] { + // See the UEFI spec for more information: + // https://uefi.org/specs/UEFI/2.10/Apx_D_Status_Codes.html + + /// Raw error code. + /// + /// This alias mirrors unstable [`std::io::RawOsError`]. + /// + /// [`std::io::RawOsError`]: https://doc.rust-lang.org/std/io/type.RawOsError.html + pub type RawOsError = usize; + type NonZeroRawOsError = core::num::NonZeroUsize; + const UEFI_ERROR_FLAG: RawOsError = 1 << (RawOsError::BITS - 1); + } else { + /// Raw error code. + /// + /// This alias mirrors unstable [`std::io::RawOsError`]. + /// + /// [`std::io::RawOsError`]: https://doc.rust-lang.org/std/io/type.RawOsError.html + pub type RawOsError = i32; + type NonZeroRawOsError = core::num::NonZeroI32; + } +); + +/// A small and `no_std` compatible error type +/// +/// The [`Error::raw_os_error()`] will indicate if the error is from the OS, and +/// if so, which error code the OS gave the application. If such an error is +/// encountered, please consult with your system documentation. +/// +/// *If this crate's `"std"` Cargo feature is enabled*, then: +/// - [`getrandom::Error`][Error] implements +/// [`std::error::Error`](https://doc.rust-lang.org/std/error/trait.Error.html) +/// - [`std::io::Error`](https://doc.rust-lang.org/std/io/struct.Error.html) implements +/// [`From`](https://doc.rust-lang.org/std/convert/trait.From.html). 
+ +// note: on non-UEFI targets OS errors are represented as negative integers, +// while on UEFI targets OS errors have the highest bit set to 1. +#[derive(Copy, Clone, Eq, PartialEq)] +pub struct Error(NonZeroRawOsError); + +impl Error { + /// This target/platform is not supported by `getrandom`. + pub const UNSUPPORTED: Error = Self::new_internal(0); + /// The platform-specific `errno` returned a non-positive value. + pub const ERRNO_NOT_POSITIVE: Error = Self::new_internal(1); + /// Encountered an unexpected situation which should not happen in practice. + pub const UNEXPECTED: Error = Self::new_internal(2); + + /// Internal errors can be in the range of 2^16..2^17 + const INTERNAL_START: RawOsError = 1 << 16; + /// Custom errors can be in the range of 2^17..(2^17 + 2^16) + const CUSTOM_START: RawOsError = 1 << 17; + + /// Creates a new `Error` instance from a positive error code. + /// + /// Returns [`Error::ERRNO_NOT_POSITIVE`] for zero and negative error codes. + #[cfg(not(target_os = "uefi"))] + #[allow(dead_code)] + pub(super) fn from_errno(errno: i32) -> Self { + if errno > 0 { + let code = errno + .checked_neg() + .expect("Positive number can be always negated"); + Error::from_neg_error_code(code) + } else { + Error::ERRNO_NOT_POSITIVE + } + } + + /// Creates a new `Error` instance from a negative error code. + /// + /// Returns [`Error::UNEXPECTED`] for zero and positive error codes. + #[cfg(not(target_os = "uefi"))] + #[allow(dead_code)] + pub(super) fn from_neg_error_code(code: RawOsError) -> Self { + if code < 0 { + let code = NonZeroRawOsError::new(code).expect("`code` is negative"); + Self(code) + } else { + Error::UNEXPECTED + } + } + + /// Creates a new instance of an `Error` from an UEFI error code. 
+ #[cfg(target_os = "uefi")] + #[allow(dead_code)] + pub(super) fn from_uefi_code(code: RawOsError) -> Self { + if code & UEFI_ERROR_FLAG != 0 { + let code = NonZeroRawOsError::new(code).expect("The highest bit of `code` is set to 1"); + Self(code) + } else { + Self::UNEXPECTED + } + } + + /// Extract the raw OS error code (if this error came from the OS) + /// + /// This method is identical to [`std::io::Error::raw_os_error()`][1], except + /// that it works in `no_std` contexts. On most targets this method returns + /// `Option`, but some platforms (e.g. UEFI) may use a different primitive + /// type like `usize`. Consult with the [`RawOsError`] docs for more information. + /// + /// If this method returns `None`, the error value can still be formatted via + /// the `Display` implementation. + /// + /// [1]: https://doc.rust-lang.org/std/io/struct.Error.html#method.raw_os_error + /// [`RawOsError`]: https://doc.rust-lang.org/std/io/type.RawOsError.html + #[inline] + pub fn raw_os_error(self) -> Option { + let code = self.0.get(); + + // note: in this method we need to cover only backends which rely on + // `Error::{from_error_code, from_errno, from_uefi_code}` methods, + // on all other backends this method always returns `None`. + + #[cfg(target_os = "uefi")] + { + if code & UEFI_ERROR_FLAG != 0 { + Some(code) + } else { + None + } + } + + #[cfg(not(target_os = "uefi"))] + { + // On most targets `std` expects positive error codes while retrieving error strings: + // - `libc`-based targets use `strerror_r` which expects positive error codes. 
+ // - Hermit relies on the `hermit-abi` crate, which expects positive error codes: + // https://docs.rs/hermit-abi/0.4.0/src/hermit_abi/errno.rs.html#400-532 + // - WASIp1 uses the same conventions as `libc`: + // https://github.com/rust-lang/rust/blob/1.85.0/library/std/src/sys/pal/wasi/os.rs#L57-L67 + // + // The only exception is Solid, `std` expects negative system error codes, see: + // https://github.com/rust-lang/rust/blob/1.85.0/library/std/src/sys/pal/solid/error.rs#L5-L31 + if code >= 0 { + None + } else if cfg!(not(target_os = "solid_asp3")) { + code.checked_neg() + } else { + Some(code) + } + } + } + + /// Creates a new instance of an `Error` from a particular custom error code. + pub const fn new_custom(n: u16) -> Error { + // SAFETY: code > 0 as CUSTOM_START > 0 and adding `n` won't overflow `RawOsError`. + let code = Error::CUSTOM_START + (n as RawOsError); + Error(unsafe { NonZeroRawOsError::new_unchecked(code) }) + } + + /// Creates a new instance of an `Error` from a particular internal error code. + pub(crate) const fn new_internal(n: u16) -> Error { + // SAFETY: code > 0 as INTERNAL_START > 0 and adding `n` won't overflow `RawOsError`. 
+ let code = Error::INTERNAL_START + (n as RawOsError); + Error(unsafe { NonZeroRawOsError::new_unchecked(code) }) + } + + fn internal_desc(&self) -> Option<&'static str> { + let desc = match *self { + Error::UNSUPPORTED => "getrandom: this target is not supported", + Error::ERRNO_NOT_POSITIVE => "errno: did not return a positive value", + Error::UNEXPECTED => "unexpected situation", + #[cfg(any( + target_os = "ios", + target_os = "visionos", + target_os = "watchos", + target_os = "tvos", + ))] + Error::IOS_RANDOM_GEN => "SecRandomCopyBytes: iOS Security framework failure", + #[cfg(all(windows, target_vendor = "win7"))] + Error::WINDOWS_RTL_GEN_RANDOM => "RtlGenRandom: Windows system function failure", + #[cfg(all( + feature = "wasm_js", + target_arch = "wasm32", + any(target_os = "unknown", target_os = "none") + ))] + Error::WEB_CRYPTO => "Web Crypto API is unavailable", + #[cfg(target_os = "vxworks")] + Error::VXWORKS_RAND_SECURE => "randSecure: VxWorks RNG module is not initialized", + + #[cfg(any( + getrandom_backend = "rdrand", + all(target_arch = "x86_64", target_env = "sgx") + ))] + Error::FAILED_RDRAND => "RDRAND: failed multiple times: CPU issue likely", + #[cfg(any( + getrandom_backend = "rdrand", + all(target_arch = "x86_64", target_env = "sgx") + ))] + Error::NO_RDRAND => "RDRAND: instruction not supported", + + #[cfg(getrandom_backend = "rndr")] + Error::RNDR_FAILURE => "RNDR: Could not generate a random number", + #[cfg(getrandom_backend = "rndr")] + Error::RNDR_NOT_AVAILABLE => "RNDR: Register not supported", + _ => return None, + }; + Some(desc) + } +} + +impl core::error::Error for Error {} + +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut dbg = f.debug_struct("Error"); + if let Some(errno) = self.raw_os_error() { + dbg.field("os_error", &errno); + #[cfg(feature = "std")] + dbg.field("description", &std::io::Error::from_raw_os_error(errno)); + } else if let Some(desc) = self.internal_desc() { + 
dbg.field("internal_code", &self.0.get()); + dbg.field("description", &desc); + } else { + dbg.field("unknown_code", &self.0.get()); + } + dbg.finish() + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(errno) = self.raw_os_error() { + cfg_if! { + if #[cfg(feature = "std")] { + std::io::Error::from_raw_os_error(errno).fmt(f) + } else { + write!(f, "OS Error: {errno}") + } + } + } else if let Some(desc) = self.internal_desc() { + f.write_str(desc) + } else { + write!(f, "Unknown Error: {}", self.0.get()) + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/error_std_impls.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/error_std_impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..bdaabb3f5b2fb830f3c58fa046f0acba1f7e38e8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/error_std_impls.rs @@ -0,0 +1,13 @@ +extern crate std; + +use crate::Error; +use std::io; + +impl From for io::Error { + fn from(err: Error) -> Self { + match err.raw_os_error() { + Some(errno) => io::Error::from_raw_os_error(errno), + None => io::Error::other(err), + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ecec900263d3e94a92755f29ccc7c8647bc2bc5c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/lib.rs @@ -0,0 +1,159 @@ +// Overwrite links to crate items with intra-crate links +//! [`Error::UNEXPECTED`]: Error::UNEXPECTED +//! 
[`fill_uninit`]: fill_uninit + +#![no_std] +#![doc( + html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png", + html_favicon_url = "https://www.rust-lang.org/favicon.ico" +)] +#![doc = include_str!("../README.md")] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(getrandom_backend = "efi_rng", feature(uefi_std))] +#![cfg_attr(getrandom_backend = "extern_impl", feature(extern_item_impls))] + +#[macro_use] +extern crate cfg_if; + +use core::mem::MaybeUninit; + +mod backends; +mod error; +mod util; + +#[cfg(feature = "std")] +mod error_std_impls; + +/// `rand_core` adapter +#[cfg(feature = "sys_rng")] +mod sys_rng; + +#[cfg(feature = "sys_rng")] +pub use rand_core; +#[cfg(feature = "sys_rng")] +pub use sys_rng::SysRng; + +pub use crate::error::{Error, RawOsError}; + +/// Attribute macros for overwriting the core functionality of this crate. +/// +/// This allows `getrandom` to provide a default implementation and a common interface +/// for all crates to use, while giving users a safe way to override that default where required. +/// +/// Must be enabled via the `extern_impl` opt-in backend, as this functionality +/// is currently limited to nightly. +/// +/// # Examples +/// +/// ```rust +/// # use core::mem::MaybeUninit; +/// # #[cfg(getrandom_backend = "extern_impl")] +/// #[getrandom::implementation::fill_uninit] +/// fn my_fill_uninit_implementation( +/// dest: &mut [MaybeUninit] +/// ) -> Result<(), getrandom::Error> { +/// // ... +/// # let _ = dest; +/// # Err(Error::UNSUPPORTED) +/// } +/// ``` +#[cfg(getrandom_backend = "extern_impl")] +pub mod implementation { + pub use crate::backends::extern_impl::{fill_uninit, u32, u64}; +} + +/// Fill `dest` with random bytes from the system's preferred random number source. +/// +/// This function returns an error on any failure, including partial reads. We +/// make no guarantees regarding the contents of `dest` on error. 
If `dest` is +/// empty, `getrandom` immediately returns success, making no calls to the +/// underlying operating system. +/// +/// Blocking is possible, at least during early boot; see module documentation. +/// +/// In general, `getrandom` will be fast enough for interactive usage, though +/// significantly slower than a user-space CSPRNG; for the latter consider +/// [`rand::thread_rng`](https://docs.rs/rand/*/rand/fn.thread_rng.html). +/// +/// # Examples +/// +/// ``` +/// # fn main() -> Result<(), getrandom::Error> { +/// let mut buf = [0u8; 32]; +/// getrandom::fill(&mut buf)?; +/// # Ok(()) } +/// ``` +#[inline] +pub fn fill(dest: &mut [u8]) -> Result<(), Error> { + // SAFETY: The `&mut MaybeUninit<_>` reference doesn't escape, + // and `fill_uninit` guarantees it will never de-initialize + // any part of `dest`. + fill_uninit(unsafe { util::slice_as_uninit_mut(dest) })?; + Ok(()) +} + +/// Fill potentially uninitialized buffer `dest` with random bytes from +/// the system's preferred random number source and return a mutable +/// reference to those bytes. +/// +/// On successful completion this function is guaranteed to return a slice +/// which points to the same memory as `dest` and has the same length. +/// In other words, it's safe to assume that `dest` is initialized after +/// this function has returned `Ok`. +/// +/// No part of `dest` will ever be de-initialized at any point, regardless +/// of what is returned. +/// +/// # Examples +/// +/// ```ignore +/// # // We ignore this test since `uninit_array` is unstable. 
+/// #![feature(maybe_uninit_uninit_array)] +/// # fn main() -> Result<(), getrandom::Error> { +/// let mut buf = core::mem::MaybeUninit::uninit_array::<1024>(); +/// let buf: &mut [u8] = getrandom::fill_uninit(&mut buf)?; +/// # Ok(()) } +/// ``` +#[inline] +pub fn fill_uninit(dest: &mut [MaybeUninit]) -> Result<&mut [u8], Error> { + if !dest.is_empty() { + backends::fill_inner(dest)?; + } + + #[cfg(getrandom_msan)] + unsafe extern "C" { + fn __msan_unpoison(a: *mut core::ffi::c_void, size: usize); + } + + // SAFETY: `dest` has been fully initialized by `imp::fill_inner` + // since it returned `Ok`. + Ok(unsafe { util::slice_assume_init_mut(dest) }) +} + +/// Get random `u32` from the system's preferred random number source. +/// +/// # Examples +/// +/// ``` +/// # fn main() -> Result<(), getrandom::Error> { +/// let rng_seed = getrandom::u32()?; +/// # Ok(()) } +/// ``` +#[inline] +pub fn u32() -> Result { + backends::inner_u32() +} + +/// Get random `u64` from the system's preferred random number source. +/// +/// # Examples +/// +/// ``` +/// # fn main() -> Result<(), getrandom::Error> { +/// let rng_seed = getrandom::u64()?; +/// # Ok(()) } +/// ``` +#[inline] +pub fn u64() -> Result { + backends::inner_u64() +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/sys_rng.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/sys_rng.rs new file mode 100644 index 0000000000000000000000000000000000000000..74e9f32065ab2d3df1b645d4bc17d68a311accc5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/sys_rng.rs @@ -0,0 +1,55 @@ +use crate::Error; +use rand_core::{TryCryptoRng, TryRng}; + +/// A [`TryRng`] interface over the system's preferred random number source +/// +/// This is a zero-sized struct. It can be freely constructed with just `SysRng`. +/// +/// This struct is also available as [`rand::rngs::SysRng`] when using [rand]. 
+/// +/// # Usage example +/// +/// `SysRng` implements [`TryRng`]: +/// ``` +/// use getrandom::{rand_core::TryRng, SysRng}; +/// +/// let mut key = [0u8; 32]; +/// SysRng.try_fill_bytes(&mut key).unwrap(); +/// ``` +/// +/// Using it as an [`Rng`] is possible using [`UnwrapErr`]: +/// ``` +/// use getrandom::rand_core::{Rng, UnwrapErr}; +/// use getrandom::SysRng; +/// +/// let mut rng = UnwrapErr(SysRng); +/// let random_u64 = rng.next_u64(); +/// ``` +/// +/// [rand]: https://crates.io/crates/rand +/// [`rand::rngs::SysRng`]: https://docs.rs/rand/latest/rand/rngs/struct.SysRng.html +/// [`Rng`]: rand_core::Rng +/// [`UnwrapErr`]: rand_core::UnwrapErr +#[derive(Clone, Copy, Debug, Default)] +pub struct SysRng; + +impl TryRng for SysRng { + type Error = Error; + + #[inline] + fn try_next_u32(&mut self) -> Result { + crate::u32() + } + + #[inline] + fn try_next_u64(&mut self) -> Result { + crate::u64() + } + + #[inline] + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + crate::fill(dest) + } +} + +impl TryCryptoRng for SysRng {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/util.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/util.rs new file mode 100644 index 0000000000000000000000000000000000000000..ef700e85f0324d3d593b9904a9cbe32c47c7385e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/util.rs @@ -0,0 +1,72 @@ +#![allow(dead_code)] +use crate::Error; +use core::{mem::MaybeUninit, ptr, slice}; + +/// Polyfill for `maybe_uninit_slice` feature's +/// `MaybeUninit::slice_assume_init_mut`. Every element of `slice` must have +/// been initialized. +#[inline(always)] +pub unsafe fn slice_assume_init_mut(slice: &mut [MaybeUninit]) -> &mut [T] { + let ptr = ptr::from_mut(slice) as *mut [T]; + // SAFETY: `MaybeUninit` is guaranteed to be layout-compatible with `T`. 
+ unsafe { &mut *ptr } +} + +#[inline] +pub fn uninit_slice_fill_zero(slice: &mut [MaybeUninit]) -> &mut [u8] { + unsafe { ptr::write_bytes(slice.as_mut_ptr(), 0, slice.len()) }; + unsafe { slice_assume_init_mut(slice) } +} + +#[inline(always)] +pub fn slice_as_uninit(slice: &[T]) -> &[MaybeUninit] { + let ptr = ptr::from_ref(slice) as *const [MaybeUninit]; + // SAFETY: `MaybeUninit` is guaranteed to be layout-compatible with `T`. + unsafe { &*ptr } +} + +/// View an mutable initialized array as potentially-uninitialized. +/// +/// This is unsafe because it allows assigning uninitialized values into +/// `slice`, which would be undefined behavior. +#[inline(always)] +pub unsafe fn slice_as_uninit_mut(slice: &mut [T]) -> &mut [MaybeUninit] { + let ptr = ptr::from_mut(slice) as *mut [MaybeUninit]; + // SAFETY: `MaybeUninit` is guaranteed to be layout-compatible with `T`. + unsafe { &mut *ptr } +} + +/// Default implementation of `inner_u32` on top of `fill_uninit` +#[inline] +pub fn inner_u32() -> Result { + let mut res = MaybeUninit::::uninit(); + // SAFETY: the created slice has the same size as `res` + let dst = unsafe { + let p: *mut MaybeUninit = res.as_mut_ptr().cast(); + slice::from_raw_parts_mut(p, core::mem::size_of::()) + }; + crate::fill_uninit(dst)?; + // SAFETY: `dst` has been fully initialized by `imp::fill_inner` + // since it returned `Ok`. + Ok(unsafe { res.assume_init() }) +} + +/// Default implementation of `inner_u64` on top of `fill_uninit` +#[inline] +pub fn inner_u64() -> Result { + let mut res = MaybeUninit::::uninit(); + // SAFETY: the created slice has the same size as `res` + let dst = unsafe { + let p: *mut MaybeUninit = res.as_mut_ptr().cast(); + slice::from_raw_parts_mut(p, core::mem::size_of::()) + }; + crate::fill_uninit(dst)?; + // SAFETY: `dst` has been fully initialized by `imp::fill_inner` + // since it returned `Ok`. 
+ Ok(unsafe { res.assume_init() }) +} + +/// Truncates `u64` and returns the lower 32 bits as `u32` +pub(crate) fn truncate(val: u64) -> u32 { + u32::try_from(val & u64::from(u32::MAX)).expect("The higher 32 bits are masked") +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/get_errno.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/get_errno.rs new file mode 100644 index 0000000000000000000000000000000000000000..1895fc05ed9fa92f4c3729025138bedead65873b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/get_errno.rs @@ -0,0 +1,29 @@ +cfg_if! { + if #[cfg(any(target_os = "netbsd", target_os = "openbsd", target_os = "android", target_os = "cygwin"))] { + use libc::__errno as errno_location; + } else if #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "hurd", target_os = "redox", target_os = "dragonfly"))] { + use libc::__errno_location as errno_location; + } else if #[cfg(target_os = "illumos")] { + use libc::___errno as errno_location; + } else if #[cfg(any(target_os = "macos", target_os = "freebsd"))] { + use libc::__error as errno_location; + } else if #[cfg(target_os = "haiku")] { + use libc::_errnop as errno_location; + } else if #[cfg(target_os = "nto")] { + use libc::__get_errno_ptr as errno_location; + } else if #[cfg(any(all(target_os = "horizon", target_arch = "arm"), target_os = "vita"))] { + unsafe extern "C" { + // Not provided by libc: https://github.com/rust-lang/libc/issues/1995 + fn __errno() -> *mut libc::c_int; + } + use __errno as errno_location; + } else if #[cfg(target_os = "aix")] { + use libc::_Errno as errno_location; + } else { + compile_error!("errno_location is not provided for the target"); + } +} + +pub(crate) fn get_errno() -> libc::c_int { + unsafe { core::ptr::read(errno_location()) } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/lazy.rs 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/lazy.rs new file mode 100644 index 0000000000000000000000000000000000000000..b191aa6d7fde173bf6b80a5f8477e95c9d8887fc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/lazy.rs @@ -0,0 +1,64 @@ +//! Helpers built around pointer-sized atomics. +use core::sync::atomic::{AtomicUsize, Ordering}; + +// This structure represents a lazily initialized static usize value. Useful +// when it is preferable to just rerun initialization instead of locking. +// unsync_init will invoke an init() function until it succeeds, then return the +// cached value for future calls. +// +// unsync_init supports init() "failing". If the init() method returns UNINIT, +// that value will be returned as normal, but will not be cached. +// +// Users should only depend on the _value_ returned by init() functions. +// Specifically, for the following init() function: +// fn init() -> usize { +// a(); +// let v = b(); +// c(); +// v +// } +// the effects of c() or writes to shared memory will not necessarily be +// observed and additional synchronization methods may be needed. +struct LazyUsize(AtomicUsize); + +impl LazyUsize { + // The initialization is not completed. + const UNINIT: usize = usize::MAX; + + const fn new() -> Self { + Self(AtomicUsize::new(Self::UNINIT)) + } + + // Runs the init() function at most once, returning the value of some run of + // init(). Multiple callers can run their init() functions in parallel. + // init() should always return the same value, if it succeeds. + fn unsync_init(&self, init: impl FnOnce() -> usize) -> usize { + #[cold] + fn do_init(this: &LazyUsize, init: impl FnOnce() -> usize) -> usize { + let val = init(); + this.0.store(val, Ordering::Relaxed); + val + } + + // Relaxed ordering is fine, as we only have a single atomic variable. 
+ let val = self.0.load(Ordering::Relaxed); + if val != Self::UNINIT { + val + } else { + do_init(self, init) + } + } +} + +// Identical to LazyUsize except with bool instead of usize. +pub(crate) struct LazyBool(LazyUsize); + +impl LazyBool { + pub const fn new() -> Self { + Self(LazyUsize::new()) + } + + pub fn unsync_init(&self, init: impl FnOnce() -> bool) -> bool { + self.0.unsync_init(|| usize::from(init())) != 0 + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/sanitizer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/sanitizer.rs new file mode 100644 index 0000000000000000000000000000000000000000..205dc94a5fab4575175a7987ff12862da1b15300 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/sanitizer.rs @@ -0,0 +1,28 @@ +use core::mem::MaybeUninit; + +/// Unpoisons `buf` if MSAN support is enabled. +/// +/// Most backends do not need to unpoison their output. Rust language- and +/// library- provided functionality unpoisons automatically. Similarly, libc +/// either natively supports MSAN and/or MSAN hooks libc-provided functions +/// to unpoison outputs on success. Only when all of these things are +/// bypassed do we need to do it ourselves. +/// +/// The call to unpoison should be done as close to the write as possible. +/// For example, if the backend partially fills the output buffer in chunks, +/// each chunk should be unpoisoned individually. This way, the correctness of +/// the chunking logic can be validated (in part) using MSAN. +pub unsafe fn unpoison(buf: &mut [MaybeUninit]) { + cfg_if! 
{ + if #[cfg(getrandom_msan)] { + unsafe extern "C" { + fn __msan_unpoison(a: *mut core::ffi::c_void, size: usize); + } + let a = buf.as_mut_ptr().cast(); + let size = buf.len(); + unsafe { __msan_unpoison(a, size) }; + } else { + let _ = buf; + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/sys_fill_exact.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/sys_fill_exact.rs new file mode 100644 index 0000000000000000000000000000000000000000..13d7a386a3fbb2cbc311240bc168e2a8696f49ac --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/src/utils/sys_fill_exact.rs @@ -0,0 +1,41 @@ +use crate::Error; +use core::mem::MaybeUninit; + +mod get_errno; +mod sanitizer; + +pub(crate) use get_errno::get_errno; + +/// Fill a buffer by repeatedly invoking `sys_fill`. +/// +/// The `sys_fill` function: +/// - should return -1 and set errno on failure +/// - should return the number of bytes written on success +pub(crate) fn sys_fill_exact( + mut buf: &mut [MaybeUninit], + sys_fill: impl Fn(&mut [MaybeUninit]) -> libc::ssize_t, +) -> Result<(), Error> { + while !buf.is_empty() { + let res = sys_fill(buf); + match res { + res if res > 0 => { + let len = usize::try_from(res).map_err(|_| Error::UNEXPECTED)?; + let (l, r) = buf.split_at_mut_checked(len).ok_or(Error::UNEXPECTED)?; + unsafe { sanitizer::unpoison(l) }; + buf = r; + } + -1 => { + let errno = get_errno(); + // We should try again if the call was interrupted. + if errno != libc::EINTR { + return Err(Error::from_errno(errno)); + } + } + // Negative return codes not equal to -1 should be impossible. + // EOF (ret = 0) should be impossible, as the data we are reading + // should be an infinite stream of random bytes. 
+ _ => return Err(Error::UNEXPECTED), + } + } + Ok(()) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/tests/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/tests/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..48ca59d24300e9bead0a40ee9087672511b00a17 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/tests/mod.rs @@ -0,0 +1,209 @@ +//! Main `getrandom` tests +use core::mem::MaybeUninit; +use getrandom::{fill, fill_uninit}; + +#[cfg(all(feature = "wasm_js", target_arch = "wasm32", target_os = "unknown"))] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[test] +fn test_zero() { + // Test that APIs are happy with zero-length requests + fill(&mut [0u8; 0]).unwrap(); + let res = fill_uninit(&mut []).unwrap(); + assert!(res.is_empty()); +} + +trait DiffBits: Sized { + fn diff_bits(ab: (&Self, &Self)) -> usize; +} + +impl DiffBits for u8 { + fn diff_bits((a, b): (&Self, &Self)) -> usize { + (a ^ b).count_ones() as usize + } +} + +impl DiffBits for u32 { + fn diff_bits((a, b): (&Self, &Self)) -> usize { + (a ^ b).count_ones() as usize + } +} + +impl DiffBits for u64 { + fn diff_bits((a, b): (&Self, &Self)) -> usize { + (a ^ b).count_ones() as usize + } +} + +// Return the number of bits in which s1 and s2 differ +fn num_diff_bits(s1: &[T], s2: &[T]) -> usize { + assert_eq!(s1.len(), s2.len()); + s1.iter().zip(s2.iter()).map(T::diff_bits).sum() +} + +// Tests the quality of calling getrandom on two large buffers +#[test] +fn test_diff() { + const N: usize = 1000; + let mut v1 = [0u8; N]; + let mut v2 = [0u8; N]; + fill(&mut v1).unwrap(); + fill(&mut v2).unwrap(); + + let mut t1 = [MaybeUninit::uninit(); N]; + let mut t2 = [MaybeUninit::uninit(); N]; + let r1 = fill_uninit(&mut t1).unwrap(); + let r2 = fill_uninit(&mut t2).unwrap(); + assert_eq!(r1.len(), N); + assert_eq!(r2.len(), N); + + // Between 3.5 and 4.5 bits per byte should 
differ. Probability of failure: + // ~ 2^(-94) = 2 * CDF[BinomialDistribution[8000, 0.5], 3500] + let d1 = num_diff_bits(&v1, &v2); + assert!(d1 > 3500); + assert!(d1 < 4500); + let d2 = num_diff_bits(r1, r2); + assert!(d2 > 3500); + assert!(d2 < 4500); +} + +#[test] +fn test_diff_u32() { + const N: usize = 1000 / 4; + let mut v1 = [0u32; N]; + let mut v2 = [0u32; N]; + for v in v1.iter_mut() { + *v = getrandom::u32().unwrap(); + } + for v in v2.iter_mut() { + *v = getrandom::u32().unwrap(); + } + + // Between 3.5 and 4.5 bits per byte should differ. Probability of failure: + // ~ 2^(-94) = 2 * CDF[BinomialDistribution[8000, 0.5], 3500] + let d1 = num_diff_bits(&v1, &v2); + assert!(d1 > 3500); + assert!(d1 < 4500); +} + +#[test] +fn test_diff_u64() { + const N: usize = 1000 / 8; + let mut v1 = [0u64; N]; + let mut v2 = [0u64; N]; + for v in v1.iter_mut() { + *v = getrandom::u64().unwrap(); + } + for v in v2.iter_mut() { + *v = getrandom::u64().unwrap(); + } + + // Between 3.5 and 4.5 bits per byte should differ. Probability of failure: + // ~ 2^(-94) = 2 * CDF[BinomialDistribution[8000, 0.5], 3500] + let d1 = num_diff_bits(&v1, &v2); + assert!(d1 > 3500); + assert!(d1 < 4500); +} + +#[test] +fn test_small() { + const N: usize = 64; + // For each buffer size, get at least 256 bytes and check that between + // 3 and 5 bits per byte differ. 
Probability of failure: + // ~ 2^(-91) = 64 * 2 * CDF[BinomialDistribution[8*256, 0.5], 3*256] + for size in 1..=N { + let mut num_bytes = 0; + let mut diff_bits = 0; + while num_bytes < 256 { + let mut buf1 = [0u8; N]; + let mut buf2 = [0u8; N]; + + let s1 = &mut buf1[..size]; + let s2 = &mut buf2[..size]; + + fill(s1).unwrap(); + fill(s2).unwrap(); + + num_bytes += size; + diff_bits += num_diff_bits(s1, s2); + } + assert!(diff_bits > 3 * num_bytes); + assert!(diff_bits < 5 * num_bytes); + } +} + +// Tests the quality of calling getrandom repeatedly on small buffers +#[test] +fn test_small_uninit() { + const N: usize = 64; + // For each buffer size, get at least 256 bytes and check that between + // 3 and 5 bits per byte differ. Probability of failure: + // ~ 2^(-91) = 64 * 2 * CDF[BinomialDistribution[8*256, 0.5], 3*256] + for size in 1..=N { + let mut num_bytes = 0; + let mut diff_bits = 0; + while num_bytes < 256 { + let mut buf1 = [MaybeUninit::uninit(); N]; + let mut buf2 = [MaybeUninit::uninit(); N]; + + let s1 = &mut buf1[..size]; + let s2 = &mut buf2[..size]; + + let r1 = fill_uninit(s1).unwrap(); + let r2 = fill_uninit(s2).unwrap(); + assert_eq!(r1.len(), size); + assert_eq!(r2.len(), size); + + num_bytes += size; + diff_bits += num_diff_bits(r1, r2); + } + assert!(diff_bits > 3 * num_bytes); + assert!(diff_bits < 5 * num_bytes); + } +} + +#[test] +fn test_huge() { + let mut huge = [0u8; 100_000]; + fill(&mut huge).unwrap(); +} + +#[test] +fn test_huge_uninit() { + const N: usize = 100_000; + let mut huge = [MaybeUninit::uninit(); N]; + let res = fill_uninit(&mut huge).unwrap(); + assert_eq!(res.len(), N); +} + +#[test] +#[cfg_attr( + target_arch = "wasm32", + ignore = "The thread API always fails/panics on WASM" +)] +fn test_multithreading() { + extern crate std; + use std::{sync::mpsc::channel, thread, vec}; + + let mut txs = vec![]; + for _ in 0..20 { + let (tx, rx) = channel(); + txs.push(tx); + + thread::spawn(move || { + // wait until all the tasks 
are ready to go. + rx.recv().unwrap(); + let mut v = [0u8; 1000]; + + for _ in 0..100 { + fill(&mut v).unwrap(); + thread::yield_now(); + } + }); + } + + // start all the tasks + for tx in txs.iter() { + tx.send(()).unwrap(); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/tests/sys_rng.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/tests/sys_rng.rs new file mode 100644 index 0000000000000000000000000000000000000000..d921a42f8041992489c65c161c576a3af7914667 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/getrandom-0.4.1/tests/sys_rng.rs @@ -0,0 +1,18 @@ +//! Tests for `SysRng` +#![cfg(feature = "sys_rng")] + +use getrandom::SysRng; +use getrandom::rand_core::TryRng; + +#[test] +fn test_sys_rng() { + let x = SysRng.try_next_u64().unwrap(); + let y = SysRng.try_next_u64().unwrap(); + assert!(x != 0); + assert!(x != y); +} + +#[test] +fn test_construction() { + assert!(SysRng.try_next_u64().unwrap() != 0); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/hermit-abi-0.5.2/src/errno.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/hermit-abi-0.5.2/src/errno.rs new file mode 100644 index 0000000000000000000000000000000000000000..1c4a320afa69419fb5f5a67235f70db70780daf6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/hermit-abi-0.5.2/src/errno.rs @@ -0,0 +1,532 @@ +/// Operation not permitted +pub const EPERM: i32 = 1; + +/// No such file or directory +pub const ENOENT: i32 = 2; + +/// No such process +pub const ESRCH: i32 = 3; + +/// Interrupted system call +pub const EINTR: i32 = 4; + +/// I/O error +pub const EIO: i32 = 5; + +/// No such device or address +pub const ENXIO: i32 = 6; + +/// Argument list too long +pub const E2BIG: i32 = 7; + +/// Exec format error +pub const ENOEXEC: i32 = 8; + +/// Bad file number +pub const EBADF: i32 = 9; + +/// No child processes +pub const ECHILD: i32 = 10; + +/// Try again +pub const 
EAGAIN: i32 = 11; + +/// Out of memory +pub const ENOMEM: i32 = 12; + +/// Permission denied +pub const EACCES: i32 = 13; + +/// Bad address +pub const EFAULT: i32 = 14; + +/// Block device required +pub const ENOTBLK: i32 = 15; + +/// Device or resource busy +pub const EBUSY: i32 = 16; + +/// File exists +pub const EEXIST: i32 = 17; + +/// Cross-device link +pub const EXDEV: i32 = 18; + +/// No such device +pub const ENODEV: i32 = 19; + +/// Not a directory +pub const ENOTDIR: i32 = 20; + +/// Is a directory +pub const EISDIR: i32 = 21; + +/// Invalid argument +pub const EINVAL: i32 = 22; + +/// File table overflow +pub const ENFILE: i32 = 23; + +/// Too many open files +pub const EMFILE: i32 = 24; + +/// Not a typewriter +pub const ENOTTY: i32 = 25; + +/// Text file busy +pub const ETXTBSY: i32 = 26; + +/// File too large +pub const EFBIG: i32 = 27; + +/// No space left on device +pub const ENOSPC: i32 = 28; + +/// Illegal seek +pub const ESPIPE: i32 = 29; + +/// Read-only file system +pub const EROFS: i32 = 30; + +/// Too many links +pub const EMLINK: i32 = 31; + +/// Broken pipe +pub const EPIPE: i32 = 32; + +/// Math argument out of domain of func +pub const EDOM: i32 = 33; + +/// Math result not representable +pub const ERANGE: i32 = 34; + +/// Resource deadlock would occur +pub const EDEADLK: i32 = 35; + +/// File name too long +pub const ENAMETOOLONG: i32 = 36; + +/// No record locks available +pub const ENOLCK: i32 = 37; + +/// Function not implemented +pub const ENOSYS: i32 = 38; + +/// Directory not empty +pub const ENOTEMPTY: i32 = 39; + +/// Too many symbolic links encountered +pub const ELOOP: i32 = 40; + +/// Operation would block +pub const EWOULDBLOCK: i32 = EAGAIN; + +/// No message of desired type +pub const ENOMSG: i32 = 42; + +/// Identifier removed +pub const EIDRM: i32 = 43; + +/// Channel number out of range +pub const ECHRNG: i32 = 44; + +/// Level 2 not synchronized +pub const EL2NSYNC: i32 = 45; + +/// Level 3 halted +pub const EL3HLT: 
i32 = 46; + +/// Level 3 reset +pub const EL3RST: i32 = 47; + +/// Link number out of range +pub const ELNRNG: i32 = 48; + +/// Protocol driver not attached +pub const EUNATCH: i32 = 49; + +/// No CSI structure available +pub const ENOCSI: i32 = 50; + +/// Level 2 halted +pub const EL2HLT: i32 = 51; + +/// Invalid exchange +pub const EBADE: i32 = 52; + +/// Invalid request descriptor +pub const EBADR: i32 = 53; + +/// Exchange full +pub const EXFULL: i32 = 54; + +/// No anode +pub const ENOANO: i32 = 55; + +/// Invalid request code +pub const EBADRQC: i32 = 56; + +/// Invalid slot +pub const EBADSLT: i32 = 57; + +pub const EDEADLOCK: i32 = EDEADLK; + +/// Bad font file format +pub const EBFONT: i32 = 59; + +/// Device not a stream +pub const ENOSTR: i32 = 60; + +/// No data available +pub const ENODATA: i32 = 61; + +/// Timer expired +pub const ETIME: i32 = 62; + +/// Out of streams resources +pub const ENOSR: i32 = 63; + +/// Machine is not on the network +pub const ENONET: i32 = 64; + +/// Package not installed +pub const ENOPKG: i32 = 65; + +/// Object is remote +pub const EREMOTE: i32 = 66; + +/// Link has been severed +pub const ENOLINK: i32 = 67; + +/// Advertise error +pub const EADV: i32 = 68; + +/// Srmount error +pub const ESRMNT: i32 = 69; + +/// Communication error on send +pub const ECOMM: i32 = 70; + +/// Protocol error +pub const EPROTO: i32 = 71; + +/// Multihop attempted +pub const EMULTIHOP: i32 = 72; + +/// RFS specific error +pub const EDOTDOT: i32 = 73; + +/// Not a data message +pub const EBADMSG: i32 = 74; + +/// Value too large for defined data type +pub const EOVERFLOW: i32 = 75; + +/// Name not unique on network +pub const ENOTUNIQ: i32 = 76; + +/// File descriptor in bad state +pub const EBADFD: i32 = 77; + +/// Remote address changed +pub const EREMCHG: i32 = 78; + +/// Can not access a needed shared library +pub const ELIBACC: i32 = 79; + +/// Accessing a corrupted shared library +pub const ELIBBAD: i32 = 80; + +/// .lib section in 
a.out corrupted +pub const ELIBSCN: i32 = 81; + +/// Attempting to link in too many shared libraries +pub const ELIBMAX: i32 = 82; + +/// Cannot exec a shared library directly +pub const ELIBEXEC: i32 = 83; + +/// Illegal byte sequence +pub const EILSEQ: i32 = 84; + +/// Interrupted system call should be restarted +pub const ERESTART: i32 = 85; + +/// Streams pipe error +pub const ESTRPIPE: i32 = 86; + +/// Too many users +pub const EUSERS: i32 = 87; + +/// Socket operation on non-socket +pub const ENOTSOCK: i32 = 88; + +/// Destination address required +pub const EDESTADDRREQ: i32 = 89; + +/// Message too long +pub const EMSGSIZE: i32 = 90; + +/// Protocol wrong type for socket +pub const EPROTOTYPE: i32 = 91; + +/// Protocol not available +pub const ENOPROTOOPT: i32 = 92; + +/// Protocol not supported +pub const EPROTONOSUPPORT: i32 = 93; + +/// Socket type not supported +pub const ESOCKTNOSUPPORT: i32 = 94; + +/// Operation not supported on transport endpoint +pub const EOPNOTSUPP: i32 = 95; + +/// Protocol family not supported +pub const EPFNOSUPPORT: i32 = 96; + +/// Address family not supported by protocol +pub const EAFNOSUPPORT: i32 = 97; + +/// Address already in use +pub const EADDRINUSE: i32 = 98; + +/// Cannot assign requested address +pub const EADDRNOTAVAIL: i32 = 99; + +/// Network is down +pub const ENETDOWN: i32 = 100; + +/// Network is unreachable +pub const ENETUNREACH: i32 = 101; + +/// Network dropped connection because of reset +pub const ENETRESET: i32 = 102; + +/// Software caused connection abort +pub const ECONNABORTED: i32 = 103; + +/// Connection reset by peer +pub const ECONNRESET: i32 = 104; + +/// No buffer space available +pub const ENOBUFS: i32 = 105; + +/// Transport endpoint is already connected +pub const EISCONN: i32 = 106; + +/// Transport endpoint is not connected +pub const ENOTCONN: i32 = 107; + +/// Cannot send after transport endpoint shutdown +pub const ESHUTDOWN: i32 = 108; + +/// Too many references: cannot splice +pub 
const ETOOMANYREFS: i32 = 109; + +/// Connection timed out +pub const ETIMEDOUT: i32 = 110; + +/// Connection refused +pub const ECONNREFUSED: i32 = 111; + +/// Host is down +pub const EHOSTDOWN: i32 = 112; + +/// No route to host +pub const EHOSTUNREACH: i32 = 113; + +/// Operation already in progress +pub const EALREADY: i32 = 114; + +/// Operation now in progress +pub const EINPROGRESS: i32 = 115; + +/// Stale file handle +pub const ESTALE: i32 = 116; + +/// Structure needs cleaning +pub const EUCLEAN: i32 = 117; + +/// Not a XENIX named type file +pub const ENOTNAM: i32 = 118; + +/// No XENIX semaphores available +pub const ENAVAIL: i32 = 119; + +/// Is a named type file +pub const EISNAM: i32 = 120; + +/// Remote I/O error +pub const EREMOTEIO: i32 = 121; + +/// Quota exceeded +pub const EDQUOT: i32 = 122; + +/// No medium found +pub const ENOMEDIUM: i32 = 123; + +/// Wrong medium type +pub const EMEDIUMTYPE: i32 = 124; + +/// Operation Canceled +pub const ECANCELED: i32 = 125; + +/// Required key not available +pub const ENOKEY: i32 = 126; + +/// Key has expired +pub const EKEYEXPIRED: i32 = 127; + +/// Key has been revoked +pub const EKEYREVOKED: i32 = 128; + +/// Key was rejected by service +pub const EKEYREJECTED: i32 = 129; + +/// Robust mutexes: Owner died +pub const EOWNERDEAD: i32 = 130; + +/// Robust mutexes: State not recoverable +pub const ENOTRECOVERABLE: i32 = 131; + +/// Robust mutexes: Operation not possible due to RF-kill +pub const ERFKILL: i32 = 132; + +/// Robust mutexes: Memory page has hardware error +pub const EHWPOISON: i32 = 133; + +/// Converts an error number to a corresponding error string +pub fn error_string(errno: i32) -> &'static str { + match errno { + 0 => "Operation successful", + EPERM => "Operation not permitted", + ENOENT => "No such file or directory", + ESRCH => "No such process", + EINTR => "Interrupted system call", + EIO => "I/O error", + ENXIO => "No such device or address", + E2BIG => "Argument list too long", + 
ENOEXEC => "Exec format error", + EBADF => "Bad file number", + ECHILD => "No child processes", + EAGAIN => "Try again", + ENOMEM => "Out of memory", + EACCES => "Permission denied", + EFAULT => "Bad address", + ENOTBLK => "Block device required", + EBUSY => "Device or resource busy", + EEXIST => "File exists", + EXDEV => "Cross-device link", + ENODEV => "No such device", + ENOTDIR => "Not a directory", + EISDIR => "Is a directory", + EINVAL => "Invalid argument", + ENFILE => "File table overflow", + EMFILE => "Too many open files", + ENOTTY => "Not a typewriter", + ETXTBSY => "Text file busy", + EFBIG => "File too large", + ENOSPC => "No space left on device", + ESPIPE => "Illegal seek", + EROFS => "Read-only file system", + EMLINK => "Too many links", + EPIPE => "Broken pipe", + EDOM => "Math argument out of domain of func", + ERANGE => "Math result not representable", + EDEADLK => "Resource deadlock would occur", + ENAMETOOLONG => "File name too long", + ENOLCK => "No record locks available", + ENOSYS => "Function not implemented", + ENOTEMPTY => "Directory not empty", + ELOOP => "Too many symbolic links encountered", + ENOMSG => "No message of desired type", + EIDRM => "Identifier removed", + ECHRNG => "Channel number out of range", + EL2NSYNC => "Level 2 not synchronized", + EL3HLT => "Level 3 halted", + EL3RST => "Level 3 reset", + ELNRNG => "Link number out of range", + EUNATCH => "Protocol driver not attached", + ENOCSI => "No CSI structure available", + EL2HLT => "Level 2 halted", + EBADE => "Invalid exchange", + EBADR => "Invalid request descriptor", + EXFULL => "Exchange full", + ENOANO => "No anode", + EBADRQC => "Invalid request code", + EBADSLT => "Invalid slot", + EBFONT => "Bad font file format", + ENOSTR => "Device not a stream", + ENODATA => "No data available", + ETIME => "Timer expired", + ENOSR => "Out of streams resources", + ENONET => "Machine is not on the network", + ENOPKG => "Package not installed", + EREMOTE => "Object is remote", + 
ENOLINK => "Link has been severed", + EADV => "Advertise error", + ESRMNT => "Srmount error", + ECOMM => "Communication error on send", + EPROTO => "Protocol error", + EMULTIHOP => "Multihop attempted", + EDOTDOT => "RFS specific error", + EBADMSG => "Not a data message", + EOVERFLOW => "Value too large for defined data type", + ENOTUNIQ => "Name not unique on network", + EBADFD => "File descriptor in bad state", + EREMCHG => "Remote address changed", + ELIBACC => "Can not access a needed shared library", + ELIBBAD => "Accessing a corrupted shared library", + ELIBSCN => "Lib section in a.out corrupted", + ELIBMAX => "Attempting to link in too many shared libraries", + ELIBEXEC => "Cannot exec a shared library directly", + EILSEQ => "Illegal byte sequence", + ERESTART => "Interrupted system call should be restarted", + ESTRPIPE => "Streams pipe error", + EUSERS => "Too many users", + ENOTSOCK => "Socket operation on non-socket", + EDESTADDRREQ => "Destination address required", + EMSGSIZE => "Message too long", + EPROTOTYPE => "Protocol wrong type for socket", + ENOPROTOOPT => "Protocol not available", + EPROTONOSUPPORT => "Protocol not supported", + ESOCKTNOSUPPORT => "Socket type not supported", + EOPNOTSUPP => "Operation not supported on transport endpoint", + EPFNOSUPPORT => "Protocol family not supported", + EAFNOSUPPORT => "Address family not supported by protocol", + EADDRINUSE => "Address already in use", + EADDRNOTAVAIL => "Cannot assign requested address", + ENETDOWN => "Network is down", + ENETUNREACH => "Network is unreachable", + ENETRESET => "Network dropped connection because of reset", + ECONNABORTED => "Software caused connection abort", + ECONNRESET => "Connection reset by peer", + ENOBUFS => "No buffer space available", + EISCONN => "Transport endpoint is already connected", + ENOTCONN => "Transport endpoint is not connected", + ESHUTDOWN => "Cannot send after transport endpoint shutdown", + ETOOMANYREFS => "Too many references: cannot splice", + 
ETIMEDOUT => "Connection timed out", + ECONNREFUSED => "Connection refused", + EHOSTDOWN => "Host is down", + EHOSTUNREACH => "No route to host", + EALREADY => "Operation already in progress", + EINPROGRESS => "Operation now in progress", + ESTALE => "Stale file handle", + EUCLEAN => "Structure needs cleaning", + EDQUOT => "Quota exceeded", + ENOMEDIUM => "No medium found", + EMEDIUMTYPE => "Wrong medium type", + ECANCELED => "Operation Canceled", + ENOKEY => "Required key not available", + EKEYEXPIRED => "Key has expired", + EKEYREVOKED => "Key has been revoked", + EKEYREJECTED => "Key was rejected by service", + EOWNERDEAD => "Robust mutexes: Owner died", + ENOTRECOVERABLE => "Robust mutexes: State not recoverable", + ERFKILL => "Robust mutexes: Operation not possible due to RF-kill", + EHWPOISON => "Robust mutexes: Memory page has hardware error", + _ => "Unknown error", + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/hermit-abi-0.5.2/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/hermit-abi-0.5.2/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ed7f1275b66a4e8d4625f7200e319ece8574ed3a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/hermit-abi-0.5.2/src/lib.rs @@ -0,0 +1,910 @@ +//! `hermit-abi` is small interface to call functions from the +//! [Hermit unikernel](https://github.com/hermit-os/kernel). 
+ +#![no_std] +#![allow(nonstandard_style)] +#![allow(dead_code)] +#![allow(clippy::missing_safety_doc)] +#![allow(clippy::result_unit_err)] + +pub mod errno; + +use core::ffi::c_char; +pub use core::ffi::{c_int, c_short, c_void}; + +pub use self::errno::*; + +/// A thread handle type +pub type Tid = u32; + +/// Maximum number of priorities +pub const NO_PRIORITIES: usize = 31; + +/// Priority of a thread +#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] +pub struct Priority(u8); + +impl Priority { + pub const fn into(self) -> u8 { + self.0 + } + + pub const fn from(x: u8) -> Self { + Priority(x) + } +} + +pub const HIGH_PRIO: Priority = Priority::from(3); +pub const NORMAL_PRIO: Priority = Priority::from(2); +pub const LOW_PRIO: Priority = Priority::from(1); + +pub const FUTEX_RELATIVE_TIMEOUT: u32 = 1; +pub const CLOCK_REALTIME: clockid_t = 1; +pub const CLOCK_MONOTONIC: clockid_t = 4; +pub const STDIN_FILENO: c_int = 0; +pub const STDOUT_FILENO: c_int = 1; +pub const STDERR_FILENO: c_int = 2; +pub const O_RDONLY: i32 = 0o0; +pub const O_WRONLY: i32 = 0o1; +pub const O_RDWR: i32 = 0o2; +pub const O_CREAT: i32 = 0o100; +pub const O_EXCL: i32 = 0o200; +pub const O_TRUNC: i32 = 0o1000; +pub const O_APPEND: i32 = 0o2000; +pub const O_NONBLOCK: i32 = 0o4000; +pub const O_DIRECTORY: i32 = 0o200000; +pub const F_DUPFD: i32 = 0; +pub const F_GETFD: i32 = 1; +pub const F_SETFD: i32 = 2; +pub const F_GETFL: i32 = 3; +pub const F_SETFL: i32 = 4; +pub const FD_CLOEXEC: i32 = 1; + +/// returns true if file descriptor `fd` is a tty +pub fn isatty(_fd: c_int) -> bool { + false +} + +/// `timespec` is used by `clock_gettime` to retrieve the +/// current time +#[derive(Default, Copy, Clone, Debug)] +#[repr(C)] +pub struct timespec { + /// seconds + pub tv_sec: time_t, + /// nanoseconds + pub tv_nsec: i32, +} + +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct timeval { + pub tv_sec: time_t, + pub tv_usec: suseconds_t, +} + +/// The largest number `rand` will 
return +pub const RAND_MAX: i32 = 2_147_483_647; + +pub const AF_UNSPEC: i32 = 0; +/// Socket address family: IPv4 +pub const AF_INET: i32 = 3; +/// Socket address family: IPv6 +pub const AF_INET6: i32 = 1; +/// Socket address family: VSOCK protocol for hypervisor-guest communication +pub const AF_VSOCK: i32 = 2; +pub const IPPROTO_IP: i32 = 0; +pub const IPPROTO_IPV6: i32 = 41; +pub const IPPROTO_UDP: i32 = 17; +pub const IPPROTO_TCP: i32 = 6; +pub const IPV6_ADD_MEMBERSHIP: i32 = 12; +pub const IPV6_DROP_MEMBERSHIP: i32 = 13; +pub const IPV6_MULTICAST_LOOP: i32 = 19; +pub const IPV6_V6ONLY: i32 = 27; +pub const IP_TOS: i32 = 1; +pub const IP_TTL: i32 = 2; +pub const IP_MULTICAST_TTL: i32 = 5; +pub const IP_MULTICAST_LOOP: i32 = 7; +pub const IP_ADD_MEMBERSHIP: i32 = 3; +pub const IP_DROP_MEMBERSHIP: i32 = 4; +pub const SHUT_RD: i32 = 0; +pub const SHUT_WR: i32 = 1; +pub const SHUT_RDWR: i32 = 2; +/// Socket supports datagrams (connectionless, unreliable messages of a fixed maximum length) +pub const SOCK_DGRAM: i32 = 2; +/// Socket provides sequenced, reliable, two-way, connection-based byte streams. 
+pub const SOCK_STREAM: i32 = 1; +/// Set the O_NONBLOCK file status flag on the open socket +pub const SOCK_NONBLOCK: i32 = 0o4000; +/// Set the close-on-exec flag on the new socket +pub const SOCK_CLOEXEC: i32 = 0o40000; +pub const SOL_SOCKET: i32 = 4095; +pub const SO_REUSEADDR: i32 = 0x0004; +pub const SO_KEEPALIVE: i32 = 0x0008; +pub const SO_BROADCAST: i32 = 0x0020; +pub const SO_LINGER: i32 = 0x0080; +pub const SO_SNDBUF: i32 = 0x1001; +pub const SO_RCVBUF: i32 = 0x1002; +pub const SO_SNDTIMEO: i32 = 0x1005; +pub const SO_RCVTIMEO: i32 = 0x1006; +pub const SO_ERROR: i32 = 0x1007; +pub const TCP_NODELAY: i32 = 1; +pub const MSG_PEEK: i32 = 1; +pub const FIONBIO: i32 = 0x8008667eu32 as i32; +pub const EAI_AGAIN: i32 = 2; +pub const EAI_BADFLAGS: i32 = 3; +pub const EAI_FAIL: i32 = 4; +pub const EAI_FAMILY: i32 = 5; +pub const EAI_MEMORY: i32 = 6; +pub const EAI_NODATA: i32 = 7; +pub const EAI_NONAME: i32 = 8; +pub const EAI_SERVICE: i32 = 9; +pub const EAI_SOCKTYPE: i32 = 10; +pub const EAI_SYSTEM: i32 = 11; +pub const EAI_OVERFLOW: i32 = 14; +pub const POLLIN: i16 = 0x1; +pub const POLLPRI: i16 = 0x2; +pub const POLLOUT: i16 = 0x4; +pub const POLLERR: i16 = 0x8; +pub const POLLHUP: i16 = 0x10; +pub const POLLNVAL: i16 = 0x20; +pub const POLLRDNORM: i16 = 0x040; +pub const POLLRDBAND: i16 = 0x080; +pub const POLLWRNORM: i16 = 0x0100; +pub const POLLWRBAND: i16 = 0x0200; +pub const POLLRDHUP: i16 = 0x2000; +pub const EFD_SEMAPHORE: i16 = 0o1; +pub const EFD_NONBLOCK: i16 = 0o4000; +pub const EFD_CLOEXEC: i16 = 0o40000; +pub const IOV_MAX: usize = 1024; +/// VMADDR_CID_ANY means that any address is possible for binding +pub const VMADDR_CID_ANY: u32 = u32::MAX; +pub const VMADDR_CID_HYPERVISOR: u32 = 0; +pub const VMADDR_CID_LOCAL: u32 = 1; +pub const VMADDR_CID_HOST: u32 = 2; +pub type sa_family_t = u8; +pub type socklen_t = u32; +pub type in_addr_t = u32; +pub type in_port_t = u16; +pub type time_t = i64; +pub type useconds_t = u32; +pub type suseconds_t = 
i32; +pub type nfds_t = usize; +pub type sem_t = *const c_void; +pub type pid_t = i32; +pub type clockid_t = i32; + +#[repr(C)] +#[derive(Debug, Copy, Clone, Default)] +pub struct in_addr { + pub s_addr: in_addr_t, +} + +#[repr(C, align(4))] +#[derive(Debug, Copy, Clone, Default)] +pub struct in6_addr { + pub s6_addr: [u8; 16], +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Default)] +pub struct sockaddr { + pub sa_len: u8, + pub sa_family: sa_family_t, + pub sa_data: [c_char; 14], +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Default)] +pub struct sockaddr_vm { + pub svm_len: u8, + pub svm_family: sa_family_t, + pub svm_reserved1: u16, + pub svm_port: u32, + pub svm_cid: u32, + pub svm_zero: [u8; 4], +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Default)] +pub struct sockaddr_in { + pub sin_len: u8, + pub sin_family: sa_family_t, + pub sin_port: in_port_t, + pub sin_addr: in_addr, + pub sin_zero: [c_char; 8], +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Default)] +pub struct sockaddr_in6 { + pub sin6_len: u8, + pub sin6_family: sa_family_t, + pub sin6_port: in_port_t, + pub sin6_flowinfo: u32, + pub sin6_addr: in6_addr, + pub sin6_scope_id: u32, +} + +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct addrinfo { + pub ai_flags: i32, + pub ai_family: i32, + pub ai_socktype: i32, + pub ai_protocol: i32, + pub ai_addrlen: socklen_t, + pub ai_canonname: *mut c_char, + pub ai_addr: *mut sockaddr, + pub ai_next: *mut addrinfo, +} + +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sockaddr_storage { + pub s2_len: u8, + pub ss_family: sa_family_t, + __ss_pad1: [u8; 6], + __ss_align: i64, + __ss_pad2: [u8; 112], +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Default)] +pub struct ip_mreq { + pub imr_multiaddr: in_addr, + pub imr_interface: in_addr, +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Default)] +pub struct ipv6_mreq { + pub ipv6mr_multiaddr: in6_addr, + pub ipv6mr_interface: u32, +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Default)] +pub struct 
linger { + pub l_onoff: i32, + pub l_linger: i32, +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Default)] +pub struct pollfd { + /// file descriptor + pub fd: i32, + /// events to look for + pub events: i16, + /// events returned + pub revents: i16, +} + +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct stat { + pub st_dev: u64, + pub st_ino: u64, + pub st_nlink: u64, + /// access permissions + pub st_mode: u32, + /// user id + pub st_uid: u32, + /// group id + pub st_gid: u32, + /// device id + pub st_rdev: u64, + /// size in bytes + pub st_size: i64, + /// block size + pub st_blksize: i64, + /// size in blocks + pub st_blocks: i64, + /// time of last access + pub st_atim: timespec, + /// time of last modification + pub st_mtim: timespec, + /// time of last status change + pub st_ctim: timespec, +} + +#[repr(C)] +#[derive(Debug, Clone, Copy)] +pub struct dirent64 { + /// 64-bit inode number + pub d_ino: u64, + /// 64-bit offset to next structure + pub d_off: i64, + /// Size of this dirent + pub d_reclen: u16, + /// File type + pub d_type: u8, + /// Filename (null-terminated) + pub d_name: [c_char; 256], +} + +#[repr(C)] +#[derive(Debug, Clone, Copy)] +/// Describes a region of memory, beginning at `iov_base` address and with the size of `iov_len` bytes. +pub struct iovec { + /// Starting address + pub iov_base: *mut c_void, + /// Size of the memory pointed to by iov_base. 
+ pub iov_len: usize, +} + +pub const DT_UNKNOWN: u8 = 0; +pub const DT_FIFO: u8 = 1; +pub const DT_CHR: u8 = 2; +pub const DT_DIR: u8 = 4; +pub const DT_BLK: u8 = 6; +pub const DT_REG: u8 = 8; +pub const DT_LNK: u8 = 10; +pub const DT_SOCK: u8 = 12; +pub const DT_WHT: u8 = 14; + +pub const S_IFIFO: u32 = 0o1_0000; +pub const S_IFCHR: u32 = 0o2_0000; +pub const S_IFBLK: u32 = 0o6_0000; +pub const S_IFDIR: u32 = 0o4_0000; +pub const S_IFREG: u32 = 0o10_0000; +pub const S_IFLNK: u32 = 0o12_0000; +pub const S_IFSOCK: u32 = 0o14_0000; +pub const S_IFMT: u32 = 0o17_0000; + +/// Pages may not be accessed. +pub const PROT_NONE: u32 = 0; +/// Indicates that the memory region should be readable. +pub const PROT_READ: u32 = 1 << 0; +/// Indicates that the memory region should be writable. +pub const PROT_WRITE: u32 = 1 << 1; +/// Indicates that the memory region should be executable. +pub const PROT_EXEC: u32 = 1 << 2; + +/// The file offset is set to offset bytes. +pub const SEEK_SET: i32 = 0; +/// The file offset is set to its current location plus offset bytes. +pub const SEEK_CUR: i32 = 1; +/// The file offset is set to the size of the file plus offset bytes. +pub const SEEK_END: i32 = 2; + +// symbols, which are part of the library operating system +extern "C" { + /// Get the last error number from the thread local storage + #[link_name = "sys_get_errno"] + pub fn get_errno() -> i32; + + /// Get the last error number from the thread local storage + #[link_name = "sys_errno"] + pub fn errno() -> i32; + + /// Get memory page size + #[link_name = "sys_getpagesize"] + pub fn getpagesize() -> i32; + + /// Creates a new virtual memory mapping of the `size` specified with + /// protection bits specified in `prot_flags`. + #[link_name = "sys_mmap"] + pub fn mmap(size: usize, prot_flags: u32, ret: &mut *mut u8) -> i32; + + /// Unmaps memory at the specified `ptr` for `size` bytes. 
+ #[link_name = "sys_munmap"] + pub fn munmap(ptr: *mut u8, size: usize) -> i32; + + /// Configures the protections associated with a region of virtual memory + /// starting at `ptr` and going to `size`. + /// + /// Returns 0 on success and an error code on failure. + #[link_name = "sys_mprotect"] + pub fn mprotect(ptr: *mut u8, size: usize, prot_flags: u32) -> i32; + + /// If the value at address matches the expected value, park the current thread until it is either + /// woken up with [`futex_wake`] (returns 0) or an optional timeout elapses (returns -ETIMEDOUT). + /// + /// Setting `timeout` to null means the function will only return if [`futex_wake`] is called. + /// Otherwise, `timeout` is interpreted as an absolute time measured with [`CLOCK_MONOTONIC`]. + /// If [`FUTEX_RELATIVE_TIMEOUT`] is set in `flags` the timeout is understood to be relative + /// to the current time. + /// + /// Returns -EINVAL if `address` is null, the timeout is negative or `flags` contains unknown values. + #[link_name = "sys_futex_wait"] + pub fn futex_wait( + address: *mut u32, + expected: u32, + timeout: *const timespec, + flags: u32, + ) -> i32; + + /// Wake `count` threads waiting on the futex at `address`. Returns the number of threads + /// woken up (saturates to `i32::MAX`). If `count` is `i32::MAX`, wake up all matching + /// waiting threads. If `count` is negative or `address` is null, returns -EINVAL. + #[link_name = "sys_futex_wake"] + pub fn futex_wake(address: *mut u32, count: i32) -> i32; + + /// sem_init() initializes the unnamed semaphore at the address + /// pointed to by `sem`. The `value` argument specifies the + /// initial value for the semaphore. If `pshared` is nonzero, + /// then the semaphore is shared between processes (currently + /// not supported). + #[link_name = "sys_sem_init"] + pub fn sem_init(sem: *mut sem_t, pshared: i32, value: u32) -> i32; + + /// sem_destroy() frees the unnamed semaphore at the address + /// pointed to by `sem`. 
+ #[link_name = "sys_sem_destroy"] + pub fn sem_destroy(sem: *mut sem_t) -> i32; + + /// sem_post() increments the semaphore pointed to by `sem`. + /// If the semaphore's value consequently becomes greater + /// than zero, then another thread blocked in a sem_wait call + /// will be woken up and proceed to lock the semaphore. + #[link_name = "sys_sem_post"] + pub fn sem_post(sem: *mut sem_t) -> i32; + + /// try to decrement a semaphore + /// + /// sem_trywait() is the same as sem_timedwait(), except that + /// if the decrement cannot be immediately performed, then call + /// returns a negative value instead of blocking. + #[link_name = "sys_sem_trywait"] + pub fn sem_trywait(sem: *mut sem_t) -> i32; + + /// decrement a semaphore + /// + /// sem_timedwait() decrements the semaphore pointed to by `sem`. + /// If the semaphore's value is greater than zero, then the + /// the function returns immediately. If the semaphore currently + /// has the value zero, then the call blocks until either + /// it becomes possible to perform the decrement of the time limit + /// to wait for the semaphore is expired. A time limit `ms` of + /// means infinity waiting time. + #[link_name = "sys_sem_timedwait"] + pub fn sem_timedwait(sem: *mut sem_t, abs_timeout: *const timespec) -> i32; + + /// Determines the id of the current thread + #[link_name = "sys_getpid"] + pub fn getpid() -> pid_t; + + /// cause normal termination and return `status` + /// to the host system + #[link_name = "sys_exit"] + pub fn exit(status: i32) -> !; + + /// cause abnormal termination + #[link_name = "sys_abort"] + pub fn abort() -> !; + + /// suspend execution for microsecond intervals + /// + /// The usleep() function suspends execution of the calling + /// thread for (at least) `usecs` microseconds. 
+ #[link_name = "sys_usleep"] + pub fn usleep(usecs: u64); + + /// suspend thread execution for an interval measured in nanoseconds + #[link_name = "sys_nanosleep"] + pub fn nanosleep(req: *const timespec) -> i32; + + /// spawn a new thread + /// + /// spawn() starts a new thread. The new thread starts execution + /// by invoking `func(usize)`; `arg` is passed as the argument + /// to `func`. `prio` defines the priority of the new thread, + /// which can be between `LOW_PRIO` and `HIGH_PRIO`. + /// `core_id` defines the core, where the thread is located. + /// A negative value give the operating system the possibility + /// to select the core by its own. + #[link_name = "sys_spawn"] + pub fn spawn( + id: *mut Tid, + func: extern "C" fn(usize), + arg: usize, + prio: u8, + core_id: isize, + ) -> i32; + + /// spawn a new thread with user-specified stack size + /// + /// spawn2() starts a new thread. The new thread starts execution + /// by invoking `func(usize)`; `arg` is passed as the argument + /// to `func`. `prio` defines the priority of the new thread, + /// which can be between `LOW_PRIO` and `HIGH_PRIO`. + /// `core_id` defines the core, where the thread is located. + /// A negative value give the operating system the possibility + /// to select the core by its own. + /// In contrast to spawn(), spawn2() is able to define the + /// stack size. + #[link_name = "sys_spawn2"] + pub fn spawn2( + func: extern "C" fn(usize), + arg: usize, + prio: u8, + stack_size: usize, + core_id: isize, + ) -> Tid; + + /// join with a terminated thread + /// + /// The join() function waits for the thread specified by `id` + /// to terminate. + #[link_name = "sys_join"] + pub fn join(id: Tid) -> i32; + + /// yield the processor + /// + /// causes the calling thread to relinquish the CPU. The thread + /// is moved to the end of the queue for its static priority. 
+ #[link_name = "sys_yield"] + pub fn yield_now(); + + /// get current time + /// + /// The clock_gettime() functions allow the calling thread + /// to retrieve the value used by a clock which is specified + /// by `clockid`. + /// + /// `CLOCK_REALTIME`: the system's real time clock, + /// expressed as the amount of time since the Epoch. + /// + /// `CLOCK_MONOTONIC`: clock that increments monotonically, + /// tracking the time since an arbitrary point + #[link_name = "sys_clock_gettime"] + pub fn clock_gettime(clockid: clockid_t, tp: *mut timespec) -> i32; + + /// open and possibly create a file + /// + /// The open() system call opens the file specified by `name`. + /// If the specified file does not exist, it may optionally + /// be created by open(). + #[link_name = "sys_open"] + pub fn open(name: *const c_char, flags: i32, mode: i32) -> i32; + + /// open a directory + /// + /// The opendir() system call opens the directory specified by `name`. + #[deprecated(since = "0.4.0", note = "please use `open`")] + #[link_name = "sys_opendir"] + pub fn opendir(name: *const c_char) -> i32; + + /// delete the file it refers to `name` + #[link_name = "sys_unlink"] + pub fn unlink(name: *const c_char) -> i32; + + /// remove directory it refers to `name` + #[link_name = "sys_rmdir"] + pub fn rmdir(name: *const c_char) -> i32; + + /// stat + #[link_name = "sys_stat"] + pub fn stat(name: *const c_char, stat: *mut stat) -> i32; + + /// lstat + #[link_name = "sys_lstat"] + pub fn lstat(name: *const c_char, stat: *mut stat) -> i32; + + /// fstat + #[link_name = "sys_fstat"] + pub fn fstat(fd: i32, stat: *mut stat) -> i32; + + /// Returns an estimate of the default amount of parallelism + /// a program should use. This number often corresponds to the + /// amount of CPUs a computer has, but it may diverge in + /// various cases. 
+ #[link_name = "sys_available_parallelism"] + pub fn available_parallelism() -> usize; + + /// determines the number of activated processors + #[deprecated(since = "0.4.0", note = "please use `available_parallelism`")] + #[link_name = "sys_get_processor_count"] + pub fn get_processor_count() -> usize; + + #[link_name = "sys_malloc"] + pub fn malloc(size: usize, align: usize) -> *mut u8; + + #[link_name = "sys_alloc"] + pub fn alloc(size: usize, align: usize) -> *mut u8; + + #[link_name = "sys_alloc_zeroed"] + pub fn alloc_zeroed(size: usize, align: usize) -> *mut u8; + + #[link_name = "sys_realloc"] + pub fn realloc(ptr: *mut u8, size: usize, align: usize, new_size: usize) -> *mut u8; + + #[link_name = "sys_free"] + pub fn free(ptr: *mut u8, size: usize, align: usize); + + #[link_name = "sys_dealloc"] + pub fn dealloc(ptr: *mut u8, size: usize, align: usize); + + #[link_name = "sys_notify"] + pub fn notify(id: usize, count: i32) -> i32; + + #[doc(hidden)] + #[link_name = "sys_add_queue"] + pub fn add_queue(id: usize, timeout_ns: i64) -> i32; + + #[doc(hidden)] + #[link_name = "sys_wait"] + pub fn wait(id: usize) -> i32; + + #[doc(hidden)] + #[link_name = "sys_init_queue"] + pub fn init_queue(id: usize) -> i32; + + #[doc(hidden)] + #[link_name = "sys_destroy_queue"] + pub fn destroy_queue(id: usize) -> i32; + + /// initialize the network stack + #[link_name = "sys_network_init"] + pub fn network_init() -> i32; + + /// Add current task to the queue of blocked tasks. After calling `block_current_task`, + /// call `yield_now` to switch to another task. + #[link_name = "sys_block_current_task"] + pub fn block_current_task(); + + /// Add current task to the queue of blocked tasks, but wake it when `timeout` milliseconds + /// have elapsed. + /// + /// After calling `block_current_task`, call `yield_now` to switch to another task. 
+ #[link_name = "sys_block_current_task_with_timeout"] + pub fn block_current_task_with_timeout(timeout: u64); + + /// Wakeup task with the thread id `tid` + #[link_name = "sys_wakeup_taskt"] + pub fn wakeup_task(tid: Tid); + + /// The system call `getaddrbyname` determine the network host entry. + /// It expects an array of u8 with a size of in_addr or of in6_addr. + /// The result of the DNS request will be stored in this array. + /// + /// # Example + /// + /// ``` + /// use hermit_abi::in_addr; + /// let c_string = std::ffi::CString::new("rust-lang.org").expect("CString::new failed"); + /// let name = c_string.into_raw(); + /// let mut inaddr: in_addr = Default::default(); + /// let _ = unsafe { + /// hermit_abi::getaddrbyname( + /// name, + /// &mut inaddr as *mut _ as *mut u8, + /// std::mem::size_of::(), + /// ) + /// }; + /// + /// // retake pointer to free memory + /// let _ = CString::from_raw(name); + /// ``` + #[link_name = "sys_getaddrbyname"] + pub fn getaddrbyname(name: *const c_char, inaddr: *mut u8, len: usize) -> i32; + + #[link_name = "sys_accept"] + pub fn accept(s: i32, addr: *mut sockaddr, addrlen: *mut socklen_t) -> i32; + + /// bind a name to a socket + #[link_name = "sys_bind"] + pub fn bind(s: i32, name: *const sockaddr, namelen: socklen_t) -> i32; + + #[link_name = "sys_connect"] + pub fn connect(s: i32, name: *const sockaddr, namelen: socklen_t) -> i32; + + /// read from a file descriptor + /// + /// read() attempts to read `len` bytes of data from the object + /// referenced by the descriptor `fd` into the buffer pointed + /// to by `buf`. + #[link_name = "sys_read"] + pub fn read(fd: i32, buf: *mut u8, len: usize) -> isize; + + /// `read()` attempts to read `nbyte` of data to the object referenced by the + /// descriptor `fd` from a buffer. `read()` performs the same + /// action, but scatters the input data from the `iovcnt` buffers specified by the + /// members of the iov array: `iov[0], iov[1], ..., iov[iovcnt-1]`. 
+ /// + /// ``` + /// struct iovec { + /// char *iov_base; /* Base address. */ + /// size_t iov_len; /* Length. */ + /// }; + /// ``` + /// + /// Each `iovec` entry specifies the base address and length of an area in memory from + /// which data should be written. `readv()` will always fill an completely + /// before proceeding to the next. + #[link_name = "sys_readv"] + pub fn readv(fd: i32, iov: *const iovec, iovcnt: usize) -> isize; + + /// `getdents64` reads directory entries from the directory referenced + /// by the file descriptor `fd` into the buffer pointed to by `buf`. + #[link_name = "sys_getdents64"] + pub fn getdents64(fd: i32, dirp: *mut dirent64, count: usize) -> i64; + + /// 'mkdir' attempts to create a directory, + /// it returns 0 on success and -1 on error + #[link_name = "sys_mkdir"] + pub fn mkdir(name: *const i8, mode: u32) -> i32; + + /// Fill `len` bytes in `buf` with cryptographically secure random data. + /// + /// Returns either the number of bytes written to buf (a positive value) or + /// * `-EINVAL` if `flags` contains unknown flags. + /// * `-ENOSYS` if the system does not support random data generation. + #[link_name = "sys_read_entropy"] + pub fn read_entropy(buf: *mut u8, len: usize, flags: u32) -> isize; + + /// receive() a message from a socket + #[link_name = "sys_recv"] + pub fn recv(socket: i32, buf: *mut u8, len: usize, flags: i32) -> isize; + + /// receive() a message from a socket + #[link_name = "sys_recvfrom"] + pub fn recvfrom( + socket: i32, + buf: *mut u8, + len: usize, + flags: i32, + addr: *mut sockaddr, + addrlen: *mut socklen_t, + ) -> isize; + + /// The fseek() function sets the file position indicator for the stream pointed to by stream. + /// The new position, measured in bytes, is obtained by adding offset bytes to the position + /// specified by whence. 
If whence is set to SEEK_SET, SEEK_CUR, or SEEK_END, the offset is + /// relative to the start of the file, the current position indicator, or end-of-file, + /// respectively. + #[link_name = "sys_lseek"] + pub fn lseek(fd: i32, offset: isize, whence: i32) -> isize; + + /// write to a file descriptor + /// + /// write() attempts to write `len` of data to the object + /// referenced by the descriptor `fd` from the + /// buffer pointed to by `buf`. + #[link_name = "sys_write"] + pub fn write(fd: i32, buf: *const u8, len: usize) -> isize; + + /// `write()` attempts to write `nbyte` of data to the object referenced by the + /// descriptor `fd` from a buffer. `writev()` performs the same + /// action, but gathers the output data from the `iovcnt` buffers specified by the + /// members of the iov array: `iov[0], iov[1], ..., iov[iovcnt-1]`. + /// + /// ``` + /// struct iovec { + /// char *iov_base; /* Base address. */ + /// size_t iov_len; /* Length. */ + /// }; + /// ``` + /// + /// Each `iovec` entry specifies the base address and length of an area in memory from + /// which data should be written. `writev()` will always write a + /// complete area before proceeding to the next. + #[link_name = "sys_writev"] + pub fn writev(fd: i32, iov: *const iovec, iovcnt: usize) -> isize; + + /// close a file descriptor + /// + /// The close() call deletes a file descriptor `fd` from the object + /// reference table. 
+ #[link_name = "sys_close"] + pub fn close(fd: i32) -> i32; + + /// duplicate an existing file descriptor + #[link_name = "sys_dup"] + pub fn dup(fd: i32) -> i32; + + #[link_name = "sys_getpeername"] + pub fn getpeername(s: i32, name: *mut sockaddr, namelen: *mut socklen_t) -> i32; + + #[link_name = "sys_getsockname"] + pub fn getsockname(s: i32, name: *mut sockaddr, namelen: *mut socklen_t) -> i32; + + #[link_name = "sys_getsockopt"] + pub fn getsockopt( + s: i32, + level: i32, + optname: i32, + optval: *mut c_void, + optlen: *mut socklen_t, + ) -> i32; + + #[link_name = "sys_setsockopt"] + pub fn setsockopt( + s: i32, + level: i32, + optname: i32, + optval: *const c_void, + optlen: socklen_t, + ) -> i32; + + #[link_name = "sys_ioctl"] + pub fn ioctl(s: i32, cmd: i32, argp: *mut c_void) -> i32; + + #[link_name = "sys_fcntl"] + pub fn fcntl(fd: i32, cmd: i32, arg: i32) -> i32; + + /// `eventfd` creates an linux-like "eventfd object" that can be used + /// as an event wait/notify mechanism by user-space applications, and by + /// the kernel to notify user-space applications of events. The + /// object contains an unsigned 64-bit integer counter + /// that is maintained by the kernel. This counter is initialized + /// with the value specified in the argument `initval`. + /// + /// As its return value, `eventfd` returns a new file descriptor that + /// can be used to refer to the eventfd object. + /// + /// The following values may be bitwise set in flags to change the + /// behavior of `eventfd`: + /// + /// `EFD_NONBLOCK`: Set the file descriptor in non-blocking mode + /// `EFD_SEMAPHORE`: Provide semaphore-like semantics for reads + /// from the new file descriptor. + #[link_name = "sys_eventfd"] + pub fn eventfd(initval: u64, flags: i16) -> i32; + + /// The unix-like `poll` waits for one of a set of file descriptors + /// to become ready to perform I/O. 
The set of file descriptors to be + /// monitored is specified in the `fds` argument, which is an array + /// of structures of `pollfd`. + #[link_name = "sys_poll"] + pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: i32) -> i32; + + /// listen for connections on a socket + /// + /// The `backlog` parameter defines the maximum length for the queue of pending + /// connections. Currently, the `backlog` must be one. + #[link_name = "sys_listen"] + pub fn listen(s: i32, backlog: i32) -> i32; + + #[link_name = "sys_send"] + pub fn send(s: i32, mem: *const c_void, len: usize, flags: i32) -> isize; + + #[link_name = "sys_sendto"] + pub fn sendto( + s: i32, + mem: *const c_void, + len: usize, + flags: i32, + to: *const sockaddr, + tolen: socklen_t, + ) -> isize; + + /// shut down part of a full-duplex connection + #[link_name = "sys_shutdown"] + pub fn shutdown(sockfd: i32, how: i32) -> i32; + + #[deprecated(since = "0.4.0", note = "use `shutdown` instead")] + #[link_name = "sys_shutdown_socket"] + pub fn shutdown_socket(s: i32, how: i32) -> i32; + + #[link_name = "sys_socket"] + pub fn socket(domain: i32, type_: i32, protocol: i32) -> i32; + + #[link_name = "sys_freeaddrinfo"] + pub fn freeaddrinfo(ai: *mut addrinfo); + + #[link_name = "sys_getaddrinfo"] + pub fn getaddrinfo( + nodename: *const c_char, + servname: *const c_char, + hints: *const addrinfo, + res: *mut *mut addrinfo, + ) -> i32; + + fn sys_get_priority() -> u8; + fn sys_set_priority(tid: Tid, prio: u8); +} + +/// Determine the priority of the current thread +#[inline(always)] +pub unsafe fn get_priority() -> Priority { + Priority::from(sys_get_priority()) +} + +/// Determine the priority of the current thread +#[inline(always)] +pub unsafe fn set_priority(tid: Tid, prio: Priority) { + sys_set_priority(tid, prio.into()); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/.cargo-ok 
new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..d4f845beab62fefab45b6cde76c934b1d53d2a78 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "73fca9e70090212fd6affb63e3907c55851ef71e" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/CODE_OF_CONDUCT.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..affbd3dbbe44bbecedf6bfd2e37062eb01e1458d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/CODE_OF_CONDUCT.md @@ -0,0 +1,49 @@ +# Contributor Covenant Code of Conduct + +*Note*: this Code of Conduct pertains to individuals' behavior. Please also see the [Organizational Code of Conduct][OCoC]. + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the Bytecode Alliance CoC team at [report@bytecodealliance.org](mailto:report@bytecodealliance.org). The CoC team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The CoC team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the Bytecode Alliance's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[OCoC]: https://github.com/sunfishcode/linux-raw-sys/blob/main/ORG_CODE_OF_CONDUCT.md +[homepage]: https://www.contributor-covenant.org +[version]: https://www.contributor-covenant.org/version/1/4/ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/COPYRIGHT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/COPYRIGHT new file mode 100644 index 0000000000000000000000000000000000000000..719e3dbc85e2e7b0d2f902b01b311dfefc70138d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/COPYRIGHT @@ -0,0 +1,29 @@ +Short version for non-lawyers: + +`linux-raw-sys` is triple-licensed under Apache 2.0 with the LLVM Exception, +Apache 2.0, and MIT terms. + + +Longer version: + +Copyrights in the `linux-raw-sys` project are retained by their contributors. +No copyright assignment is required to contribute to the `linux-raw-sys` +project. + +Some files include code derived from Rust's `libstd`; see the comments in +the code for details. 
+ +Except as otherwise noted (below and/or in individual files), `linux-raw-sys` +is licensed under: + + - the Apache License, Version 2.0, with the LLVM Exception + or + + - the Apache License, Version 2.0 + or + , + - or the MIT license + or + , + +at your option. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..f5fb72ff92e22b47993f5f200c2bfd0284c39f2d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/Cargo.lock @@ -0,0 +1,30 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "libc" +version = "0.2.175" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +dependencies = [ + "libc", + "rustc-std-workspace-core", + "static_assertions", +] + +[[package]] +name = "rustc-std-workspace-core" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9c45b374136f52f2d6311062c7146bff20fec063c3f5d46a410bd937746955" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..2442964df09be15c9dddc12668b8dbba940741ae --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/Cargo.toml @@ -0,0 +1,121 @@ +# THIS FILE IS AUTOMATICALLY 
GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.63" +name = "linux-raw-sys" +version = "0.11.0" +authors = ["Dan Gohman "] +build = false +exclude = [ + "/gen", + "/.*", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Generated bindings for Linux's userspace API" +documentation = "https://docs.rs/linux-raw-sys" +readme = "README.md" +keywords = [ + "linux", + "uapi", + "ffi", +] +categories = ["external-ffi-bindings"] +license = "Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT" +repository = "https://github.com/sunfishcode/linux-raw-sys" + +[package.metadata.docs.rs] +features = [ + "default", + "bootparam", + "btrfs", + "elf_uapi", + "image", + "ioctl", + "landlock", + "netlink", + "io_uring", + "if_arp", + "if_ether", + "if_packet", + "net", + "ptrace", + "prctl", + "elf", + "xdp", + "mempolicy", + "system", + "loop_device", +] +targets = [ + "x86_64-unknown-linux-gnu", + "i686-unknown-linux-gnu", +] + +[features] +auxvec = [] +bootparam = [] +btrfs = [] +default = [ + "std", + "general", + "errno", +] +elf = [] +elf_uapi = [] +errno = [] +general = [] +if_arp = [] +if_ether = [] +if_packet = [] +image = [] +io_uring = [] +ioctl = [] +landlock = [] +loop_device = [] +mempolicy = [] +net = [] +netlink = [] +no_std = [] +prctl = [] +ptrace = [] +rustc-dep-of-std = [ + "core", + "no_std", +] +std = [] +system = [] +xdp = [] + +[lib] +name = "linux_raw_sys" +path = "src/lib.rs" + +[dependencies.core] +version = "1.0.0" +optional = true +package = 
"rustc-std-workspace-core" + +[dev-dependencies.libc] +version = "0.2.100" + +[dev-dependencies.static_assertions] +version = "1.1.0" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = ['cfg(target_arch, values("xtensa"))'] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..79dd47195a25d3960e0b7628d80844c7cd6c2edf --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/Cargo.toml.orig @@ -0,0 +1,59 @@ +[package] +name = "linux-raw-sys" +version = "0.11.0" +authors = ["Dan Gohman "] +description = "Generated bindings for Linux's userspace API" +documentation = "https://docs.rs/linux-raw-sys" +license = "Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT" +repository = "https://github.com/sunfishcode/linux-raw-sys" +edition = "2021" +keywords = ["linux", "uapi", "ffi"] +categories = ["external-ffi-bindings"] +exclude = ["/gen", "/.*"] +rust-version = "1.63" + +[dependencies] +core = { version = "1.0.0", optional = true, package = "rustc-std-workspace-core" } + +[dev-dependencies] +static_assertions = "1.1.0" +libc = "0.2.100" + +[package.metadata.docs.rs] +features = ["default", "bootparam", "btrfs", "elf_uapi", "image", "ioctl", "landlock", "netlink", "io_uring", "if_arp", "if_ether", "if_packet", "net", "ptrace", "prctl", "elf", "xdp", "mempolicy", "system", "loop_device"] +targets = ["x86_64-unknown-linux-gnu", "i686-unknown-linux-gnu"] + +[lints.rust.unexpected_cfgs] +level = "warn" +check-cfg = [ + 'cfg(target_arch, values("xtensa"))', +] + +# The rest of this file is auto-generated! 
+[features] +auxvec = [] +bootparam = [] +btrfs = [] +elf_uapi = [] +errno = [] +general = [] +if_arp = [] +if_ether = [] +if_packet = [] +image = [] +io_uring = [] +ioctl = [] +landlock = [] +loop_device = [] +mempolicy = [] +net = [] +netlink = [] +prctl = [] +ptrace = [] +system = [] +xdp = [] +default = ["std", "general", "errno"] +std = [] +no_std = [] +elf = [] +rustc-dep-of-std = ["core", "no_std"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..16fe87b06e802f094b3fbb0894b137bca2b16ef1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/LICENSE-Apache-2.0_WITH_LLVM-exception b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/LICENSE-Apache-2.0_WITH_LLVM-exception new file mode 100644 index 0000000000000000000000000000000000000000..f9d81955f4bcb8f96a025e2ecc46f39ec536d465 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/LICENSE-Apache-2.0_WITH_LLVM-exception @@ -0,0 +1,220 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +--- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. 
+ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..31aa79387f27e730e33d871925e152e35e428031 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/ORG_CODE_OF_CONDUCT.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/ORG_CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..6f4fb3f537d154768878020bbbb7fc2897956066 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/ORG_CODE_OF_CONDUCT.md @@ -0,0 +1,143 @@ +# Bytecode Alliance Organizational Code of Conduct (OCoC) + +*Note*: this Code of Conduct pertains to organizations' behavior. Please also see the [Individual Code of Conduct](CODE_OF_CONDUCT.md). + +## Preamble + +The Bytecode Alliance (BA) welcomes involvement from organizations, +including commercial organizations. This document is an +*organizational* code of conduct, intended particularly to provide +guidance to commercial organizations. It is distinct from the +[Individual Code of Conduct (ICoC)](CODE_OF_CONDUCT.md), and does not +replace the ICoC. This OCoC applies to any group of people acting in +concert as a BA member or as a participant in BA activities, whether +or not that group is formally incorporated in some jurisdiction. + +The code of conduct described below is not a set of rigid rules, and +we did not write it to encompass every conceivable scenario that might +arise. For example, it is theoretically possible there would be times +when asserting patents is in the best interest of the BA community as +a whole. In such instances, consult with the BA, strive for +consensus, and interpret these rules with an intent that is generous +to the community the BA serves. 
+ +While we may revise these guidelines from time to time based on +real-world experience, overall they are based on a simple principle: + +*Bytecode Alliance members should observe the distinction between + public community functions and private functions — especially + commercial ones — and should ensure that the latter support, or at + least do not harm, the former.* + +## Guidelines + + * **Do not cause confusion about Wasm standards or interoperability.** + + Having an interoperable WebAssembly core is a high priority for + the BA, and members should strive to preserve that core. It is fine + to develop additional non-standard features or APIs, but they + should always be clearly distinguished from the core interoperable + Wasm. + + Treat the WebAssembly name and any BA-associated names with + respect, and follow BA trademark and branding guidelines. If you + distribute a customized version of software originally produced by + the BA, or if you build a product or service using BA-derived + software, use names that clearly distinguish your work from the + original. (You should still provide proper attribution to the + original, of course, wherever such attribution would normally be + given.) + + Further, do not use the WebAssembly name or BA-associated names in + other public namespaces in ways that could cause confusion, e.g., + in company names, names of commercial service offerings, domain + names, publicly-visible social media accounts or online service + accounts, etc. It may sometimes be reasonable, however, to + register such a name in a new namespace and then immediately donate + control of that account to the BA, because that would help the project + maintain its identity. + + For further guidance, see the BA Trademark and Branding Policy + [TODO: create policy, then insert link]. 
+ + * **Do not restrict contributors.** If your company requires + employees or contractors to sign non-compete agreements, those + agreements must not prevent people from participating in the BA or + contributing to related projects. + + This does not mean that all non-compete agreements are incompatible + with this code of conduct. For example, a company may restrict an + employee's ability to solicit the company's customers. However, an + agreement must not block any form of technical or social + participation in BA activities, including but not limited to the + implementation of particular features. + + The accumulation of experience and expertise in individual persons, + who are ultimately free to direct their energy and attention as + they decide, is one of the most important drivers of progress in + open source projects. A company that limits this freedom may hinder + the success of the BA's efforts. + + * **Do not use patents as offensive weapons.** If any BA participant + prevents the adoption or development of BA technologies by + asserting its patents, that undermines the purpose of the + coalition. The collaboration fostered by the BA cannot include + members who act to undermine its work. + + * **Practice responsible disclosure** for security vulnerabilities. + Use designated, non-public reporting channels to disclose technical + vulnerabilities, and give the project a reasonable period to + respond, remediate, and patch. [TODO: optionally include the + security vulnerability reporting URL here.] + + Vulnerability reporters may patch their company's own offerings, as + long as that patching does not significantly delay the reporting of + the vulnerability. Vulnerability information should never be used + for unilateral commercial advantage. 
Vendors may legitimately + compete on the speed and reliability with which they deploy + security fixes, but withholding vulnerability information damages + everyone in the long run by risking harm to the BA project's + reputation and to the security of all users. + + * **Respect the letter and spirit of open source practice.** While + there is not space to list here all possible aspects of standard + open source practice, some examples will help show what we mean: + + * Abide by all applicable open source license terms. Do not engage + in copyright violation or misattribution of any kind. + + * Do not claim others' ideas or designs as your own. + + * When others engage in publicly visible work (e.g., an upcoming + demo that is coordinated in a public issue tracker), do not + unilaterally announce early releases or early demonstrations of + that work ahead of their schedule in order to secure private + advantage (such as marketplace advantage) for yourself. + + The BA reserves the right to determine what constitutes good open + source practices and to take action as it deems appropriate to + encourage, and if necessary enforce, such practices. + +## Enforcement + +Instances of organizational behavior in violation of the OCoC may +be reported by contacting the Bytecode Alliance CoC team at +[report@bytecodealliance.org](mailto:report@bytecodealliance.org). The +CoC team will review and investigate all complaints, and will respond +in a way that it deems appropriate to the circumstances. The CoC team +is obligated to maintain confidentiality with regard to the reporter of +an incident. Further details of specific enforcement policies may be +posted separately. + +When the BA deems an organization in violation of this OCoC, the BA +will, at its sole discretion, determine what action to take. 
The BA +will decide what type, degree, and duration of corrective action is +needed, if any, before a violating organization can be considered for +membership (if it was not already a member) or can have its membership +reinstated (if it was a member and the BA canceled its membership due +to the violation). + +In practice, the BA's first approach will be to start a conversation, +with punitive enforcement used only as a last resort. Violations +often turn out to be unintentional and swiftly correctable with all +parties acting in good faith. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b44ef22e72ee46f63d3580498fc444f632f9dbe9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/linux-raw-sys-0.11.0/README.md @@ -0,0 +1,44 @@ +

+

linux-raw-sys

+ +

+ Generated bindings for Linux's userspace API +

+ +

+ Github Actions CI Status + zulip chat + crates.io page + docs.rs docs +

+
+ +This crate contains bindgen-generated bindings for Linux's userspace API. + +This is primarily of interest if you want to make raw system calls directly, +which is tedious and error prone and not necessary for most use cases. For a +minimal type-safe, memory-safe, and I/O-safe API to the Linux system calls +built on these bindings, see the [rustix crate]. + +The full bindings are quite large, so they've been split up into modules and +cargo features. By default, `general` and `errno` are enabled, which provide +most things needed by general-purpose code. + +To regenerate the generated bindings, run `cargo update && cd gen && cargo run --release`. + +## Similar crates + +This is similar to [linux-sys], except the bindings are generated offline, +rather than in a build.rs, making downstream builds simpler. And, this crate +has bindings for more headers, as well as supplementary definitions not +exported by Linux's headers but nonetheless needed by userspace. + +# Minimum Supported Rust Version (MSRV) + +This crate currently works on the version of [Rust on Debian stable], which is +currently Rust 1.63. This policy may change in the future, in minor version +releases, so users using a fixed version of Rust should pin to a specific +version of this crate. + +[linux-sys]: https://crates.io/crates/linux-sys +[rustix crate]: https://github.com/bytecodealliance/rustix#linux-raw-syscall-support diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/once_cell_polyfill-1.70.2/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/once_cell_polyfill-1.70.2/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..6d478c2bde4533e7eb1f044ff7935fc28a1283b4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/once_cell_polyfill-1.70.2/src/lib.rs @@ -0,0 +1,10 @@ +//! 
> Polyfill for `OnceCell` stdlib feature for use with older MSRVs + +#![warn(clippy::print_stderr)] +#![warn(clippy::print_stdout)] + +pub mod sync; + +#[doc = include_str!("../README.md")] +#[cfg(doctest)] +pub struct ReadmeDoctests; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/once_cell_polyfill-1.70.2/src/sync/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/once_cell_polyfill-1.70.2/src/sync/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..a580157dab4b56c743b169d60f0b03cb38c99b6a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/once_cell_polyfill-1.70.2/src/sync/mod.rs @@ -0,0 +1,41 @@ +#[derive(Debug, Clone, Default, Eq, PartialEq)] +pub struct OnceLock(std::sync::OnceLock); + +impl OnceLock { + pub const fn new() -> Self { + Self(std::sync::OnceLock::new()) + } + + pub fn get(&self) -> Option<&T> { + self.0.get() + } + + pub fn get_mut(&mut self) -> Option<&mut T> { + self.0.get_mut() + } + + pub fn set(&self, value: T) -> Result<(), T> { + self.0.set(value) + } + + pub fn get_or_init(&self, f: F) -> &T + where + F: FnOnce() -> T, + { + self.0.get_or_init(f) + } + + pub fn into_inner(self) -> Option { + self.0.into_inner() + } + + pub fn take(&mut self) -> Option { + self.0.take() + } +} + +impl From for OnceLock { + fn from(value: T) -> Self { + Self(value.into()) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/.cargo_vcs_info.json new file mode 
100644 index 0000000000000000000000000000000000000000..26975005966d324ac464de4f6eb8ea4d1ee1c7cf --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "7d6157704805dcedf703229926938a71b1ddd0b1" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/.gitignore b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..139c48656ea3145c9d8b3f97dda172d3e92997be --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/.gitignore @@ -0,0 +1,5 @@ +target +Cargo.lock +*.bk +.*.swp +.idea/ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/CHANGELOG.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..2372ee1cf5958254e1799d6a40139dba45d74463 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/CHANGELOG.md @@ -0,0 +1,51 @@ +# 3.0.6 + - switch from serde to serde_core + +# 3.0.5 + - updated base version in the docs + +# 3.0.4 + - updated base64 to 0.22.0 + +# 3.0.3 + - allow general whitespace separators instead of just newlines + +# 3.0.2 + - allow EncodeConfig to be built in a const context + +# 3.0.1 + - reduce allocations in `pem::encode` + +# 3.0.0 + - trim `proptest` features to prevent an MSRV break for testing + - make EncodeConfig struct extendable and add a line_wrap config option + +# 2.0.1 + + - Fix serde support on no\_std + - Drop MSRV to 1.60 + +# 2.0 + + - Add no\_std support + - Bump MSRV to 1.67 + - Refactor API to prevent direct modification and access of elements and to + allow access to the optional rfc1421-described headers. 
+ +# 1.1.1 + - Allow PEM files to be parsed with the optional rfc1421-described headers + (although you cannot retrieve the headers) + +# 1.1.0 + - Add optional serde support + +# 1.0.2 + - Remove dependency on Regex in favor of a hand-rolled parser + +# 1.0.1 + + - hide the ASCII\_ARMOR symbol to work around a linking issue with 32-bit windows builds + +# 1.0 + + - `pem::parse_many` now returns a `Result>` instead of a `Vec` that silently discarded invalid sections. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..167fe152b2b00bdac75600d41312947d131fd7f9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/Cargo.lock @@ -0,0 +1,680 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +dependencies = [ + "memchr", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + +[[package]] +name = "bumpalo" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + "bitflags 1.3.2", + "textwrap", + "unicode-width", +] + +[[package]] +name = "criterion" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" +dependencies = [ + "atty", + "cast", + "clap", + "criterion-plot", + "csv", + "itertools", + "lazy_static", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_cbor", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "csv" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" +dependencies = [ + "memchr", +] + +[[package]] +name = "either" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" + +[[package]] +name = "getrandom" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "half" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" + +[[package]] +name = "js-sys" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.151" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" + +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" + +[[package]] +name = "memchr" +version = "2.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" + +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + +[[package]] +name = 
"num-traits" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + +[[package]] +name = "pem" +version = "3.0.6" +dependencies = [ + "base64", + "criterion", + "proptest", + "serde_core", + "serde_json", +] + +[[package]] +name = "plotters" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" + +[[package]] +name = "plotters-svg" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proptest" 
+version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +dependencies = [ + "bitflags 2.4.1", + "lazy_static", + "num-traits", + "rand", + "rand_chacha", + "rand_xorshift", + "regex-syntax", + "unarray", +] + +[[package]] +name = "quote" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rayon" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +dependencies = [ + 
"crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "regex" +version = "1.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + +[[package]] +name = "ryu" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", +] + +[[package]] +name = "serde_cbor" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +dependencies = [ + "half", + "serde", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-width" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" + +[[package]] +name = "walkdir" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" + +[[package]] +name = "web-sys" +version = "0.3.66" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..e1bcfddc236142ba18999e4aac02846f4906b1c0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/Cargo.toml @@ -0,0 +1,76 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.60.0" +name = "pem" +version = "3.0.6" +authors = ["Jonathan Creekmore "] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Parse and encode PEM-encoded data." +homepage = "https://github.com/jcreekmore/pem-rs.git" +documentation = "https://docs.rs/pem/" +readme = "README.md" +keywords = [ + "no-std", + "no_std", + "pem", +] +categories = ["cryptography"] +license = "MIT" +repository = "https://github.com/jcreekmore/pem-rs.git" + +[badges.travis-ci] +repository = "jcreekmore/pem-rs" + +[features] +default = ["std"] +serde = ["dep:serde_core"] +std = [ + "base64/std", + "serde_core?/std", +] + +[lib] +name = "pem" +path = "src/lib.rs" + +[[bench]] +name = "pem_benchmark" +path = "benches/pem_benchmark.rs" +harness = false + +[dependencies.base64] +version = "0.22.0" +features = ["alloc"] +default-features = false + +[dependencies.serde_core] +version = "1" +optional = true +default-features = false + +[dev-dependencies.criterion] +version = "0.3.0" + +[dev-dependencies.proptest] +version = "1" +features = ["std"] +default-features = false + +[dev-dependencies.serde_json] +version = "1" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..1571a5ee037996d1f9da93c0ce1e42f85651c903 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/Cargo.toml.orig @@ -0,0 +1,49 @@ +[package] +authors = ["Jonathan Creekmore "] +description = "Parse and encode PEM-encoded data." 
+documentation = "https://docs.rs/pem/" +homepage = "https://github.com/jcreekmore/pem-rs.git" +license = "MIT" +name = "pem" +readme = "README.md" +repository = "https://github.com/jcreekmore/pem-rs.git" +version = "3.0.6" +categories = [ "cryptography" ] +keywords = [ + "no-std", + "no_std", + "pem", +] +edition = "2021" +rust-version = "1.60.0" + +[features] +default = ["std"] +std = [ + "base64/std", + # enable serde_core's std feature iff the serde and std features are both activated + "serde_core?/std", +] +serde = ["dep:serde_core"] + +[dependencies.base64] +version = "0.22.0" +default-features = false +features = ["alloc"] + +[dependencies.serde_core] +version = "1" +default-features = false +optional = true + +[dev-dependencies] +criterion = "0.3.0" +proptest = { version = "1", default-features = false, features = ["std"] } +serde_json = "1" + +[[bench]] +name = "pem_benchmark" +harness = false + +[badges] +travis-ci = { repository = "jcreekmore/pem-rs" } diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/LICENSE.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/LICENSE.md new file mode 100644 index 0000000000000000000000000000000000000000..8d7f438312d7a663a2142c5be8442b8428814582 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Jonathan Creekmore + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ca6bd9c9f0f003894f35f45c3e4116daebd92273 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/pem-3.0.6/README.md @@ -0,0 +1,40 @@ +pem +=== + +A Rust library for parsing and encoding PEM-encoded data. + +![Build Status](https://github.com/github/docs/actions/workflows/ci.yml/badge.svg) + + +### Documentation +[Module documentation with examples](https://docs.rs/pem/) + +### Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +pem = "3.0" +``` + +Here is a simple example that parse PEM-encoded data and prints the tag: + +```rust +use pem::parse; + +const SAMPLE: &'static str = "-----BEGIN RSA PRIVATE KEY----- +MIIBPQIBAAJBAOsfi5AGYhdRs/x6q5H7kScxA0Kzzqe6WI6gf6+tc6IvKQJo5rQc +dWWSQ0nRGt2hOPDO+35NKhQEjBQxPh/v7n0CAwEAAQJBAOGaBAyuw0ICyENy5NsO +2gkT00AWTSzM9Zns0HedY31yEabkuFvrMCHjscEF7u3Y6PB7An3IzooBHchsFDei +AAECIQD/JahddzR5K3A6rzTidmAf1PBtqi7296EnWv8WvpfAAQIhAOvowIXZI4Un +DXjgZ9ekuUjZN+GUQRAVlkEEohGLVy59AiEA90VtqDdQuWWpvJX0cM08V10tLXrT +TTGsEtITid1ogAECIQDAaFl90ZgS5cMrL3wCeatVKzVUmuJmB/VAmlLFFGzK0QIh +ANJGc7AFk4fyFD/OezhwGHbWmo/S+bfeAiIh2Ss2FxKJ +-----END RSA PRIVATE KEY----- +"; + +let pem = parse(SAMPLE)?; +println!("PEM tag: {}", pem.tag); + +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/.cargo-ok 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..cb5fae6ea68d40c200bc5c960e3350b816e94d9b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "cb7791f6814024d85aa6f9968e73051a4b295bf0" + }, + "path_in_vcs": "plotters" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..c92720caeb749ae6a461e8d8135f997131d1e03a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/Cargo.lock @@ -0,0 +1,1438 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "ab_glyph" +version = "0.2.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79faae4620f45232f599d9bc7b290f88247a0834162c4495ab2f02d60004adfb" +dependencies = [ + "ab_glyph_rasterizer", + "owned_ttf_parser", +] + +[[package]] +name = "ab_glyph_rasterizer" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c71b1793ee61086797f5c80b6efa2b8ffa6d5dd703f118545808a7f2e27f7046" + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstyle" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" + +[[package]] +name = "autocfg" +version = 
"1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "bytemuck" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" +dependencies = [ + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + 
"iana-time-zone", + "js-sys", + "num-traits", + "wasm-bindgen", + "windows-targets 0.52.6", +] + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clap" +version = "4.5.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.5.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_lex" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" + +[[package]] +name = "color_quant" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" + +[[package]] +name = "console_error_panic_hook" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" +dependencies = [ + "cfg-if", + "wasm-bindgen", +] + +[[package]] +name = "core-foundation" 
+version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core-graphics" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c07782be35f9e1140080c6b96f0d44b739e2278479f64e02fdab4e32dfd8b081" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-graphics-types", + "foreign-types", + "libc", +] + +[[package]] +name = "core-graphics-types" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "libc", +] + +[[package]] +name = "core-text" +version = "20.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9d2790b5c08465d49f8dc05c8bcae9fea467855947db39b0f8145c091aaced5" +dependencies = [ + "core-foundation", + "core-graphics", + "foreign-types", + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools", + "num-traits", + "once_cell", + "oorandom", + "plotters 0.3.6", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + 
"tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "dlib" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "330c60081dcc4c72131f8eb70510f1ac07223e5d4163db481a04a0befcffa412" +dependencies = [ + "libloading", +] + +[[package]] +name = "dwrote" +version = "0.11.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2da3498378ed373237bdef1eddcc64e7be2d3ba4841f4c22a998e81cadeea83c" +dependencies = [ + "lazy_static", + "libc", + "winapi", + "wio", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "fdeflate" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f9bfee30e4dedf0ab8b422f03af778d9612b63f502710fc500a334ebe2de645" +dependencies = [ + "simd-adler32", +] + +[[package]] +name = "flate2" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" +dependencies = [ + "crc32fast", + "miniz_oxide 0.8.0", +] + +[[package]] +name = "float-ord" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce81f49ae8a0482e4c55ea62ebbd7e5a686af544c00b9d090bba3ff9be97b3d" + +[[package]] +name = "font-kit" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b64b34f4efd515f905952d91bc185039863705592c0c53ae6d979805dd154520" +dependencies = [ + "bitflags 2.6.0", + "byteorder", + "core-foundation", + "core-graphics", + "core-text", + "dirs", + "dwrote", + "float-ord", + "freetype-sys", + "lazy_static", + "libc", + "log", + "pathfinder_geometry", + "pathfinder_simd", + "walkdir", + "winapi", + "yeslogic-fontconfig-sys", +] + +[[package]] +name = "foreign-types" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965" +dependencies = [ + "foreign-types-macros", + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-macros" +version = "0.2.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "foreign-types-shared" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" + +[[package]] +name = "freetype-sys" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7edc5b9669349acfda99533e9e0bcf26a51862ab43b08ee7745c55d28eb134" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gif" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80792593675e051cf94a4b111980da2ba60d4a83e43e0048c5693baab3977045" +dependencies = [ + "color_quant", + "weezl", +] + +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "iana-time-zone" +version = "0.1.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" 
+version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "image" +version = "0.24.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5690139d2f55868e080017335e4b94cb7414274c74f1669c84fb5feba2c9f69d" +dependencies = [ + "bytemuck", + "byteorder", + "color_quant", + "jpeg-decoder", + "num-traits", + "png", +] + +[[package]] +name = "is-terminal" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "jpeg-decoder" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" + +[[package]] +name = "js-sys" +version = "0.3.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.158" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" + +[[package]] +name = "libloading" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +dependencies = [ + "cfg-if", + "windows-targets 0.52.6", +] + +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.6.0", + "libc", +] + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "minicov" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c71e683cd655513b99affab7d317deb690528255a0d5f717f1024093c12b169" +dependencies = [ + "cc", + "walkdir", +] + +[[package]] +name = "miniz_oxide" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +dependencies = [ + "adler", + "simd-adler32", +] + +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +dependencies = [ + "adler2", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "oorandom" +version = "11.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "owned_ttf_parser" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490d3a563d3122bf7c911a59b0add9389e5ec0f5f0c3ac6b91ff235a0e6a7f90" +dependencies = [ + "ttf-parser 0.24.1", +] + +[[package]] +name = "pathfinder_geometry" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b7e7b4ea703700ce73ebf128e1450eb69c3a8329199ffbfb9b2a0418e5ad3" +dependencies = [ + "log", + "pathfinder_simd", +] + +[[package]] +name = "pathfinder_simd" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cf07ef4804cfa9aea3b04a7bbdd5a40031dbb6b4f2cbaf2b011666c80c5b4f2" +dependencies = [ + "rustc_version", +] + +[[package]] +name = "pkg-config" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" + +[[package]] +name = "plotters" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" +dependencies = [ + "num-traits", + "plotters-backend", + 
"plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters" +version = "0.3.7" +dependencies = [ + "ab_glyph", + "chrono", + "criterion", + "font-kit", + "image", + "itertools", + "lazy_static", + "num-traits", + "once_cell", + "pathfinder_geometry", + "plotters-backend", + "plotters-bitmap", + "plotters-svg", + "rand", + "rand_distr", + "rand_xorshift", + "rayon", + "serde", + "serde_derive", + "serde_json", + "ttf-parser 0.20.0", + "wasm-bindgen", + "wasm-bindgen-test", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" + +[[package]] +name = "plotters-bitmap" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ce181e3f6bf82d6c1dc569103ca7b1bd964c60ba03d7e6cdfbb3e3eb7f7405" +dependencies = [ + "gif", + "image", + "plotters-backend", +] + +[[package]] +name = "plotters-svg" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" +dependencies = [ + "image", + "plotters-backend", +] + +[[package]] +name = "png" +version = "0.17.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06e4b0d3d1312775e782c86c91a111aa1f910cbb65e1337f9975b5f9a554b5e1" +dependencies = [ + "bitflags 1.3.2", + "crc32fast", + "fdeflate", + "flate2", + "miniz_oxide 0.7.4", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "proc-macro2" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +dependencies 
= [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_distr" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] 
+ +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom", + "libredox", + "thiserror", +] + +[[package]] +name = "regex" +version = "1.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "semver" +version = "1.0.23" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" + +[[package]] +name = "serde" +version = "1.0.210" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.210" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.128" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + +[[package]] +name = "syn" +version = "2.0.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "thiserror" +version = "1.0.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "ttf-parser" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17f77d76d837a7830fe1d4f12b7b4ba4192c1888001c7164257e4bc6d21d96b4" + +[[package]] +name = "ttf-parser" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be21190ff5d38e8b4a2d3b6a3ae57f612cc39c96e83cedeaf7abc338a8bac4a" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +dependencies = [ + "bumpalo", + "log", + "once_cell", + 
"proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" + +[[package]] +name = "wasm-bindgen-test" +version = "0.3.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68497a05fb21143a08a7d24fc81763384a3072ee43c44e86aad1744d6adef9d9" +dependencies = [ + "console_error_panic_hook", + "js-sys", + "minicov", + "scoped-tls", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "web-sys" +version = "0.3.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "weezl" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53a85b86a771b1c87058196170769dd264f66c0782acf1ae6cc51bfd64b39082" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ 
+ "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" 
+version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "wio" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d129932f4644ac2396cb456385cbf9e63b5b30c6e8dc4820bdca4eb082037a5" +dependencies = [ + "winapi", +] + +[[package]] +name = "yeslogic-fontconfig-sys" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "503a066b4c037c440169d995b869046827dbc71263f6e8f3be6d77d4f3229dbd" +dependencies = [ + "dlib", + "once_cell", + "pkg-config", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..530924a35f70ec991ec24f88a0a81f182136325c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/Cargo.toml @@ -0,0 +1,329 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of 
Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "plotters" +version = "0.3.7" +authors = ["Hao Hou "] +build = false +exclude = [ + "doc-template", + "plotters-doc-data", +] +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "A Rust drawing library focus on data plotting for both WASM and native applications" +homepage = "https://plotters-rs.github.io/" +readme = "README.md" +keywords = [ + "WebAssembly", + "Visualization", + "Plotting", + "Drawing", +] +categories = [ + "visualization", + "wasm", +] +license = "MIT" +repository = "https://github.com/plotters-rs/plotters" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "doc_cfg", +] + +[lib] +name = "plotters" +path = "src/lib.rs" + +[[example]] +name = "3d-plot" +path = "examples/3d-plot.rs" + +[[example]] +name = "3d-plot2" +path = "examples/3d-plot2.rs" + +[[example]] +name = "animation" +path = "examples/animation.rs" + +[[example]] +name = "area-chart" +path = "examples/area-chart.rs" + +[[example]] +name = "blit-bitmap" +path = "examples/blit-bitmap.rs" + +[[example]] +name = "boxplot" +path = "examples/boxplot.rs" + +[[example]] +name = "chart" +path = "examples/chart.rs" + +[[example]] +name = "colormaps" +path = "examples/colormaps.rs" + +[[example]] +name = "console" +path = "examples/console.rs" + +[[example]] +name = "customized_coord" +path = "examples/customized_coord.rs" + +[[example]] +name = "errorbar" +path = "examples/errorbar.rs" + +[[example]] +name = "full_palette" +path = "examples/full_palette.rs" + +[[example]] +name = "histogram" +path = "examples/histogram.rs" + +[[example]] +name = "mandelbrot" +path = "examples/mandelbrot.rs" + +[[example]] +name = 
"matshow" +path = "examples/matshow.rs" + +[[example]] +name = "nested_coord" +path = "examples/nested_coord.rs" + +[[example]] +name = "normal-dist" +path = "examples/normal-dist.rs" + +[[example]] +name = "normal-dist2" +path = "examples/normal-dist2.rs" + +[[example]] +name = "pie" +path = "examples/pie.rs" + +[[example]] +name = "relative_size" +path = "examples/relative_size.rs" + +[[example]] +name = "sierpinski" +path = "examples/sierpinski.rs" + +[[example]] +name = "slc-temp" +path = "examples/slc-temp.rs" + +[[example]] +name = "snowflake" +path = "examples/snowflake.rs" + +[[example]] +name = "stock" +path = "examples/stock.rs" + +[[example]] +name = "tick_control" +path = "examples/tick_control.rs" + +[[example]] +name = "two-scales" +path = "examples/two-scales.rs" + +[[bench]] +name = "benchmark" +path = "benches/main.rs" +harness = false + +[dependencies.chrono] +version = "0.4.32" +optional = true + +[dependencies.num-traits] +version = "0.2.14" + +[dependencies.plotters-backend] +version = "0.3.6" + +[dependencies.plotters-bitmap] +version = "0.3.6" +optional = true +default-features = false + +[dependencies.plotters-svg] +version = "0.3.6" +optional = true + +[dev-dependencies.criterion] +version = "0.5.1" + +[dev-dependencies.itertools] +version = "0.10.0" + +[dev-dependencies.rayon] +version = "1.5.1" + +[dev-dependencies.serde] +version = "1.0.139" + +[dev-dependencies.serde_derive] +version = "1.0.140" + +[dev-dependencies.serde_json] +version = "1.0.82" + +[features] +ab_glyph = [ + "dep:ab_glyph", + "once_cell", +] +all_elements = [ + "errorbar", + "candlestick", + "boxplot", + "histogram", +] +all_series = [ + "area_series", + "line_series", + "point_series", + "surface_series", +] +area_series = [] +bitmap_backend = ["plotters-bitmap"] +bitmap_encoder = ["plotters-bitmap/image_encoder"] +bitmap_gif = ["plotters-bitmap/gif_backend"] +boxplot = [] +candlestick = [] +colormaps = [] +datetime = ["chrono"] +default = [ + "bitmap_backend", + 
"bitmap_encoder", + "bitmap_gif", + "svg_backend", + "chrono", + "ttf", + "image", + "deprecated_items", + "all_series", + "all_elements", + "full_palette", + "colormaps", +] +deprecated_items = [] +errorbar = [] +evcxr = ["svg_backend"] +evcxr_bitmap = [ + "evcxr", + "bitmap_backend", + "plotters-svg/bitmap_encoder", +] +fontconfig-dlopen = ["font-kit/source-fontconfig-dlopen"] +full_palette = [] +histogram = [] +line_series = [] +point_series = [] +surface_series = [] +svg_backend = ["plotters-svg"] +ttf = [ + "font-kit", + "ttf-parser", + "lazy_static", + "pathfinder_geometry", +] + +[target.'cfg(all(target_arch = "wasm32", not(target_os = "wasi")))'.dependencies.wasm-bindgen] +version = "0.2.89" + +[target.'cfg(all(target_arch = "wasm32", not(target_os = "wasi")))'.dependencies.web-sys] +version = "0.3.66" +features = [ + "Document", + "DomRect", + "Element", + "HtmlElement", + "Node", + "Window", + "HtmlCanvasElement", + "CanvasRenderingContext2d", +] + +[target.'cfg(all(target_arch = "wasm32", not(target_os = "wasi")))'.dev-dependencies.wasm-bindgen-test] +version = "0.3.39" + +[target.'cfg(not(all(target_arch = "wasm32", not(target_os = "wasi"))))'.dependencies.ab_glyph] +version = "0.2.12" +optional = true + +[target.'cfg(not(all(target_arch = "wasm32", not(target_os = "wasi"))))'.dependencies.font-kit] +version = "0.14.2" +optional = true + +[target.'cfg(not(all(target_arch = "wasm32", not(target_os = "wasi"))))'.dependencies.image] +version = "0.24.3" +features = [ + "jpeg", + "png", + "bmp", +] +optional = true +default-features = false + +[target.'cfg(not(all(target_arch = "wasm32", not(target_os = "wasi"))))'.dependencies.lazy_static] +version = "1.4.0" +optional = true + +[target.'cfg(not(all(target_arch = "wasm32", not(target_os = "wasi"))))'.dependencies.once_cell] +version = "1.8.0" +optional = true + +[target.'cfg(not(all(target_arch = "wasm32", not(target_os = "wasi"))))'.dependencies.pathfinder_geometry] +version = "0.5.1" +optional = true + 
+[target.'cfg(not(all(target_arch = "wasm32", not(target_os = "wasi"))))'.dependencies.ttf-parser] +version = "0.20.0" +optional = true + +[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies.rand] +version = "0.8.3" + +[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies.rand_distr] +version = "0.4.0" + +[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies.rand_xorshift] +version = "0.3.0" + +[lints.rust.deprecated] +level = "allow" +priority = 0 + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = ["cfg(doc_cfg)"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..35a298a7808c74f4ab2dfaa7c071c61a045c0fe5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/Cargo.toml.orig @@ -0,0 +1,144 @@ +[package] +name = "plotters" +version = "0.3.7" +authors = ["Hao Hou "] +edition = "2018" +license = "MIT" +msrv = "1.56" +description = "A Rust drawing library focus on data plotting for both WASM and native applications" +repository = "https://github.com/plotters-rs/plotters" +homepage = "https://plotters-rs.github.io/" +keywords = ["WebAssembly", "Visualization", "Plotting", "Drawing"] +categories = ["visualization", "wasm"] +readme = "../README.md" +exclude = ["doc-template", "plotters-doc-data"] + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(doc_cfg)'] } +deprecated = { level = "allow" } + +[dependencies] +num-traits = "0.2.14" +chrono = { version = "0.4.32", optional = true } + +[dependencies.plotters-backend] +version = "0.3.6" +path = "../plotters-backend" + +[dependencies.plotters-bitmap] +version = "0.3.6" +default-features = false +optional = true +path = "../plotters-bitmap" + +[dependencies.plotters-svg] +version = "0.3.6" +optional = true +path = "../plotters-svg" + 
+[target.'cfg(not(all(target_arch = "wasm32", not(target_os = "wasi"))))'.dependencies] +ttf-parser = { version = "0.20.0", optional = true } +lazy_static = { version = "1.4.0", optional = true } +pathfinder_geometry = { version = "0.5.1", optional = true } +font-kit = { version = "0.14.2", optional = true } +ab_glyph = { version = "0.2.12", optional = true } +once_cell = { version = "1.8.0", optional = true } + + +[target.'cfg(not(all(target_arch = "wasm32", not(target_os = "wasi"))))'.dependencies.image] +version = "0.24.3" +optional = true +default-features = false +features = ["jpeg", "png", "bmp"] + +[target.'cfg(all(target_arch = "wasm32", not(target_os = "wasi")))'.dependencies.wasm-bindgen] +version = "0.2.89" + +[target.'cfg(all(target_arch = "wasm32", not(target_os = "wasi")))'.dependencies.web-sys] +version = "0.3.66" +features = [ + "Document", + "DomRect", + "Element", + "HtmlElement", + "Node", + "Window", + "HtmlCanvasElement", + "CanvasRenderingContext2d", +] + +[features] +default = [ + "bitmap_backend", "bitmap_encoder", "bitmap_gif", + "svg_backend", + "chrono", + "ttf", + "image", + "deprecated_items", "all_series", "all_elements", + "full_palette", + "colormaps" +] +all_series = ["area_series", "line_series", "point_series", "surface_series"] +all_elements = ["errorbar", "candlestick", "boxplot", "histogram"] + +# Tier 1 Backends +bitmap_backend = ["plotters-bitmap"] +bitmap_encoder = ["plotters-bitmap/image_encoder"] +bitmap_gif = ["plotters-bitmap/gif_backend"] +svg_backend = ["plotters-svg"] + +# Colors +full_palette = [] +colormaps = [] + +# Elements +errorbar = [] +candlestick = [] +boxplot = [] + +# Series +histogram = [] +area_series = [] +line_series = [] +point_series = [] +surface_series = [] + +# Font implementation +ttf = ["font-kit", "ttf-parser", "lazy_static", "pathfinder_geometry"] +# dlopen fontconfig C library at runtime instead of linking at build time +# Can be useful for cross compiling, especially considering fontconfig 
has lots of C dependencies +fontconfig-dlopen = ["font-kit/source-fontconfig-dlopen"] + +ab_glyph = ["dep:ab_glyph", "once_cell"] + +# Misc +datetime = ["chrono"] +evcxr = ["svg_backend"] +evcxr_bitmap = ["evcxr", "bitmap_backend", "plotters-svg/bitmap_encoder"] +deprecated_items = [] # Keep some of the deprecated items for backward compatibility + +[dev-dependencies] +itertools = "0.10.0" +criterion = "0.5.1" +rayon = "1.5.1" +serde_json = "1.0.82" +serde = "1.0.139" +serde_derive = "1.0.140" + +[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] +rand = "0.8.3" +rand_distr = "0.4.0" +rand_xorshift = "0.3.0" + +[target.'cfg(all(target_arch = "wasm32", not(target_os = "wasi")))'.dev-dependencies] +wasm-bindgen-test = "0.3.39" + +[[bench]] +name = "benchmark" +harness = false +path = "benches/main.rs" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "doc_cfg"] + diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/LICENSE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..ea5b60640b01f74e295037aa8a6b7d4ea278a739 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/LICENSE @@ -0,0 +1 @@ +../LICENSE \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/README.md new file mode 100644 index 0000000000000000000000000000000000000000..32d46ee883b58d6a383eed06eb98f33aa6530ded --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-0.3.7/README.md @@ -0,0 +1 @@ +../README.md \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/.cargo-ok new file mode 100644 index 
0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..1a9b20f96cdbebcf51e2d0a189cf80c17ef247a3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "cb7791f6814024d85aa6f9968e73051a4b295bf0" + }, + "path_in_vcs": "plotters-backend" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..09ed859e7c144e197286bdec24b432324fa429bc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/Cargo.toml @@ -0,0 +1,32 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +name = "plotters-backend" +version = "0.3.7" +authors = ["Hao Hou "] +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Plotters Backend API" +homepage = "https://plotters-rs.github.io" +readme = "README.md" +license = "MIT" +repository = "https://github.com/plotters-rs/plotters" + +[lib] +name = "plotters_backend" +path = "src/lib.rs" + +[dependencies] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..aad934c2153db37b8446b6d864b7329ee7b7f177 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/Cargo.toml.orig @@ -0,0 +1,14 @@ +[package] +name = "plotters-backend" +version = "0.3.7" +authors = ["Hao Hou "] +edition = "2018" +license = "MIT" +description = "Plotters Backend API" +homepage = "https://plotters-rs.github.io" +repository = "https://github.com/plotters-rs/plotters" +readme = "README.md" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/LICENSE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..ea5b60640b01f74e295037aa8a6b7d4ea278a739 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/LICENSE @@ -0,0 +1 @@ +../LICENSE \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..871fc7cee9d3e7a91b67bc3890067811da98f2ba --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/plotters-backend-0.3.7/README.md @@ -0,0 +1,8 @@ +# plotters-backend - The base crate for implementing a backend for Plotters + +This is a part of plotters project. For more details, please check the following links: + +- For high-level intro of Plotters, see: [Plotters on crates.io](https://crates.io/crates/plotters) +- Check the main repo at [Plotters repo](https://github.com/plotters-rs/plotters.git) +- For detailed documentation about this crate, check [plotters-backend on docs.rs](https://docs.rs/plotters-backend/) +- You can also visit Plotters [Homepage](https://plotters-rs.github.io) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..63eb5fb8231eb2647f1aa3759fa497e69c3562f6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "59cbe84d1ad1d8249adf5d3a282e2fe490379c85" + }, + "path_in_vcs": "portable-atomic-util" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/CHANGELOG.md 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..826c7502198eb2436553d724889d2d2f9019e9ee --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/CHANGELOG.md @@ -0,0 +1,127 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](https://semver.org). + +Releases may yanked if there is a security bug, a soundness bug, or a regression. + + + +## [Unreleased] + +## [0.2.5] - 2026-01-31 + +- Add `Arc::{new_zeroed,new_zeroed_slice}` at Rust 1.36+. (align to the [std `Arc` change in Rust 1.92](https://github.com/rust-lang/rust/pull/144091)) ([f8affb6](https://github.com/taiki-e/portable-atomic/commit/f8affb661abb1ac928cd78a66bf8fe1e72c42e42), [ae5aba7](https://github.com/taiki-e/portable-atomic/commit/ae5aba7cbe182b5d83b4b4973ccdaab372bef4d6)) + +- Implement `Default` for `Pin: Default>`. (align to the [std `Arc` change in Rust 1.91](https://github.com/rust-lang/rust/pull/143717)) ([2d8d33c](https://github.com/taiki-e/portable-atomic/commit/2d8d33c127126e9d290c6f368f54c2a784b43b57)) + +- Implement `From<&mut {[T],str}>` for `Arc<{[T],str}>` at Rust 1.36+. (align to the [std `Arc` change in Rust 1.84](https://github.com/rust-lang/rust/pull/129329)) ([99640d6](https://github.com/taiki-e/portable-atomic/commit/99640d656fb93c54e3fc255b820f61653d6c425e), [ae5aba7](https://github.com/taiki-e/portable-atomic/commit/ae5aba7cbe182b5d83b4b4973ccdaab372bef4d6)) + +- Implement `{AsFd, AsRawFd}` for `Arc` on Trusty. ([1b09ffb](https://github.com/taiki-e/portable-atomic/commit/1b09ffbc3010dc64b16e4fdf39742caee933ec7d)) + +- Support slice-related methods that previously required Rust 1.44+ at Rust 1.36+. 
([ae5aba7](https://github.com/taiki-e/portable-atomic/commit/ae5aba7cbe182b5d83b4b4973ccdaab372bef4d6)) + +- Support `AsRawFd for Arc` implementation on Unix in all Rust versions. Previously, it was only for Rust 1.63+. ([1b09ffb](https://github.com/taiki-e/portable-atomic/commit/1b09ffbc3010dc64b16e4fdf39742caee933ec7d)) + +- Fix build error when building for HermitOS with `std` feature in Rust 1.63-1.68. ([1b09ffb](https://github.com/taiki-e/portable-atomic/commit/1b09ffbc3010dc64b16e4fdf39742caee933ec7d)) + +- Documentation improvements. + +- Enable [release immutability](https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/immutable-releases). + +## [0.2.4] - 2024-11-23 + +- Add unstable `portable_atomic_unstable_coerce_unsized` cfg (requires Rust nightly). ([#195](https://github.com/taiki-e/portable-atomic/pull/195), thanks @brodycj) + +- Respect [`RUSTC_BOOTSTRAP=-1` recently added in nightly](https://github.com/rust-lang/rust/pull/132993) in rustc version detection. ([5b2847a](https://github.com/taiki-e/portable-atomic/commit/5b2847a8b99aa2a57a6c80f5a47327b2764f08cc)) + +## [0.2.3] - 2024-10-17 + +- Add `Arc::{new_uninit,assume_init}` at Rust 1.36+ and `Arc::new_uninit_slice` at Rust 1.44+. (align to the [std `Arc` change in Rust 1.82](https://github.com/rust-lang/rust/pull/129401)) ([362dc9a](https://github.com/taiki-e/portable-atomic/commit/362dc9af2779c81aa346e89c4d3f3eef71cf29ed)) + +- Support `make_mut` on `Arc<[T]>` and `Arc` at Rust 1.36+. (align to the [std `Arc` change in Rust 1.81](https://github.com/rust-lang/rust/pull/116113)) ([362dc9a](https://github.com/taiki-e/portable-atomic/commit/362dc9af2779c81aa346e89c4d3f3eef71cf29ed)) + +## [0.2.2] - 2024-07-11 + +- Fix [build issue with `esp` toolchain](https://github.com/taiki-e/semihosting/issues/11). 
([f8ea85e](https://github.com/taiki-e/portable-atomic/commit/f8ea85e1aa46fa00bc865633fb40b05f8a0c823b)) + +## [0.2.1] - 2024-06-22 + +**Note:** This release has been yanked due to an issue fixed in 0.2.2. + +- Support `impl Error for Arc` in no-std at Rust 1.81+. ([30b9f90](https://github.com/taiki-e/portable-atomic/commit/30b9f90346dfad14ab00f1c7e1f988f941330bcf)) + +- Implement `Default` for `Arc<[T]>` and `Arc` at Rust 1.51+. (align to the [std `Arc` change in Rust 1.80](https://github.com/rust-lang/rust/pull/124640)) ([c6ee296](https://github.com/taiki-e/portable-atomic/commit/c6ee29606984863d008c2cf2209751ed0fa43b14)) + +- Implement `{AsFd, AsRawFd}` for `Arc` on HermitOS. ([b778244](https://github.com/taiki-e/portable-atomic/commit/b778244917e17bfc431c9add4d028ff26d00e3b7)) + +## [0.2.0] - 2024-05-07 + +- Rewrite `Arc` based on `std::sync::Arc`'s implementation. ([#142](https://github.com/taiki-e/portable-atomic/pull/142)) + + This fixes accidental API differences with std ([#139](https://github.com/taiki-e/portable-atomic/issues/139), [#140](https://github.com/taiki-e/portable-atomic/issues/140)) and adds many missing APIs compared to std: + - Add `Arc::{downcast, into_inner, make_mut, new_cyclic}` ([#142](https://github.com/taiki-e/portable-atomic/pull/142)) + - Implement `{fmt::Display, fmt::Pointer, Error, From, From>, From>, AsFd, AsRawFd, AsHandle, AsSocket}` for `Arc` ([#142](https://github.com/taiki-e/portable-atomic/pull/142), [78690d7](https://github.com/taiki-e/portable-atomic/commit/78690d7cad3b394119ea147c5773f67806a6ac09), [aba0930](https://github.com/taiki-e/portable-atomic/commit/aba0930269d7075b81810b49bbbbb6c5edc85ea0)) + - Implement `{From<&[T]>, From>, From<[T; N]>, FromIterator}` for `Arc<[T]>` ([#142](https://github.com/taiki-e/portable-atomic/pull/142), [5e9f693](https://github.com/taiki-e/portable-atomic/commit/5e9f693dcb43c35187ca95ce1c824e0cb1d3c4f8)) + - Implement `TryFrom>` for `Arc<[T; N]>` 
([#142](https://github.com/taiki-e/portable-atomic/pull/142)) + - Implement `From>` for `Arc<[u8]>` ([#142](https://github.com/taiki-e/portable-atomic/pull/142)) + - Implement `{From<&str>, From}` for `Arc` ([#142](https://github.com/taiki-e/portable-atomic/pull/142)) + - Implement `{Read, Write, Seek}` for `Arc` ([591ece5](https://github.com/taiki-e/portable-atomic/commit/591ece5bde0f19f1895853791924ee55c51ee61e)) + - Remove `T: UnwindSafe` bound from `impl UnwindSafe for Arc` ([#142](https://github.com/taiki-e/portable-atomic/pull/142)) + +- Add `task::Wake`. ([#145](https://github.com/taiki-e/portable-atomic/pull/145)) + + This is equivalent to `std::task::Wake`, but using `portable_atomic_util::Arc` as a reference-counted pointer. + +- Respect `RUSTC_WRAPPER` in rustc version detection. + +## [0.1.5] - 2023-12-17 + +- Improve offset calculation in `Arc::{into_raw,as_ptr,from_ptr}`. ([#141](https://github.com/taiki-e/portable-atomic/pull/141), thanks @gtsiam) + +## [0.1.4] - 2023-12-16 + +- Fix a bug where `Arc::{into_raw,as_ptr}` returned invalid pointers for larger alignment types. ([#138](https://github.com/taiki-e/portable-atomic/pull/138), thanks @notgull) + +## [0.1.3] - 2023-05-06 + +**Note:** This release has been yanked due to a bug fixed in 0.1.4. + +- Enable `portable-atomic`'s `require-cas` feature to display helpful error messages to users on targets requiring additional action on the user side to provide atomic CAS. ([#100](https://github.com/taiki-e/portable-atomic/pull/100)) + +## [0.1.2] - 2023-04-04 + +**Note:** This release has been yanked due to a bug fixed in 0.1.4. + +- Implement `AsRef`, `Borrow`, and `Unpin` on `Arc`. ([#92](https://github.com/taiki-e/portable-atomic/pull/92) [#93](https://github.com/taiki-e/portable-atomic/pull/93), thanks @notgull) + +## [0.1.1] - 2023-03-24 + +**Note:** This release has been yanked due to a bug fixed in 0.1.4. + +- Prevent weak counter overflow in `Arc::downgrade`. 
([#83](https://github.com/taiki-e/portable-atomic/pull/83)) + + This fixes [a potential unsoundness recently found in the standard library's `Arc`](https://github.com/rust-lang/rust/issues/108706). + +## [0.1.0] - 2023-01-15 + +**Note:** This release has been yanked due to a bug fixed in 0.1.4. + +Initial release + +[Unreleased]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.2.5...HEAD +[0.2.5]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.2.4...portable-atomic-util-0.2.5 +[0.2.4]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.2.3...portable-atomic-util-0.2.4 +[0.2.3]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.2.2...portable-atomic-util-0.2.3 +[0.2.2]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.2.1...portable-atomic-util-0.2.2 +[0.2.1]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.2.0...portable-atomic-util-0.2.1 +[0.2.0]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.5...portable-atomic-util-0.2.0 +[0.1.5]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.4...portable-atomic-util-0.1.5 +[0.1.4]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.3...portable-atomic-util-0.1.4 +[0.1.3]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.2...portable-atomic-util-0.1.3 +[0.1.2]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.1...portable-atomic-util-0.1.2 +[0.1.1]: https://github.com/taiki-e/portable-atomic/compare/portable-atomic-util-0.1.0...portable-atomic-util-0.1.1 +[0.1.0]: https://github.com/taiki-e/portable-atomic/releases/tag/portable-atomic-util-0.1.0 diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/Cargo.lock new 
file mode 100644 index 0000000000000000000000000000000000000000..6d5a40fe3e65720827f2fc95d42e7830ba3396d4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/Cargo.lock @@ -0,0 +1,23 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "build-context" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "portable-atomic" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "portable-atomic-util" +version = "0.2.5" +dependencies = [ + "build-context 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "portable-atomic 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[metadata] +"checksum build-context 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "86610cb1e9d45d65a31b574f9d69de003a76b6bb0b7d882396a5153fc547c935" +"checksum portable-atomic 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..baef814bf59d05fda691ced4c498a02fddabd3f1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/Cargo.toml @@ -0,0 +1,203 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. 
+# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.34" +name = "portable-atomic-util" +version = "0.2.5" +build = "build.rs" +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +Synchronization primitives built with portable-atomic. +""" +readme = "README.md" +keywords = ["atomic"] +categories = [ + "concurrency", + "data-structures", + "embedded", + "no-std", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/taiki-e/portable-atomic" + +[package.metadata.docs.rs] +all-features = true +targets = ["x86_64-unknown-linux-gnu"] + +[package.metadata.cargo_check_external_types] +allowed_external_types = [] + +[features] +alloc = [] +default = [] +std = ["alloc"] + +[lib] +name = "portable_atomic_util" +path = "src/lib.rs" +doc-scrape-examples = false + +[[test]] +name = "arc" +path = "tests/arc.rs" + +[dependencies.portable-atomic] +version = "1.5.1" +features = ["require-cas"] +default-features = false + +[dev-dependencies.build-context] +version = "0.1" + +[lints.clippy] +all = "warn" +as_ptr_cast_mut = "warn" +as_underscore = "warn" +default_union_representation = "warn" +inline_asm_x86_att_syntax = "warn" +pedantic = "warn" +trailing_empty_array = "warn" +transmute_undefined_repr = "warn" +undocumented_unsafe_blocks = "warn" +unused_trait_names = "warn" + +[lints.clippy.bool_assert_comparison] +level = "allow" +priority = 1 + +[lints.clippy.borrow_as_ptr] +level = "allow" +priority = 1 + +[lints.clippy.cast_lossless] +level = "allow" +priority = 1 + +[lints.clippy.declare_interior_mutable_const] +level = "allow" +priority = 1 + +[lints.clippy.doc_markdown] +level = "allow" +priority = 1 + +[lints.clippy.float_cmp] +level = "allow" +priority = 1 + +[lints.clippy.incompatible_msrv] +level = "allow" +priority 
= 1 + +[lints.clippy.lint_groups_priority] +level = "allow" +priority = 1 + +[lints.clippy.manual_assert] +level = "allow" +priority = 1 + +[lints.clippy.manual_range_contains] +level = "allow" +priority = 1 + +[lints.clippy.missing_errors_doc] +level = "allow" +priority = 1 + +[lints.clippy.module_name_repetitions] +level = "allow" +priority = 1 + +[lints.clippy.naive_bytecount] +level = "allow" +priority = 1 + +[lints.clippy.nonminimal_bool] +level = "allow" +priority = 1 + +[lints.clippy.range_plus_one] +level = "allow" +priority = 1 + +[lints.clippy.similar_names] +level = "allow" +priority = 1 + +[lints.clippy.single_match] +level = "allow" +priority = 1 + +[lints.clippy.single_match_else] +level = "allow" +priority = 1 + +[lints.clippy.struct_excessive_bools] +level = "allow" +priority = 1 + +[lints.clippy.struct_field_names] +level = "allow" +priority = 1 + +[lints.clippy.too_many_arguments] +level = "allow" +priority = 1 + +[lints.clippy.too_many_lines] +level = "allow" +priority = 1 + +[lints.clippy.type_complexity] +level = "allow" +priority = 1 + +[lints.clippy.unreadable_literal] +level = "allow" +priority = 1 + +[lints.rust] +deprecated_safe = "warn" +improper_ctypes = "warn" +improper_ctypes_definitions = "warn" +non_ascii_idents = "warn" +rust_2018_idioms = "warn" +single_use_lifetimes = "warn" +unnameable_types = "warn" +unreachable_pub = "warn" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = [ + 'cfg(target_arch,values("xtensa"))', + 'cfg(target_arch,values("amdgpu"))', + 'cfg(target_arch,values("loongarch32"))', + 'cfg(target_os,values("trusty"))', + 'cfg(target_os,values("psx"))', + 'cfg(target_env,values("psx"))', + 'cfg(target_feature,values("lse2","lse128","rcpc3"))', + 'cfg(target_feature,values("quadword-atomics"))', + 'cfg(target_feature,values("zaamo","zabha"))', + 'cfg(target_feature,values("zacas"))', + 'cfg(target_feature,values("miscellaneous-extensions-3"))', + 'cfg(target_pointer_width,values("128"))', + 
"cfg(portable_atomic_no_outline_atomics,portable_atomic_outline_atomics,portable_atomic_unstable_f16,portable_atomic_unstable_f128)", + "cfg(portable_atomic_unstable_coerce_unsized)", + "cfg(portable_atomic_test_detect_false,portable_atomic_test_no_std_static_assert_ffi,qemu,valgrind)", +] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..b6641a7a285520d3dcb6a5fff0bc8ddc2799a9eb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/Cargo.toml.orig @@ -0,0 +1,52 @@ +[package] +name = "portable-atomic-util" +version = "0.2.5" #publish:version +edition = "2018" +rust-version = "1.34" # Align to portable-atomic +license = "Apache-2.0 OR MIT" +repository = "https://github.com/taiki-e/portable-atomic" +keywords = ["atomic"] +categories = ["concurrency", "data-structures", "embedded", "no-std"] +description = """ +Synchronization primitives built with portable-atomic. +""" + +[package.metadata.docs.rs] +all-features = true +targets = ["x86_64-unknown-linux-gnu"] + +[package.metadata.cargo_check_external_types] +# The following are external types that are allowed to be exposed in our public API. +allowed_external_types = [ +] + +[lib] +doc-scrape-examples = false + +[features] +default = [] + +# Use `std`. +# +# Note: +# - This implicitly enables the `alloc` feature. +std = ["alloc"] + +# Use `alloc`. +# +# Note: +# - The MSRV when this feature is enabled and the `std` feature is *not* enabled is Rust 1.36 that `alloc` crate stabilized. +alloc = [] + +# TODO: https://github.com/taiki-e/portable-atomic/issues/1 +# # Provides generic `atomic` type. 
+# generic = [] + +[dependencies] +portable-atomic = { version = "1.5.1", path = "..", default-features = false, features = ["require-cas"] } + +[dev-dependencies] +build-context = "0.1" + +[lints] +workspace = true diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..f433b1a53f5b830a205fd2df78e2b34974656c7b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/LICENSE-APACHE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..31aa79387f27e730e33d871925e152e35e428031 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/README.md new file mode 100644 index 0000000000000000000000000000000000000000..61a59ecc9e87785b61a476d3dbb103a138a30b69 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/README.md @@ -0,0 +1,77 @@ +# portable-atomic-util + +[![crates.io](https://img.shields.io/crates/v/portable-atomic-util?style=flat-square&logo=rust)](https://crates.io/crates/portable-atomic-util) +[![docs.rs](https://img.shields.io/badge/docs.rs-portable--atomic--util-blue?style=flat-square&logo=docs.rs)](https://docs.rs/portable-atomic-util) +[![license](https://img.shields.io/badge/license-Apache--2.0_OR_MIT-blue?style=flat-square)](#license) +[![msrv](https://img.shields.io/badge/msrv-1.34-blue?style=flat-square&logo=rust)](https://www.rust-lang.org) +[![github actions](https://img.shields.io/github/actions/workflow/status/taiki-e/portable-atomic/ci.yml?branch=main&style=flat-square&logo=github)](https://github.com/taiki-e/portable-atomic/actions) + + + +Synchronization primitives built with [portable-atomic]. + +- Provide `Arc`. (optional, requires the `std` or `alloc` feature) +- Provide `task::Wake`. (optional, requires the `std` or `alloc` feature) + + +See [#1] for other primitives being considered for addition to this crate. + +## Optional features + +- **`std`**
+ Use `std`. + + Note: + - This implicitly enables the `alloc` feature. + +- **`alloc`**
+ Use `alloc`. + + Note: + - The MSRV when this feature is enabled and the `std` feature is *not* enabled is Rust 1.36 that `alloc` crate stabilized. + + + +[portable-atomic]: https://github.com/taiki-e/portable-atomic +[#1]: https://github.com/taiki-e/portable-atomic/issues/1 + +## Optional cfg + +One of the ways to enable cfg is to set [rustflags in the cargo config](https://doc.rust-lang.org/cargo/reference/config.html#targettriplerustflags): + +```toml +# .cargo/config.toml +[target.] +rustflags = ["--cfg", "portable_atomic_unstable_coerce_unsized"] +``` + +Or set environment variable: + +```sh +RUSTFLAGS="--cfg portable_atomic_unstable_coerce_unsized" cargo ... +``` + +- **`--cfg portable_atomic_unstable_coerce_unsized`**
+ Support coercing of `Arc` to `Arc` as in `std::sync::Arc`. + + + + This cfg requires Rust nightly because this coercing requires [unstable `CoerceUnsized` trait](https://doc.rust-lang.org/nightly/core/ops/trait.CoerceUnsized.html). + + See [this issue comment](https://github.com/taiki-e/portable-atomic/issues/143#issuecomment-1866488569) for another known workaround. + + **Note:** This cfg is unstable and outside of the normal semver guarantees and minor or patch versions of portable-atomic-util may make breaking changes to them at any time. + + + +## License + +Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or +[MIT license](LICENSE-MIT) at your option. + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/build.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..cf4e86a11dcb420660326ce2b552d9a3676fa60f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/build.rs @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// The rustc-cfg emitted by the build script are *not* public API. + +// version.rs is shared with portable-atomic's build script, and portable-atomic-util only uses a part of it. 
+#[allow(dead_code)] +#[path = "version.rs"] +mod version; + +use std::env; + +use self::version::{Version, rustc_version}; + +fn main() { + println!("cargo:rerun-if-changed=build.rs"); + println!("cargo:rerun-if-changed=version.rs"); + + let version = match rustc_version() { + Some(version) => version, + None => { + if env::var_os("PORTABLE_ATOMIC_DENY_WARNINGS").is_some() { + panic!("unable to determine rustc version") + } + println!( + "cargo:warning={}: unable to determine rustc version; assuming latest stable rustc (1.{})", + env!("CARGO_PKG_NAME"), + Version::LATEST.minor + ); + Version::LATEST + } + }; + + if version.minor >= 80 { + // Custom cfgs set by build script. Not public API. + // grep -F 'cargo:rustc-cfg=' portable-atomic-util/build.rs | grep -Ev '^ *//' | sed -E 's/^.*cargo:rustc-cfg=//; s/(=\\)?".*$//' | LC_ALL=C sort -u | tr '\n' ',' | sed -E 's/,$/\n/' + println!( + "cargo:rustc-check-cfg=cfg(portable_atomic_no_alloc,portable_atomic_no_core_unwind_safe,portable_atomic_no_error_in_core,portable_atomic_no_futures_api,portable_atomic_no_io_safety,portable_atomic_no_io_vec,portable_atomic_no_maybe_uninit,portable_atomic_no_min_const_generics,portable_atomic_no_strict_provenance,portable_atomic_no_track_caller,portable_atomic_no_unsafe_op_in_unsafe_fn,portable_atomic_sanitize_thread)" + ); + } + + // Note that cfgs are `no_`*, not `has_*`. This allows treating as the latest + // stable rustc is used when the build script doesn't run. This is useful + // for non-cargo build systems that don't run the build script. 
+ + // alloc stabilized in Rust 1.36 (nightly-2019-04-15) https://github.com/rust-lang/rust/pull/59675 + if !version.probe(36, 2019, 4, 14) { + println!("cargo:rustc-cfg=portable_atomic_no_alloc"); + } + // std::{future,task} stabilized in Rust 1.36 (nightly-2019-04-25) https://github.com/rust-lang/rust/pull/59739 + if !version.probe(36, 2019, 4, 24) { + println!("cargo:rustc-cfg=portable_atomic_no_futures_api"); + } + // {read,write}_vectored stabilized in Rust 1.36 (nightly-2019-04-30) https://github.com/rust-lang/rust/pull/60334 + if !version.probe(36, 2019, 4, 29) { + println!("cargo:rustc-cfg=portable_atomic_no_io_vec"); + } + // MaybeUninit stabilized in Rust 1.36 (nightly-2019-05-21) https://github.com/rust-lang/rust/pull/60445 + if !version.probe(36, 2019, 5, 20) { + println!("cargo:rustc-cfg=portable_atomic_no_maybe_uninit"); + } + // track_caller stabilized in Rust 1.46 (nightly-2020-07-02): https://github.com/rust-lang/rust/pull/72445 + if !version.probe(46, 2020, 7, 1) { + println!("cargo:rustc-cfg=portable_atomic_no_track_caller"); + } + // min_const_generics stabilized in Rust 1.51 (nightly-2020-12-28): https://github.com/rust-lang/rust/pull/79135 + if !version.probe(51, 2020, 12, 27) { + println!("cargo:rustc-cfg=portable_atomic_no_min_const_generics"); + } + // unsafe_op_in_unsafe_fn stabilized in Rust 1.52 (nightly-2021-03-11): https://github.com/rust-lang/rust/pull/79208 + if !version.probe(52, 2021, 3, 10) { + println!("cargo:rustc-cfg=portable_atomic_no_unsafe_op_in_unsafe_fn"); + } + // https://github.com/rust-lang/rust/pull/84662 merged in Rust 1.56 (nightly-2021-08-02). 
+ if !version.probe(56, 2021, 8, 1) { + println!("cargo:rustc-cfg=portable_atomic_no_core_unwind_safe"); + } + // io_safety stabilized in Rust 1.63 (nightly-2022-06-16): https://github.com/rust-lang/rust/pull/95118 + // std::os::hermit::io::AsFd requires Rust 1.69 (https://github.com/rust-lang/rust/commit/b5fb4f3d9b1b308d59cab24ef2f9bf23dad948aa) + if !version.probe(63, 2022, 6, 15) + || version.minor < 69 + && env::var("CARGO_CFG_TARGET_OS").expect("CARGO_CFG_TARGET_OS not set") == "hermit" + { + println!("cargo:rustc-cfg=portable_atomic_no_io_safety"); + } + // error_in_core stabilized in Rust 1.81 (nightly-2024-06-09): https://github.com/rust-lang/rust/pull/125951 + if !version.probe(81, 2024, 6, 8) { + println!("cargo:rustc-cfg=portable_atomic_no_error_in_core"); + } + // strict_provenance/exposed_provenance APIs stabilized in Rust 1.84 (nightly-2024-10-22): https://github.com/rust-lang/rust/pull/130350 + if !version.probe(84, 2024, 10, 21) { + println!("cargo:rustc-cfg=portable_atomic_no_strict_provenance"); + } + + if version.nightly { + // `cfg(sanitize = "..")` is not stabilized. 
+ let sanitize = env::var("CARGO_CFG_SANITIZE").unwrap_or_default(); + if sanitize.contains("thread") { + println!("cargo:rustc-cfg=portable_atomic_sanitize_thread"); + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/version.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/version.rs new file mode 100644 index 0000000000000000000000000000000000000000..9e24407d48410b6f92395d2bb63e9f173692b06e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/portable-atomic-util-0.2.5/version.rs @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT + +use std::{env, iter, process::Command, str}; + +pub(crate) fn rustc_version() -> Option { + let rustc = env::var_os("RUSTC")?; + let rustc_wrapper = if env::var_os("CARGO_ENCODED_RUSTFLAGS").is_some() { + env::var_os("RUSTC_WRAPPER").filter(|v| !v.is_empty()) + } else { + // Cargo sets environment variables for wrappers correctly only since https://github.com/rust-lang/cargo/pull/9601. + None + }; + // Do not apply RUSTC_WORKSPACE_WRAPPER: https://github.com/cuviper/autocfg/issues/58#issuecomment-2067625980 + let mut rustc = rustc_wrapper.into_iter().chain(iter::once(rustc)); + let mut cmd = Command::new(rustc.next().unwrap()); + cmd.args(rustc); + // Use verbose version output because the packagers add extra strings to the normal version output. + // Do not use long flags (--version --verbose) because clippy-deriver doesn't handle them properly. 
+ // -vV is also matched with that cargo internally uses: https://github.com/rust-lang/cargo/blob/0.80.0/src/cargo/util/rustc.rs#L65 + let output = cmd.arg("-vV").output().ok()?; + let verbose_version = str::from_utf8(&output.stdout).ok()?; + Version::parse(verbose_version) +} + +#[cfg_attr(test, derive(Debug, PartialEq))] +pub(crate) struct Version { + pub(crate) minor: u32, + pub(crate) nightly: bool, + commit_date: Date, + pub(crate) llvm: u32, +} + +impl Version { + // The known latest stable version. If we unable to determine + // the rustc version, we assume this is the current version. + // It is no problem if this is older than the actual latest stable. + // LLVM version is assumed to be the minimum external LLVM version: + // https://github.com/rust-lang/rust/blob/1.93.0/src/bootstrap/src/core/build_steps/llvm.rs#L627 + pub(crate) const LATEST: Self = Self::stable(93, 20); + + pub(crate) const fn stable(rustc_minor: u32, llvm_major: u32) -> Self { + Self { minor: rustc_minor, nightly: false, commit_date: Date::UNKNOWN, llvm: llvm_major } + } + + pub(crate) fn probe(&self, minor: u32, year: u16, month: u8, day: u8) -> bool { + if self.nightly { + self.minor > minor + || self.minor == minor && self.commit_date >= Date::new(year, month, day) + } else { + self.minor >= minor + } + } + + #[cfg(test)] + pub(crate) fn commit_date(&self) -> &Date { + &self.commit_date + } + + pub(crate) fn parse(verbose_version: &str) -> Option { + let mut release = verbose_version + .lines() + .find(|line| line.starts_with("release: ")) + .map(|line| &line["release: ".len()..])? 
+ .splitn(2, '-'); + let version = release.next().unwrap(); + let channel = release.next().unwrap_or_default(); + let mut digits = version.splitn(3, '.'); + let major = digits.next()?; + if major != "1" { + return None; + } + let minor = digits.next()?.parse::().ok()?; + let _patch = digits.next().unwrap_or("0").parse::().ok()?; + let nightly = match env::var_os("RUSTC_BOOTSTRAP") { + // When -1 is passed rustc works like stable, e.g., cfg(target_feature = "unstable_target_feature") will never be set. https://github.com/rust-lang/rust/pull/132993 + Some(ref v) if v == "-1" => false, + _ => channel == "nightly" || channel == "dev", + }; + + // Note that rustc 1.49-1.50 (and 1.13 or older) don't print LLVM version. + let llvm_major = (|| { + let version = verbose_version + .lines() + .find(|line| line.starts_with("LLVM version: ")) + .map(|line| &line["LLVM version: ".len()..])?; + let mut digits = version.splitn(3, '.'); + let major = digits.next()?.parse::().ok()?; + let _minor = digits.next()?.parse::().ok()?; + let _patch = digits.next().unwrap_or("0").parse::().ok()?; + Some(major) + })() + .unwrap_or(0); + + // we don't refer commit date on stable/beta. + if nightly { + let commit_date = (|| { + let mut commit_date = verbose_version + .lines() + .find(|line| line.starts_with("commit-date: ")) + .map(|line| &line["commit-date: ".len()..])? 
+ .splitn(3, '-'); + let year = commit_date.next()?.parse::().ok()?; + let month = commit_date.next()?.parse::().ok()?; + let day = commit_date.next()?.parse::().ok()?; + if month > 12 || day > 31 { + return None; + } + Some(Date::new(year, month, day)) + })(); + Some(Self { + minor, + nightly, + commit_date: commit_date.unwrap_or(Date::UNKNOWN), + llvm: llvm_major, + }) + } else { + Some(Self::stable(minor, llvm_major)) + } + } +} + +#[derive(PartialEq, PartialOrd)] +#[cfg_attr(test, derive(Debug))] +pub(crate) struct Date { + pub(crate) year: u16, + pub(crate) month: u8, + pub(crate) day: u8, +} + +impl Date { + const UNKNOWN: Self = Self::new(0, 0, 0); + + const fn new(year: u16, month: u8, day: u8) -> Self { + Self { year, month, day } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..6f2fcd43d41e5ad1ed7d4600a90be71901abe7f7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "5889a2a65c99836fd718340726f6b4379bc5028f" + }, + "path_in_vcs": "powerfmt" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/Cargo.toml new file mode 100644 index 
0000000000000000000000000000000000000000..f7acec39445bf71ee92b7122ad5b7777453a8710 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/Cargo.toml @@ -0,0 +1,59 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.67.0" +name = "powerfmt" +version = "0.2.0" +authors = ["Jacob Pratt "] +description = """ + `powerfmt` is a library that provides utilities for formatting values. This crate makes it + significantly easier to support filling to a minimum width with alignment, avoid heap + allocation, and avoid repetitive calculations. 
+""" +readme = "README.md" +keywords = [ + "display", + "format", + "fmt", + "formatter", + "extension", +] +categories = [ + "no-std", + "no-std::no-alloc", + "rust-patterns", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/jhpratt/powerfmt" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "__powerfmt_docs", + "--generate-link-to-definition", +] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies.powerfmt-macros] +version = "=0.1.0" +optional = true + +[features] +alloc = [] +default = [ + "std", + "macros", +] +macros = ["dep:powerfmt-macros"] +std = ["alloc"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..7f41f61644624a06b75d8ec0eedb0014a97dfafb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/Cargo.toml.orig @@ -0,0 +1,33 @@ +[package] +name = "powerfmt" +version = "0.2.0" +authors = ["Jacob Pratt "] +edition = "2021" +rust-version = "1.67.0" +repository = "https://github.com/jhpratt/powerfmt" +keywords = ["display", "format", "fmt", "formatter", "extension"] +categories = ["no-std", "no-std::no-alloc", "rust-patterns"] +readme = "README.md" +license = "MIT OR Apache-2.0" +description = """ + `powerfmt` is a library that provides utilities for formatting values. This crate makes it + significantly easier to support filling to a minimum width with alignment, avoid heap + allocation, and avoid repetitive calculations. 
+""" + +[package.metadata.docs.rs] +all-features = true +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = ["--cfg", "__powerfmt_docs", "--generate-link-to-definition"] + +[lints] +workspace = true + +[features] +default = ["std", "macros"] +alloc = [] +std = ["alloc"] +macros = ["dep:powerfmt-macros"] + +[dependencies] +powerfmt-macros = { workspace = true, optional = true } diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/LICENSE-Apache b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/LICENSE-Apache new file mode 100644 index 0000000000000000000000000000000000000000..ddde1f9a0fdeab573c0482d6f2f9384e80233899 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/LICENSE-Apache @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 Jacob Pratt et al. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..89c1f78cb46888390f82a8e584af0f491c33dd55 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright (c) 2023 Jacob Pratt et al. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c22a3e2fe08d9c2eb4dc96a378decac0436d161b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/powerfmt-0.2.0/README.md @@ -0,0 +1,45 @@ +# `powerfmt` + +[![minimum rustc: 1.65](https://img.shields.io/badge/minimum%20rustc-1.65-yellowgreen?logo=rust&style=flat-square)](https://www.whatrustisit.com) +[![version](https://img.shields.io/crates/v/powerfmt?color=blue&logo=rust&style=flat-square)](https://crates.io/crates/powerfmt) +[![build status](https://img.shields.io/github/actions/workflow/status/jhpratt/powerfmt/build.yaml?branch=main&style=flat-square)](https://github.com/jhpratt/powerfmt/actions) + +Documentation is available [on docs.rs](https://docs.rs/powerfmt). + +## Minimum Rust version policy + +`powerfmt` is guaranteed to compile with the latest stable release of Rust in addition to the two +prior minor releases. For example, if the latest stable Rust release is 1.70, then `powerfmt` is +guaranteed to compile with Rust 1.68, 1.69, and 1.70. + +The minimum supported Rust version may be increased to one of the aforementioned versions if doing +so provides the end user a benefit. However, the minimum supported Rust version may also be bumped +to a version four minor releases prior to the most recent stable release if doing so improves code +quality or maintainability. + +For interoperability with third-party crates, it is guaranteed that there exists a version of that +crate that supports the minimum supported Rust version of `powerfmt`. 
This does not mean that the +latest version of the third-party crate supports the minimum supported Rust version of `powerfmt`. + +## Contributing + +Contributions are always welcome! If you have an idea, it's best to float it by me before working on +it to ensure no effort is wasted. If there's already an open issue for it, knock yourself out. + +If you have any questions, feel free to use [Discussions]. Don't hesitate to ask questions — that's +what I'm here for! + +[Discussions]: https://github.com/jhpratt/powerfmt/discussions + +## License + +This project is licensed under either of + +- [Apache License, Version 2.0](https://github.com/jhpratt/powerfmt/blob/main/LICENSE-Apache) +- [MIT license](https://github.com/jhpratt/powerfmt/blob/main/LICENSE-MIT) + +at your option. + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in +the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any +additional terms or conditions. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..d33ae0607c5cedc953dad19309bd0e04e5fe8373 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "b2b930a0662b18b2e351264a21e175478bb3c3f1" + }, + "path_in_vcs": "quinn-proto" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..57c214d75bde7adc1e00f2900a10054a3168a682 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/Cargo.lock @@ -0,0 +1,1650 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "assert_matches" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" + +[[package]] +name = "aws-lc-fips-sys" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2608e5a7965cc9d58c56234d346c9c89b824c4c8652b6f047b3bd0a777c0644f" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", + "regex", +] + +[[package]] +name = "aws-lc-rs" +version = "1.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c953fe1ba023e6b7730c0d4b031d06f267f23a46167dcbd40316644b10a17ba" +dependencies = [ + "aws-lc-fips-sys", + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbfd150b5dbdb988bcc8fb1fe787eb6b7ee6180ca24da683b61ea5405f3d43ff" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bindgen" +version = "0.69.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + 
"itertools", + "lazy_static", + "lazycell", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash 1.1.0", + "shlex", + "syn", + "which", +] + +[[package]] +name = "bitflags" +version = "2.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d" + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" + +[[package]] +name = "cc" +version = "1.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42bc4aea80032b7bf409b0bc7ccad88853858911b7713a8062fdc0623867bedc" +dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "cmake" +version = "0.1.54" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +dependencies = [ + "cc", +] + +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "darling" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core", + "quote", + "syn", +] + +[[package]] +name = "deranged" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "derive_arbitrary" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "fastbloom" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18c1ddb9231d8554c2d6bdf4cfaabf0c59251658c68b6c95cd52dd0c513a912a" +dependencies = [ + "getrandom 0.3.3", + "libm", + "rand", + "siphasher", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" 
+version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", +] + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" + +[[package]] +name = "hex-literal" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcaaec4551594c969335c98c903c1397853d4198408ea609190f420500f6be71" + +[[package]] +name = "home" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "indexmap" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + 
+[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.3", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.175" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" + +[[package]] +name = "libloading" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" +dependencies = [ + "cfg-if", + "windows-targets 0.53.3", +] + +[[package]] +name = "libm" +version = "0.2.15" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "minicov" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27fe9f1cc3c22e1687f9446c2083c4c5fc7f0bcf1c7a86bdbded14985895b4b" +dependencies = [ + "cc", + "walkdir", +] + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num_threads" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" +dependencies = [ + "libc", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "pem" +version = "3.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +dependencies = [ + "base64", + "serde", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" 
+dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "qlog" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f15b83c59e6b945f2261c95a1dd9faf239187f32ff0a96af1d1d28c4557f919" +dependencies = [ + "serde", + "serde_json", + "serde_with", + "smallvec", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +dependencies = [ + "arbitrary", + "assert_matches", + "aws-lc-rs", + "bytes", + "fastbloom", + "getrandom 0.3.3", + "hex-literal", + "lazy_static", + "lru-slab", + "qlog", + "rand", + "rand_pcg", + "rcgen", + "ring", + "rustc-hash 2.1.1", + "rustls", + "rustls-pki-types", + "rustls-platform-verifier", + "slab", + "thiserror 2.0.16", + "tinyvec", + "tracing", + "tracing-subscriber", + "wasm-bindgen-test", + "web-time", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" 
+version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.3", +] + +[[package]] +name = "rand_pcg" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b48ac3f7ffaab7fac4d2376632268aa5f89abdb55f7ebf8f4d11fffccb2320f7" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rcgen" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0068c5b3cab1d4e271e0bb6539c87563c43411cad90b057b15c79958fbeb41f7" +dependencies = [ + "pem", + "ring", + "rustls-pki-types", + "time", + "yasna", +] + +[[package]] +name = "regex" +version = "1.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.10", + "regex-syntax 0.8.6", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.6", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls" +version = "0.23.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" +dependencies = [ + "aws-lc-rs", + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-platform-verifier" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be59af91596cac372a6942530653ad0c3a246cdd491aaa9dcaee47f88d67d5a0" +dependencies = [ + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + +[[package]] +name = "rustls-webpki" +version = "0.103.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +dependencies = [ + "aws-lc-rs", + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "security-framework" +version = "3.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "80fb1d92c5028aa318b4b8bd7302a5bfcf48be96a37fc6fc790f806b0004ee0c" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.143" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" +dependencies = [ + "indexmap", + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" +dependencies = [ + "serde", + "serde_derive", + "serde_with_macros", +] + +[[package]] +name = "serde_with_macros" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +dependencies = [ + "serde", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" +dependencies = [ + "thiserror-impl 2.0.16", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "time" +version = "0.3.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +dependencies = [ + "deranged", + "itoa", + "libc", + "num-conv", + "num_threads", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" + +[[package]] +name = "time-macros" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "thread_local", + "time", + "tracing", + "tracing-core", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-bindgen-test" +version = "0.3.50" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "66c8d5e33ca3b6d9fa3b4676d774c5778031d27a578c2b007f905acf816152c3" +dependencies = [ + "js-sys", + "minicov", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-root-certs" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e4ffd8df1c57e87c325000a3d6ef93db75279dc3a231125aac571650f22b12a" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22" +dependencies = [ + "windows-sys 0.60.2", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + 
"windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version 
= "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags", +] + +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ea25891ddea16dda948da142f1346e538b1f8594 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/Cargo.toml @@ -0,0 +1,193 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. 
+# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.74.1" +name = "quinn-proto" +version = "0.11.13" +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "State machine for the QUIC transport protocol" +readme = false +keywords = ["quic"] +categories = [ + "network-programming", + "asynchronous", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/quinn-rs/quinn" + +[package.metadata.docs.rs] +features = [ + "rustls-aws-lc-rs", + "rustls-ring", + "platform-verifier", + "log", + "rustls-log", +] + +[features] +aws-lc-rs = [ + "dep:aws-lc-rs", + "aws-lc-rs?/aws-lc-sys", + "aws-lc-rs?/prebuilt-nasm", +] +aws-lc-rs-fips = [ + "aws-lc-rs", + "aws-lc-rs?/fips", +] +bloom = ["dep:fastbloom"] +default = [ + "rustls-ring", + "log", + "bloom", +] +log = ["tracing/log"] +platform-verifier = ["dep:rustls-platform-verifier"] +qlog = ["dep:qlog"] +ring = ["dep:ring"] +rustls = ["rustls-ring"] +rustls-aws-lc-rs = [ + "dep:rustls", + "rustls?/aws-lc-rs", + "aws-lc-rs", +] +rustls-aws-lc-rs-fips = [ + "rustls-aws-lc-rs", + "aws-lc-rs-fips", +] +rustls-log = ["rustls?/logging"] +rustls-ring = [ + "dep:rustls", + "rustls?/ring", + "ring", +] + +[lib] +name = "quinn_proto" +path = "src/lib.rs" + +[dependencies.arbitrary] +version = "1.0.1" +features = ["derive"] +optional = true + +[dependencies.aws-lc-rs] +version = "1.9" +optional = true +default-features = false + +[dependencies.bytes] +version = "1" + +[dependencies.fastbloom] +version = "0.14" +optional = true + +[dependencies.lru-slab] +version = "0.1.2" + +[dependencies.qlog] +version = "0.15.2" +optional = true + +[dependencies.rand] +version = "0.9" + +[dependencies.ring] +version = "0.17" +optional = true + +[dependencies.rustc-hash] +version = "2" + 
+[dependencies.rustls] +version = "0.23.5" +features = ["std"] +optional = true +default-features = false + +[dependencies.rustls-platform-verifier] +version = "0.6" +optional = true + +[dependencies.slab] +version = "0.4.6" + +[dependencies.thiserror] +version = "2.0.3" + +[dependencies.tinyvec] +version = "1.1" +features = [ + "alloc", + "alloc", +] + +[dependencies.tracing] +version = "0.1.10" +features = ["std"] +default-features = false + +[dev-dependencies.assert_matches] +version = "1.1" + +[dev-dependencies.hex-literal] +version = "1" + +[dev-dependencies.lazy_static] +version = "1" + +[dev-dependencies.rand_pcg] +version = "0.9" + +[dev-dependencies.rcgen] +version = "0.14" + +[dev-dependencies.tracing-subscriber] +version = "0.3.0" +features = [ + "env-filter", + "fmt", + "ansi", + "time", + "local-time", +] +default-features = false + +[dev-dependencies.wasm-bindgen-test] +version = "0.3.45" + +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies.getrandom] +version = "0.3" +features = ["wasm_js"] +default-features = false + +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies.ring] +version = "0.17" +features = ["wasm32_unknown_unknown_js"] + +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies.rustls-pki-types] +version = "1.7" +features = ["web"] + +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies.web-time] +version = "1" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = ["cfg(fuzzing)"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..3c29d498fdd4d155422390dc9d58186a5d2fef53 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/Cargo.toml.orig @@ -0,0 +1,79 @@ +[package] 
+name = "quinn-proto" +version = "0.11.13" +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true +description = "State machine for the QUIC transport protocol" +keywords.workspace = true +categories.workspace = true +workspace = ".." + +[features] +# NOTE: Please keep this in sync with the feature list in `.github/workflows/codecov.yml`, see +# comment in that file for more information. +default = ["rustls-ring", "log", "bloom"] +aws-lc-rs = ["dep:aws-lc-rs", "aws-lc-rs?/aws-lc-sys", "aws-lc-rs?/prebuilt-nasm"] +aws-lc-rs-fips = ["aws-lc-rs", "aws-lc-rs?/fips"] +# Enables BloomTokenLog, and uses it by default +bloom = ["dep:fastbloom"] +# For backwards compatibility, `rustls` forwards to `rustls-ring` +rustls = ["rustls-ring"] +# Enable rustls with the `aws-lc-rs` crypto provider +rustls-aws-lc-rs = ["dep:rustls", "rustls?/aws-lc-rs", "aws-lc-rs"] +rustls-aws-lc-rs-fips = ["rustls-aws-lc-rs", "aws-lc-rs-fips"] +# Enable rustls with the `ring` crypto provider +rustls-ring = ["dep:rustls", "rustls?/ring", "ring"] +ring = ["dep:ring"] +# Enable rustls ring provider and direct ring usage +# Provides `ClientConfig::with_platform_verifier()` convenience method +platform-verifier = ["dep:rustls-platform-verifier"] +# Configure `tracing` to log events via `log` if no `tracing` subscriber exists. 
+log = ["tracing/log"] +# Enable rustls logging +rustls-log = ["rustls?/logging"] +# Enable qlog support +qlog = ["dep:qlog"] + +[dependencies] +arbitrary = { workspace = true, optional = true } +aws-lc-rs = { workspace = true, optional = true } +bytes = { workspace = true } +fastbloom = { workspace = true, optional = true } +lru-slab = { workspace = true } +qlog = { workspace = true, optional = true } +rustc-hash = { workspace = true } +rand = { workspace = true } +ring = { workspace = true, optional = true } +rustls = { workspace = true, optional = true } +rustls-platform-verifier = { workspace = true, optional = true } +slab = { workspace = true } +thiserror = { workspace = true } +tinyvec = { workspace = true, features = ["alloc"] } +tracing = { workspace = true } + +# Feature flags & dependencies for wasm +# wasm-bindgen is assumed for a wasm*-*-unknown target +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies] +ring = { workspace = true, features = ["wasm32_unknown_unknown_js"] } +getrandom = { workspace = true, features = ["wasm_js"] } +rustls-pki-types = { workspace = true, features = ["web"] } # only added as dependency to enforce the `web` feature for this target +web-time = { workspace = true } + +[dev-dependencies] +assert_matches = { workspace = true } +hex-literal = { workspace = true } +rand_pcg = "0.9" +rcgen = { workspace = true } +tracing-subscriber = { workspace = true } +lazy_static = "1" +wasm-bindgen-test = { workspace = true } + +[lints.rust] +# https://rust-fuzz.github.io/book/cargo-fuzz/guide.html#cfgfuzzing +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] } + +[package.metadata.docs.rs] +# all non-default features except fips (cannot build on docs.rs environment) +features = ["rustls-aws-lc-rs", "rustls-ring", "platform-verifier", "log", "rustls-log"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/LICENSE-APACHE 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..f656104056377cb81385873ec137545e5f154f17 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/LICENSE-MIT @@ -0,0 +1,7 @@ +Copyright (c) 2018 The quinn Developers + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/cid_generator.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/cid_generator.rs new file mode 100644 index 0000000000000000000000000000000000000000..e62415e8b4f7f6b14941e21d1ef5467bcf4e025c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/cid_generator.rs @@ -0,0 +1,180 @@ +use std::hash::Hasher; + +use rand::{Rng, RngCore}; + +use crate::Duration; +use crate::MAX_CID_SIZE; +use crate::shared::ConnectionId; + +/// Generates connection IDs for incoming connections +pub trait ConnectionIdGenerator: Send + Sync { + /// Generates a new CID + /// + /// Connection IDs MUST NOT contain any information that can be used by + /// an external observer (that is, one that does not cooperate with the + /// issuer) to correlate them with other connection IDs for the same + /// connection. They MUST have high entropy, e.g. due to encrypted data + /// or cryptographic-grade random data. + fn generate_cid(&mut self) -> ConnectionId; + + /// Quickly determine whether `cid` could have been generated by this generator + /// + /// False positives are permitted, but increase the cost of handling invalid packets. + fn validate(&self, _cid: &ConnectionId) -> Result<(), InvalidCid> { + Ok(()) + } + + /// Returns the length of a CID for connections created by this generator + fn cid_len(&self) -> usize; + /// Returns the lifetime of generated Connection IDs + /// + /// Connection IDs will be retired after the returned `Duration`, if any. Assumed to be constant. 
+ fn cid_lifetime(&self) -> Option; +} + +/// The connection ID was not recognized by the [`ConnectionIdGenerator`] +#[derive(Debug, Copy, Clone)] +pub struct InvalidCid; + +/// Generates purely random connection IDs of a specified length +/// +/// Random CIDs can be smaller than those produced by [`HashedConnectionIdGenerator`], but cannot be +/// usefully [`validate`](ConnectionIdGenerator::validate)d. +#[derive(Debug, Clone, Copy)] +pub struct RandomConnectionIdGenerator { + cid_len: usize, + lifetime: Option, +} + +impl Default for RandomConnectionIdGenerator { + fn default() -> Self { + Self { + cid_len: 8, + lifetime: None, + } + } +} + +impl RandomConnectionIdGenerator { + /// Initialize Random CID generator with a fixed CID length + /// + /// The given length must be less than or equal to MAX_CID_SIZE. + pub fn new(cid_len: usize) -> Self { + debug_assert!(cid_len <= MAX_CID_SIZE); + Self { + cid_len, + ..Self::default() + } + } + + /// Set the lifetime of CIDs created by this generator + pub fn set_lifetime(&mut self, d: Duration) -> &mut Self { + self.lifetime = Some(d); + self + } +} + +impl ConnectionIdGenerator for RandomConnectionIdGenerator { + fn generate_cid(&mut self) -> ConnectionId { + let mut bytes_arr = [0; MAX_CID_SIZE]; + rand::rng().fill_bytes(&mut bytes_arr[..self.cid_len]); + + ConnectionId::new(&bytes_arr[..self.cid_len]) + } + + /// Provide the length of dst_cid in short header packet + fn cid_len(&self) -> usize { + self.cid_len + } + + fn cid_lifetime(&self) -> Option { + self.lifetime + } +} + +/// Generates 8-byte connection IDs that can be efficiently +/// [`validate`](ConnectionIdGenerator::validate)d +/// +/// This generator uses a non-cryptographic hash and can therefore still be spoofed, but nonetheless +/// helps prevents Quinn from responding to non-QUIC packets at very low cost. 
+pub struct HashedConnectionIdGenerator { + key: u64, + lifetime: Option, +} + +impl HashedConnectionIdGenerator { + /// Create a generator with a random key + pub fn new() -> Self { + Self::from_key(rand::rng().random()) + } + + /// Create a generator with a specific key + /// + /// Allows [`validate`](ConnectionIdGenerator::validate) to recognize a consistent set of + /// connection IDs across restarts + pub fn from_key(key: u64) -> Self { + Self { + key, + lifetime: None, + } + } + + /// Set the lifetime of CIDs created by this generator + pub fn set_lifetime(&mut self, d: Duration) -> &mut Self { + self.lifetime = Some(d); + self + } +} + +impl Default for HashedConnectionIdGenerator { + fn default() -> Self { + Self::new() + } +} + +impl ConnectionIdGenerator for HashedConnectionIdGenerator { + fn generate_cid(&mut self) -> ConnectionId { + let mut bytes_arr = [0; NONCE_LEN + SIGNATURE_LEN]; + rand::rng().fill_bytes(&mut bytes_arr[..NONCE_LEN]); + let mut hasher = rustc_hash::FxHasher::default(); + hasher.write_u64(self.key); + hasher.write(&bytes_arr[..NONCE_LEN]); + bytes_arr[NONCE_LEN..].copy_from_slice(&hasher.finish().to_le_bytes()[..SIGNATURE_LEN]); + ConnectionId::new(&bytes_arr) + } + + fn validate(&self, cid: &ConnectionId) -> Result<(), InvalidCid> { + let (nonce, signature) = cid.split_at(NONCE_LEN); + let mut hasher = rustc_hash::FxHasher::default(); + hasher.write_u64(self.key); + hasher.write(nonce); + let expected = hasher.finish().to_le_bytes(); + match expected[..SIGNATURE_LEN] == signature[..] 
{ + true => Ok(()), + false => Err(InvalidCid), + } + } + + fn cid_len(&self) -> usize { + NONCE_LEN + SIGNATURE_LEN + } + + fn cid_lifetime(&self) -> Option { + self.lifetime + } +} + +const NONCE_LEN: usize = 3; // Good for more than 16 million connections +const SIGNATURE_LEN: usize = 8 - NONCE_LEN; // 8-byte total CID length + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn validate_keyed_cid() { + let mut generator = HashedConnectionIdGenerator::new(); + let cid = generator.generate_cid(); + generator.validate(&cid).unwrap(); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/cid_queue.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/cid_queue.rs new file mode 100644 index 0000000000000000000000000000000000000000..3e3eec9d46fe2f6c70a72637f083232ee08b2164 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/cid_queue.rs @@ -0,0 +1,303 @@ +use std::ops::Range; + +use crate::{ConnectionId, ResetToken, frame::NewConnectionId}; + +/// DataType stored in CidQueue buffer +type CidData = (ConnectionId, Option); + +/// Sliding window of active Connection IDs +/// +/// May contain gaps due to packet loss or reordering +#[derive(Debug)] +pub(crate) struct CidQueue { + /// Ring buffer indexed by `self.cursor` + buffer: [Option; Self::LEN], + /// Index at which circular buffer addressing is based + cursor: usize, + /// Sequence number of `self.buffer[cursor]` + /// + /// The sequence number of the active CID; must be the smallest among CIDs in `buffer`. + offset: u64, +} + +impl CidQueue { + pub(crate) fn new(cid: ConnectionId) -> Self { + let mut buffer = [None; Self::LEN]; + buffer[0] = Some((cid, None)); + Self { + buffer, + cursor: 0, + offset: 0, + } + } + + /// Handle a `NEW_CONNECTION_ID` frame + /// + /// Returns a non-empty range of retired sequence numbers and the reset token of the new active + /// CID iff any CIDs were retired. 
+ pub(crate) fn insert( + &mut self, + cid: NewConnectionId, + ) -> Result, ResetToken)>, InsertError> { + // Position of new CID wrt. the current active CID + let index = match cid.sequence.checked_sub(self.offset) { + None => return Err(InsertError::Retired), + Some(x) => x, + }; + + let retired_count = cid.retire_prior_to.saturating_sub(self.offset); + if index >= Self::LEN as u64 + retired_count { + return Err(InsertError::ExceedsLimit); + } + + // Discard retired CIDs, if any + for i in 0..(retired_count.min(Self::LEN as u64) as usize) { + self.buffer[(self.cursor + i) % Self::LEN] = None; + } + + // Record the new CID + let index = ((self.cursor as u64 + index) % Self::LEN as u64) as usize; + self.buffer[index] = Some((cid.id, Some(cid.reset_token))); + + if retired_count == 0 { + return Ok(None); + } + + // The active CID was retired. Find the first known CID with sequence number of at least + // retire_prior_to, and inform the caller that all prior CIDs have been retired, and of + // the new CID's reset token. + self.cursor = ((self.cursor as u64 + retired_count) % Self::LEN as u64) as usize; + let (i, (_, token)) = self + .iter() + .next() + .expect("it is impossible to retire a CID without supplying a new one"); + self.cursor = (self.cursor + i) % Self::LEN; + let orig_offset = self.offset; + self.offset = cid.retire_prior_to + i as u64; + // We don't immediately retire CIDs in the range (orig_offset + + // Self::LEN)..self.offset. These are CIDs that we haven't yet received from a + // NEW_CONNECTION_ID frame, since having previously received them would violate the + // connection ID limit we specified based on Self::LEN. If we do receive a such a frame + // in the future, e.g. due to reordering, we'll retire it then. This ensures we can't be + // made to buffer an arbitrarily large number of RETIRE_CONNECTION_ID frames. 
+ Ok(Some(( + orig_offset..self.offset.min(orig_offset + Self::LEN as u64), + token.expect("non-initial CID missing reset token"), + ))) + } + + /// Switch to next active CID if possible, return + /// 1) the corresponding ResetToken and 2) a non-empty range preceding it to retire + pub(crate) fn next(&mut self) -> Option<(ResetToken, Range)> { + let (i, cid_data) = self.iter().nth(1)?; + self.buffer[self.cursor] = None; + + let orig_offset = self.offset; + self.offset += i as u64; + self.cursor = (self.cursor + i) % Self::LEN; + Some((cid_data.1.unwrap(), orig_offset..self.offset)) + } + + /// Iterate CIDs in CidQueue that are not `None`, including the active CID + fn iter(&self) -> impl Iterator + '_ { + (0..Self::LEN).filter_map(move |step| { + let index = (self.cursor + step) % Self::LEN; + self.buffer[index].map(|cid_data| (step, cid_data)) + }) + } + + /// Replace the initial CID + pub(crate) fn update_initial_cid(&mut self, cid: ConnectionId) { + debug_assert_eq!(self.offset, 0); + self.buffer[self.cursor] = Some((cid, None)); + } + + /// Return active remote CID itself + pub(crate) fn active(&self) -> ConnectionId { + self.buffer[self.cursor].unwrap().0 + } + + /// Return the sequence number of active remote CID + pub(crate) fn active_seq(&self) -> u64 { + self.offset + } + + pub(crate) const LEN: usize = 5; +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub(crate) enum InsertError { + /// CID was already retired + Retired, + /// Sequence number violates the leading edge of the window + ExceedsLimit, +} + +#[cfg(test)] +mod tests { + use super::*; + + fn cid(sequence: u64, retire_prior_to: u64) -> NewConnectionId { + NewConnectionId { + sequence, + id: ConnectionId::new(&[0xAB; 8]), + reset_token: ResetToken::from([0xCD; crate::RESET_TOKEN_SIZE]), + retire_prior_to, + } + } + + fn initial_cid() -> ConnectionId { + ConnectionId::new(&[0xFF; 8]) + } + + #[test] + fn next_dense() { + let mut q = CidQueue::new(initial_cid()); + assert!(q.next().is_none()); 
+ assert!(q.next().is_none()); + + for i in 1..CidQueue::LEN as u64 { + q.insert(cid(i, 0)).unwrap(); + } + for i in 1..CidQueue::LEN as u64 { + let (_, retire) = q.next().unwrap(); + assert_eq!(q.active_seq(), i); + assert_eq!(retire.end - retire.start, 1); + } + assert!(q.next().is_none()); + } + #[test] + fn next_sparse() { + let mut q = CidQueue::new(initial_cid()); + let seqs = (1..CidQueue::LEN as u64).filter(|x| x % 2 == 0); + for i in seqs.clone() { + q.insert(cid(i, 0)).unwrap(); + } + for i in seqs { + let (_, retire) = q.next().unwrap(); + dbg!(&retire); + assert_eq!(q.active_seq(), i); + assert_eq!(retire, (q.active_seq().saturating_sub(2))..q.active_seq()); + } + assert!(q.next().is_none()); + } + + #[test] + fn wrap() { + let mut q = CidQueue::new(initial_cid()); + + for i in 1..CidQueue::LEN as u64 { + q.insert(cid(i, 0)).unwrap(); + } + for _ in 1..(CidQueue::LEN as u64 - 1) { + q.next().unwrap(); + } + for i in CidQueue::LEN as u64..(CidQueue::LEN as u64 + 3) { + q.insert(cid(i, 0)).unwrap(); + } + for i in (CidQueue::LEN as u64 - 1)..(CidQueue::LEN as u64 + 3) { + q.next().unwrap(); + assert_eq!(q.active_seq(), i); + } + assert!(q.next().is_none()); + } + + #[test] + fn retire_dense() { + let mut q = CidQueue::new(initial_cid()); + + for i in 1..CidQueue::LEN as u64 { + q.insert(cid(i, 0)).unwrap(); + } + assert_eq!(q.active_seq(), 0); + + assert_eq!(q.insert(cid(4, 2)).unwrap().unwrap().0, 0..2); + assert_eq!(q.active_seq(), 2); + assert_eq!(q.insert(cid(4, 2)), Ok(None)); + + for i in 2..(CidQueue::LEN as u64 - 1) { + let _ = q.next().unwrap(); + assert_eq!(q.active_seq(), i + 1); + assert_eq!(q.insert(cid(i + 1, i + 1)), Ok(None)); + } + + assert!(q.next().is_none()); + } + + #[test] + fn retire_sparse() { + // Retiring CID 0 when CID 1 is not known should retire CID 1 as we move to CID 2 + let mut q = CidQueue::new(initial_cid()); + q.insert(cid(2, 0)).unwrap(); + assert_eq!(q.insert(cid(3, 1)).unwrap().unwrap().0, 0..2,); + 
assert_eq!(q.active_seq(), 2); + } + + #[test] + fn retire_many() { + let mut q = CidQueue::new(initial_cid()); + q.insert(cid(2, 0)).unwrap(); + assert_eq!( + q.insert(cid(1_000_000, 1_000_000)).unwrap().unwrap().0, + 0..CidQueue::LEN as u64, + ); + assert_eq!(q.active_seq(), 1_000_000); + } + + #[test] + fn insert_limit() { + let mut q = CidQueue::new(initial_cid()); + assert_eq!(q.insert(cid(CidQueue::LEN as u64 - 1, 0)), Ok(None)); + assert_eq!( + q.insert(cid(CidQueue::LEN as u64, 0)), + Err(InsertError::ExceedsLimit) + ); + } + + #[test] + fn insert_duplicate() { + let mut q = CidQueue::new(initial_cid()); + q.insert(cid(0, 0)).unwrap(); + q.insert(cid(0, 0)).unwrap(); + } + + #[test] + fn insert_retired() { + let mut q = CidQueue::new(initial_cid()); + assert_eq!( + q.insert(cid(0, 0)), + Ok(None), + "reinserting active CID succeeds" + ); + assert!(q.next().is_none(), "active CID isn't requeued"); + q.insert(cid(1, 0)).unwrap(); + q.next().unwrap(); + assert_eq!( + q.insert(cid(0, 0)), + Err(InsertError::Retired), + "previous active CID is already retired" + ); + } + + #[test] + fn retire_then_insert_next() { + let mut q = CidQueue::new(initial_cid()); + for i in 1..CidQueue::LEN as u64 { + q.insert(cid(i, 0)).unwrap(); + } + q.next().unwrap(); + q.insert(cid(CidQueue::LEN as u64, 0)).unwrap(); + assert_eq!( + q.insert(cid(CidQueue::LEN as u64 + 1, 0)), + Err(InsertError::ExceedsLimit) + ); + } + + #[test] + fn always_valid() { + let mut q = CidQueue::new(initial_cid()); + assert!(q.next().is_none()); + assert_eq!(q.active(), initial_cid()); + assert_eq!(q.active_seq(), 0); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/coding.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/coding.rs new file mode 100644 index 0000000000000000000000000000000000000000..3104cbf8483cf4ac0548177d0a2e299bd1af7b44 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/coding.rs @@ -0,0 +1,130 @@ +//! Coding related traits. + +use std::net::{Ipv4Addr, Ipv6Addr}; + +use bytes::{Buf, BufMut}; +use thiserror::Error; + +use crate::VarInt; + +/// Error indicating that the provided buffer was too small +#[derive(Error, Debug, Copy, Clone, Eq, PartialEq)] +#[error("unexpected end of buffer")] +pub struct UnexpectedEnd; + +/// Coding result type +pub type Result = ::std::result::Result; + +/// Infallible encoding and decoding of QUIC primitives +pub trait Codec: Sized { + /// Decode a `Self` from the provided buffer, if the buffer is large enough + fn decode(buf: &mut B) -> Result; + /// Append the encoding of `self` to the provided buffer + fn encode(&self, buf: &mut B); +} + +impl Codec for u8 { + fn decode(buf: &mut B) -> Result { + if buf.remaining() < 1 { + return Err(UnexpectedEnd); + } + Ok(buf.get_u8()) + } + fn encode(&self, buf: &mut B) { + buf.put_u8(*self); + } +} + +impl Codec for u16 { + fn decode(buf: &mut B) -> Result { + if buf.remaining() < 2 { + return Err(UnexpectedEnd); + } + Ok(buf.get_u16()) + } + fn encode(&self, buf: &mut B) { + buf.put_u16(*self); + } +} + +impl Codec for u32 { + fn decode(buf: &mut B) -> Result { + if buf.remaining() < 4 { + return Err(UnexpectedEnd); + } + Ok(buf.get_u32()) + } + fn encode(&self, buf: &mut B) { + buf.put_u32(*self); + } +} + +impl Codec for u64 { + fn decode(buf: &mut B) -> Result { + if buf.remaining() < 8 { + return Err(UnexpectedEnd); + } + Ok(buf.get_u64()) + } + fn encode(&self, buf: &mut B) { + buf.put_u64(*self); + } +} + +impl Codec for Ipv4Addr { + fn decode(buf: &mut B) -> Result { + if buf.remaining() < 4 { + return Err(UnexpectedEnd); + } + let mut octets = [0; 4]; + buf.copy_to_slice(&mut octets); + Ok(octets.into()) + } + fn encode(&self, buf: &mut B) { + buf.put_slice(&self.octets()); + } +} + +impl Codec for Ipv6Addr { + fn decode(buf: &mut B) -> Result { + if buf.remaining() < 16 { 
+ return Err(UnexpectedEnd); + } + let mut octets = [0; 16]; + buf.copy_to_slice(&mut octets); + Ok(octets.into()) + } + fn encode(&self, buf: &mut B) { + buf.put_slice(&self.octets()); + } +} + +pub(crate) trait BufExt { + fn get(&mut self) -> Result; + fn get_var(&mut self) -> Result; +} + +impl BufExt for T { + fn get(&mut self) -> Result { + U::decode(self) + } + + fn get_var(&mut self) -> Result { + Ok(VarInt::decode(self)?.into_inner()) + } +} + +pub(crate) trait BufMutExt { + fn write(&mut self, x: T); + fn write_var(&mut self, x: u64); +} + +impl BufMutExt for T { + fn write(&mut self, x: U) { + x.encode(self); + } + + fn write_var(&mut self, x: u64) { + VarInt::from_u64(x).unwrap().encode(self); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/config/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/config/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..aa755b51909675ccc32e71410b85180e9777ec86 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/config/mod.rs @@ -0,0 +1,697 @@ +use std::{ + fmt, + net::{SocketAddrV4, SocketAddrV6}, + num::TryFromIntError, + sync::Arc, +}; + +#[cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] +use rustls::client::WebPkiServerVerifier; +#[cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] +use rustls::pki_types::{CertificateDer, PrivateKeyDer}; +use thiserror::Error; + +#[cfg(feature = "bloom")] +use crate::BloomTokenLog; +#[cfg(not(feature = "bloom"))] +use crate::NoneTokenLog; +#[cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] +use crate::crypto::rustls::{QuicServerConfig, configured_provider}; +use crate::{ + DEFAULT_SUPPORTED_VERSIONS, Duration, MAX_CID_SIZE, RandomConnectionIdGenerator, SystemTime, + TokenLog, TokenMemoryCache, TokenStore, VarInt, VarIntBoundsExceeded, + cid_generator::{ConnectionIdGenerator, 
HashedConnectionIdGenerator}, + crypto::{self, HandshakeTokenKey, HmacKey}, + shared::ConnectionId, +}; + +mod transport; +#[cfg(feature = "qlog")] +pub use transport::QlogConfig; +pub use transport::{AckFrequencyConfig, IdleTimeout, MtuDiscoveryConfig, TransportConfig}; + +/// Global configuration for the endpoint, affecting all connections +/// +/// Default values should be suitable for most internet applications. +#[derive(Clone)] +pub struct EndpointConfig { + pub(crate) reset_key: Arc, + pub(crate) max_udp_payload_size: VarInt, + /// CID generator factory + /// + /// Create a cid generator for local cid in Endpoint struct + pub(crate) connection_id_generator_factory: + Arc Box + Send + Sync>, + pub(crate) supported_versions: Vec, + pub(crate) grease_quic_bit: bool, + /// Minimum interval between outgoing stateless reset packets + pub(crate) min_reset_interval: Duration, + /// Optional seed to be used internally for random number generation + pub(crate) rng_seed: Option<[u8; 32]>, +} + +impl EndpointConfig { + /// Create a default config with a particular `reset_key` + pub fn new(reset_key: Arc) -> Self { + let cid_factory = + || -> Box { Box::::default() }; + Self { + reset_key, + max_udp_payload_size: (1500u32 - 28).into(), // Ethernet MTU minus IP + UDP headers + connection_id_generator_factory: Arc::new(cid_factory), + supported_versions: DEFAULT_SUPPORTED_VERSIONS.to_vec(), + grease_quic_bit: true, + min_reset_interval: Duration::from_millis(20), + rng_seed: None, + } + } + + /// Supply a custom connection ID generator factory + /// + /// Called once by each `Endpoint` constructed from this configuration to obtain the CID + /// generator which will be used to generate the CIDs used for incoming packets on all + /// connections involving that `Endpoint`. A custom CID generator allows applications to embed + /// information in local connection IDs, e.g. to support stateless packet-level load balancers. + /// + /// Defaults to [`HashedConnectionIdGenerator`]. 
+ pub fn cid_generator Box + Send + Sync + 'static>( + &mut self, + factory: F, + ) -> &mut Self { + self.connection_id_generator_factory = Arc::new(factory); + self + } + + /// Private key used to send authenticated connection resets to peers who were + /// communicating with a previous instance of this endpoint. + pub fn reset_key(&mut self, key: Arc) -> &mut Self { + self.reset_key = key; + self + } + + /// Maximum UDP payload size accepted from peers (excluding UDP and IP overhead). + /// + /// Must be greater or equal than 1200. + /// + /// Defaults to 1472, which is the largest UDP payload that can be transmitted in the typical + /// 1500 byte Ethernet MTU. Deployments on links with larger MTUs (e.g. loopback or Ethernet + /// with jumbo frames) can raise this to improve performance at the cost of a linear increase in + /// datagram receive buffer size. + pub fn max_udp_payload_size(&mut self, value: u16) -> Result<&mut Self, ConfigError> { + if !(1200..=65_527).contains(&value) { + return Err(ConfigError::OutOfBounds); + } + + self.max_udp_payload_size = value.into(); + Ok(self) + } + + /// Get the current value of [`max_udp_payload_size`](Self::max_udp_payload_size) + // + // While most parameters don't need to be readable, this must be exposed to allow higher-level + // layers, e.g. the `quinn` crate, to determine how large a receive buffer to allocate to + // support an externally-defined `EndpointConfig`. + // + // While `get_` accessors are typically unidiomatic in Rust, we favor concision for setters, + // which will be used far more heavily. + pub fn get_max_udp_payload_size(&self) -> u64 { + self.max_udp_payload_size.into() + } + + /// Override supported QUIC versions + pub fn supported_versions(&mut self, supported_versions: Vec) -> &mut Self { + self.supported_versions = supported_versions; + self + } + + /// Whether to accept QUIC packets containing any value for the fixed bit + /// + /// Enabled by default. 
Helps protect against protocol ossification and makes traffic less + /// identifiable to observers. Disable if helping observers identify this traffic as QUIC is + /// desired. + pub fn grease_quic_bit(&mut self, value: bool) -> &mut Self { + self.grease_quic_bit = value; + self + } + + /// Minimum interval between outgoing stateless reset packets + /// + /// Defaults to 20ms. Limits the impact of attacks which flood an endpoint with garbage packets, + /// e.g. [ISAKMP/IKE amplification]. Larger values provide a stronger defense, but may delay + /// detection of some error conditions by clients. Using a [`ConnectionIdGenerator`] with a low + /// rate of false positives in [`validate`](ConnectionIdGenerator::validate) reduces the risk + /// incurred by a small minimum reset interval. + /// + /// [ISAKMP/IKE + /// amplification]: https://bughunters.google.com/blog/5960150648750080/preventing-cross-service-udp-loops-in-quic#isakmp-ike-amplification-vs-quic + pub fn min_reset_interval(&mut self, value: Duration) -> &mut Self { + self.min_reset_interval = value; + self + } + + /// Optional seed to be used internally for random number generation + /// + /// By default, quinn will initialize an endpoint's rng using a platform entropy source. + /// However, you can seed the rng yourself through this method (e.g. if you need to run quinn + /// deterministically or if you are using quinn in an environment that doesn't have a source of + /// entropy available). 
+ pub fn rng_seed(&mut self, seed: Option<[u8; 32]>) -> &mut Self { + self.rng_seed = seed; + self + } +} + +impl fmt::Debug for EndpointConfig { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("EndpointConfig") + // reset_key not debug + .field("max_udp_payload_size", &self.max_udp_payload_size) + // cid_generator_factory not debug + .field("supported_versions", &self.supported_versions) + .field("grease_quic_bit", &self.grease_quic_bit) + .field("rng_seed", &self.rng_seed) + .finish_non_exhaustive() + } +} + +#[cfg(any(feature = "aws-lc-rs", feature = "ring"))] +impl Default for EndpointConfig { + fn default() -> Self { + #[cfg(all(feature = "aws-lc-rs", not(feature = "ring")))] + use aws_lc_rs::hmac; + use rand::RngCore; + #[cfg(feature = "ring")] + use ring::hmac; + + let mut reset_key = [0; 64]; + rand::rng().fill_bytes(&mut reset_key); + + Self::new(Arc::new(hmac::Key::new(hmac::HMAC_SHA256, &reset_key))) + } +} + +/// Parameters governing incoming connections +/// +/// Default values should be suitable for most internet applications. +#[derive(Clone)] +pub struct ServerConfig { + /// Transport configuration to use for incoming connections + pub transport: Arc, + + /// TLS configuration used for incoming connections + /// + /// Must be set to use TLS 1.3 only. + pub crypto: Arc, + + /// Configuration for sending and handling validation tokens + pub validation_token: ValidationTokenConfig, + + /// Used to generate one-time AEAD keys to protect handshake tokens + pub(crate) token_key: Arc, + + /// Duration after a retry token was issued for which it's considered valid + pub(crate) retry_token_lifetime: Duration, + + /// Whether to allow clients to migrate to new addresses + /// + /// Improves behavior for clients that move between different internet connections or suffer NAT + /// rebinding. Enabled by default. 
+ pub(crate) migration: bool, + + pub(crate) preferred_address_v4: Option, + pub(crate) preferred_address_v6: Option, + + pub(crate) max_incoming: usize, + pub(crate) incoming_buffer_size: u64, + pub(crate) incoming_buffer_size_total: u64, + + pub(crate) time_source: Arc, +} + +impl ServerConfig { + /// Create a default config with a particular handshake token key + pub fn new( + crypto: Arc, + token_key: Arc, + ) -> Self { + Self { + transport: Arc::new(TransportConfig::default()), + crypto, + + token_key, + retry_token_lifetime: Duration::from_secs(15), + + migration: true, + + validation_token: ValidationTokenConfig::default(), + + preferred_address_v4: None, + preferred_address_v6: None, + + max_incoming: 1 << 16, + incoming_buffer_size: 10 << 20, + incoming_buffer_size_total: 100 << 20, + + time_source: Arc::new(StdSystemTime), + } + } + + /// Set a custom [`TransportConfig`] + pub fn transport_config(&mut self, transport: Arc) -> &mut Self { + self.transport = transport; + self + } + + /// Set a custom [`ValidationTokenConfig`] + pub fn validation_token_config( + &mut self, + validation_token: ValidationTokenConfig, + ) -> &mut Self { + self.validation_token = validation_token; + self + } + + /// Private key used to authenticate data included in handshake tokens + pub fn token_key(&mut self, value: Arc) -> &mut Self { + self.token_key = value; + self + } + + /// Duration after a retry token was issued for which it's considered valid + /// + /// Defaults to 15 seconds. + pub fn retry_token_lifetime(&mut self, value: Duration) -> &mut Self { + self.retry_token_lifetime = value; + self + } + + /// Whether to allow clients to migrate to new addresses + /// + /// Improves behavior for clients that move between different internet connections or suffer NAT + /// rebinding. Enabled by default. 
+ pub fn migration(&mut self, value: bool) -> &mut Self { + self.migration = value; + self + } + + /// The preferred IPv4 address that will be communicated to clients during handshaking + /// + /// If the client is able to reach this address, it will switch to it. + pub fn preferred_address_v4(&mut self, address: Option) -> &mut Self { + self.preferred_address_v4 = address; + self + } + + /// The preferred IPv6 address that will be communicated to clients during handshaking + /// + /// If the client is able to reach this address, it will switch to it. + pub fn preferred_address_v6(&mut self, address: Option) -> &mut Self { + self.preferred_address_v6 = address; + self + } + + /// Maximum number of [`Incoming`][crate::Incoming] to allow to exist at a time + /// + /// An [`Incoming`][crate::Incoming] comes into existence when an incoming connection attempt + /// is received and stops existing when the application either accepts it or otherwise disposes + /// of it. While this limit is reached, new incoming connection attempts are immediately + /// refused. Larger values have greater worst-case memory consumption, but accommodate greater + /// application latency in handling incoming connection attempts. + /// + /// The default value is set to 65536. With a typical Ethernet MTU of 1500 bytes, this limits + /// memory consumption from this to under 100 MiB--a generous amount that still prevents memory + /// exhaustion in most contexts. + pub fn max_incoming(&mut self, max_incoming: usize) -> &mut Self { + self.max_incoming = max_incoming; + self + } + + /// Maximum number of received bytes to buffer for each [`Incoming`][crate::Incoming] + /// + /// An [`Incoming`][crate::Incoming] comes into existence when an incoming connection attempt + /// is received and stops existing when the application either accepts it or otherwise disposes + /// of it. This limit governs only packets received within that period, and does not include + /// the first packet. 
Packets received in excess of this limit are dropped, which may cause + /// 0-RTT or handshake data to have to be retransmitted. + /// + /// The default value is set to 10 MiB--an amount such that in most situations a client would + /// not transmit that much 0-RTT data faster than the server handles the corresponding + /// [`Incoming`][crate::Incoming]. + pub fn incoming_buffer_size(&mut self, incoming_buffer_size: u64) -> &mut Self { + self.incoming_buffer_size = incoming_buffer_size; + self + } + + /// Maximum number of received bytes to buffer for all [`Incoming`][crate::Incoming] + /// collectively + /// + /// An [`Incoming`][crate::Incoming] comes into existence when an incoming connection attempt + /// is received and stops existing when the application either accepts it or otherwise disposes + /// of it. This limit governs only packets received within that period, and does not include + /// the first packet. Packets received in excess of this limit are dropped, which may cause + /// 0-RTT or handshake data to have to be retransmitted. + /// + /// The default value is set to 100 MiB--a generous amount that still prevents memory + /// exhaustion in most contexts. + pub fn incoming_buffer_size_total(&mut self, incoming_buffer_size_total: u64) -> &mut Self { + self.incoming_buffer_size_total = incoming_buffer_size_total; + self + } + + /// Object to get current [`SystemTime`] + /// + /// This exists to allow system time to be mocked in tests, or wherever else desired. + /// + /// Defaults to [`StdSystemTime`], which simply calls [`SystemTime::now()`](SystemTime::now). 
+ pub fn time_source(&mut self, time_source: Arc) -> &mut Self { + self.time_source = time_source; + self + } + + pub(crate) fn has_preferred_address(&self) -> bool { + self.preferred_address_v4.is_some() || self.preferred_address_v6.is_some() + } +} + +#[cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] +impl ServerConfig { + /// Create a server config with the given certificate chain to be presented to clients + /// + /// Uses a randomized handshake token key. + pub fn with_single_cert( + cert_chain: Vec>, + key: PrivateKeyDer<'static>, + ) -> Result { + Ok(Self::with_crypto(Arc::new(QuicServerConfig::new( + cert_chain, key, + )?))) + } +} + +#[cfg(any(feature = "aws-lc-rs", feature = "ring"))] +impl ServerConfig { + /// Create a server config with the given [`crypto::ServerConfig`] + /// + /// Uses a randomized handshake token key. + pub fn with_crypto(crypto: Arc) -> Self { + #[cfg(all(feature = "aws-lc-rs", not(feature = "ring")))] + use aws_lc_rs::hkdf; + use rand::RngCore; + #[cfg(feature = "ring")] + use ring::hkdf; + + let rng = &mut rand::rng(); + let mut master_key = [0u8; 64]; + rng.fill_bytes(&mut master_key); + let master_key = hkdf::Salt::new(hkdf::HKDF_SHA256, &[]).extract(&master_key); + + Self::new(crypto, Arc::new(master_key)) + } +} + +impl fmt::Debug for ServerConfig { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("ServerConfig") + .field("transport", &self.transport) + // crypto not debug + // token not debug + .field("retry_token_lifetime", &self.retry_token_lifetime) + .field("validation_token", &self.validation_token) + .field("migration", &self.migration) + .field("preferred_address_v4", &self.preferred_address_v4) + .field("preferred_address_v6", &self.preferred_address_v6) + .field("max_incoming", &self.max_incoming) + .field("incoming_buffer_size", &self.incoming_buffer_size) + .field( + "incoming_buffer_size_total", + &self.incoming_buffer_size_total, + ) + // system_time_clock not 
debug + .finish_non_exhaustive() + } +} + +/// Configuration for sending and handling validation tokens in incoming connections +/// +/// Default values should be suitable for most internet applications. +/// +/// ## QUIC Tokens +/// +/// The QUIC protocol defines a concept of "[address validation][1]". Essentially, one side of a +/// QUIC connection may appear to be receiving QUIC packets from a particular remote UDP address, +/// but it will only consider that remote address "validated" once it has convincing evidence that +/// the address is not being [spoofed][2]. +/// +/// Validation is important primarily because of QUIC's "anti-amplification limit." This limit +/// prevents a QUIC server from sending a client more than three times the number of bytes it has +/// received from the client on a given address until that address is validated. This is designed +/// to mitigate the ability of attackers to use QUIC-based servers as reflectors in [amplification +/// attacks][3]. +/// +/// A path may become validated in several ways. The server is always considered validated by the +/// client. The client usually begins in an unvalidated state upon first connecting or migrating, +/// but then becomes validated through various mechanisms that usually take one network round trip. +/// However, in some cases, a client which has previously attempted to connect to a server may have +/// been given a one-time use cryptographically secured "token" that it can send in a subsequent +/// connection attempt to be validated immediately. +/// +/// There are two ways these tokens can originate: +/// +/// - If the server responds to an incoming connection with `retry`, a "retry token" is minted and +/// sent to the client, which the client immediately uses to attempt to connect again. Retry +/// tokens operate on short timescales, such as 15 seconds. 
+/// - If a client's path within an active connection is validated, the server may send the client +/// one or more "validation tokens," which the client may store for use in later connections to +/// the same server. Validation tokens may be valid for much longer lifetimes than retry token. +/// +/// The usage of validation tokens is most impactful in situations where 0-RTT data is also being +/// used--in particular, in situations where the server sends the client more than three times more +/// 0.5-RTT data than it has received 0-RTT data. Since the successful completion of a connection +/// handshake implicitly causes the client's address to be validated, transmission of 0.5-RTT data +/// is the main situation where a server might be sending application data to an address that could +/// be validated by token usage earlier than it would become validated without token usage. +/// +/// [1]: https://www.rfc-editor.org/rfc/rfc9000.html#section-8 +/// [2]: https://en.wikipedia.org/wiki/IP_address_spoofing +/// [3]: https://en.wikipedia.org/wiki/Denial-of-service_attack#Amplification +/// +/// These tokens should not be confused with "stateless reset tokens," which are similarly named +/// but entirely unrelated. +#[derive(Clone)] +pub struct ValidationTokenConfig { + pub(crate) lifetime: Duration, + pub(crate) log: Arc, + pub(crate) sent: u32, +} + +impl ValidationTokenConfig { + /// Duration after an address validation token was issued for which it's considered valid + /// + /// This refers only to tokens sent in NEW_TOKEN frames, in contrast to retry tokens. + /// + /// Defaults to 2 weeks. 
+ pub fn lifetime(&mut self, value: Duration) -> &mut Self { + self.lifetime = value; + self + } + + #[allow(rustdoc::redundant_explicit_links)] // which links are redundant depends on features + /// Set a custom [`TokenLog`] + /// + /// If the `bloom` feature is enabled (which it is by default), defaults to a default + /// [`BloomTokenLog`][crate::BloomTokenLog], which is suitable for most internet applications. + /// + /// If the `bloom` feature is disabled, defaults to [`NoneTokenLog`][crate::NoneTokenLog], + /// which makes the server ignore all address validation tokens (that is, tokens originating + /// from NEW_TOKEN frames--retry tokens are not affected). + pub fn log(&mut self, log: Arc) -> &mut Self { + self.log = log; + self + } + + /// Number of address validation tokens sent to a client when its path is validated + /// + /// This refers only to tokens sent in NEW_TOKEN frames, in contrast to retry tokens. + /// + /// If the `bloom` feature is enabled (which it is by default), defaults to 2. Otherwise, + /// defaults to 0. + pub fn sent(&mut self, value: u32) -> &mut Self { + self.sent = value; + self + } +} + +impl Default for ValidationTokenConfig { + fn default() -> Self { + #[cfg(feature = "bloom")] + let log = Arc::new(BloomTokenLog::default()); + #[cfg(not(feature = "bloom"))] + let log = Arc::new(NoneTokenLog); + Self { + lifetime: Duration::from_secs(2 * 7 * 24 * 60 * 60), + log, + sent: if cfg!(feature = "bloom") { 2 } else { 0 }, + } + } +} + +impl fmt::Debug for ValidationTokenConfig { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("ServerValidationTokenConfig") + .field("lifetime", &self.lifetime) + // log not debug + .field("sent", &self.sent) + .finish_non_exhaustive() + } +} + +/// Configuration for outgoing connections +/// +/// Default values should be suitable for most internet applications. 
+#[derive(Clone)] +#[non_exhaustive] +pub struct ClientConfig { + /// Transport configuration to use + pub(crate) transport: Arc, + + /// Cryptographic configuration to use + pub(crate) crypto: Arc, + + /// Validation token store to use + pub(crate) token_store: Arc, + + /// Provider that populates the destination connection ID of Initial Packets + pub(crate) initial_dst_cid_provider: Arc ConnectionId + Send + Sync>, + + /// QUIC protocol version to use + pub(crate) version: u32, +} + +impl ClientConfig { + /// Create a default config with a particular cryptographic config + pub fn new(crypto: Arc) -> Self { + Self { + transport: Default::default(), + crypto, + token_store: Arc::new(TokenMemoryCache::default()), + initial_dst_cid_provider: Arc::new(|| { + RandomConnectionIdGenerator::new(MAX_CID_SIZE).generate_cid() + }), + version: 1, + } + } + + /// Configure how to populate the destination CID of the initial packet when attempting to + /// establish a new connection + /// + /// By default, it's populated with random bytes with reasonable length, so unless you have + /// a good reason, you do not need to change it. + /// + /// When prefer to override the default, please note that the generated connection ID MUST be + /// at least 8 bytes long and unpredictable, as per section 7.2 of RFC 9000. + pub fn initial_dst_cid_provider( + &mut self, + initial_dst_cid_provider: Arc ConnectionId + Send + Sync>, + ) -> &mut Self { + self.initial_dst_cid_provider = initial_dst_cid_provider; + self + } + + /// Set a custom [`TransportConfig`] + pub fn transport_config(&mut self, transport: Arc) -> &mut Self { + self.transport = transport; + self + } + + /// Set a custom [`TokenStore`] + /// + /// Defaults to [`TokenMemoryCache`], which is suitable for most internet applications. 
+ pub fn token_store(&mut self, store: Arc) -> &mut Self { + self.token_store = store; + self + } + + /// Set the QUIC version to use + pub fn version(&mut self, version: u32) -> &mut Self { + self.version = version; + self + } +} + +#[cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] +impl ClientConfig { + /// Create a client configuration that trusts the platform's native roots + #[deprecated(since = "0.11.13", note = "use `try_with_platform_verifier()` instead")] + #[cfg(feature = "platform-verifier")] + pub fn with_platform_verifier() -> Self { + Self::try_with_platform_verifier().expect("use try_with_platform_verifier() instead") + } + + /// Create a client configuration that trusts the platform's native roots + #[cfg(feature = "platform-verifier")] + pub fn try_with_platform_verifier() -> Result { + Ok(Self::new(Arc::new( + crypto::rustls::QuicClientConfig::with_platform_verifier()?, + ))) + } + + /// Create a client configuration that trusts specified trust anchors + pub fn with_root_certificates( + roots: Arc, + ) -> Result { + Ok(Self::new(Arc::new(crypto::rustls::QuicClientConfig::new( + WebPkiServerVerifier::builder_with_provider(roots, configured_provider()).build()?, + )))) + } +} + +impl fmt::Debug for ClientConfig { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("ClientConfig") + .field("transport", &self.transport) + // crypto not debug + // token_store not debug + .field("version", &self.version) + .finish_non_exhaustive() + } +} + +/// Errors in the configuration of an endpoint +#[derive(Debug, Error, Clone, PartialEq, Eq)] +#[non_exhaustive] +pub enum ConfigError { + /// Value exceeds supported bounds + #[error("value exceeds supported bounds")] + OutOfBounds, +} + +impl From for ConfigError { + fn from(_: TryFromIntError) -> Self { + Self::OutOfBounds + } +} + +impl From for ConfigError { + fn from(_: VarIntBoundsExceeded) -> Self { + Self::OutOfBounds + } +} + +/// Object to get current 
[`SystemTime`] +/// +/// This exists to allow system time to be mocked in tests, or wherever else desired. +pub trait TimeSource: Send + Sync { + /// Get [`SystemTime::now()`](SystemTime::now) or the mocked equivalent + fn now(&self) -> SystemTime; +} + +/// Default implementation of [`TimeSource`] +/// +/// Implements `now` by calling [`SystemTime::now()`](SystemTime::now). +pub struct StdSystemTime; + +impl TimeSource for StdSystemTime { + fn now(&self) -> SystemTime { + SystemTime::now() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/config/transport.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/config/transport.rs new file mode 100644 index 0000000000000000000000000000000000000000..0198e4504b48fcde2de57d25d4c9da39e253ee83 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/config/transport.rs @@ -0,0 +1,785 @@ +use std::{fmt, sync::Arc}; +#[cfg(feature = "qlog")] +use std::{io, sync::Mutex, time::Instant}; + +#[cfg(feature = "qlog")] +use qlog::streamer::QlogStreamer; + +#[cfg(feature = "qlog")] +use crate::QlogStream; +use crate::{ + Duration, INITIAL_MTU, MAX_UDP_PAYLOAD, VarInt, VarIntBoundsExceeded, congestion, + connection::qlog::QlogSink, +}; + +/// Parameters governing the core QUIC state machine +/// +/// Default values should be suitable for most internet applications. Applications protocols which +/// forbid remotely-initiated streams should set `max_concurrent_bidi_streams` and +/// `max_concurrent_uni_streams` to zero. +/// +/// In some cases, performance or resource requirements can be improved by tuning these values to +/// suit a particular application and/or network connection. In particular, data window sizes can be +/// tuned for a particular expected round trip time, link capacity, and memory availability. 
Tuning +/// for higher bandwidths and latencies increases worst-case memory consumption, but does not impair +/// performance at lower bandwidths and latencies. The default configuration is tuned for a 100Mbps +/// link with a 100ms round trip time. +pub struct TransportConfig { + pub(crate) max_concurrent_bidi_streams: VarInt, + pub(crate) max_concurrent_uni_streams: VarInt, + pub(crate) max_idle_timeout: Option, + pub(crate) stream_receive_window: VarInt, + pub(crate) receive_window: VarInt, + pub(crate) send_window: u64, + pub(crate) send_fairness: bool, + + pub(crate) packet_threshold: u32, + pub(crate) time_threshold: f32, + pub(crate) initial_rtt: Duration, + pub(crate) initial_mtu: u16, + pub(crate) min_mtu: u16, + pub(crate) mtu_discovery_config: Option, + pub(crate) pad_to_mtu: bool, + pub(crate) ack_frequency_config: Option, + + pub(crate) persistent_congestion_threshold: u32, + pub(crate) keep_alive_interval: Option, + pub(crate) crypto_buffer_size: usize, + pub(crate) allow_spin: bool, + pub(crate) datagram_receive_buffer_size: Option, + pub(crate) datagram_send_buffer_size: usize, + #[cfg(test)] + pub(crate) deterministic_packet_numbers: bool, + + pub(crate) congestion_controller_factory: Arc, + + pub(crate) enable_segmentation_offload: bool, + + pub(crate) qlog_sink: QlogSink, +} + +impl TransportConfig { + /// Maximum number of incoming bidirectional streams that may be open concurrently + /// + /// Must be nonzero for the peer to open any bidirectional streams. + /// + /// Worst-case memory use is directly proportional to `max_concurrent_bidi_streams * + /// stream_receive_window`, with an upper bound proportional to `receive_window`. 
+ pub fn max_concurrent_bidi_streams(&mut self, value: VarInt) -> &mut Self { + self.max_concurrent_bidi_streams = value; + self + } + + /// Variant of `max_concurrent_bidi_streams` affecting unidirectional streams + pub fn max_concurrent_uni_streams(&mut self, value: VarInt) -> &mut Self { + self.max_concurrent_uni_streams = value; + self + } + + /// Maximum duration of inactivity to accept before timing out the connection. + /// + /// The true idle timeout is the minimum of this and the peer's own max idle timeout. `None` + /// represents an infinite timeout. Defaults to 30 seconds. + /// + /// **WARNING**: If a peer or its network path malfunctions or acts maliciously, an infinite + /// idle timeout can result in permanently hung futures! + /// + /// ``` + /// # use std::{convert::TryInto, time::Duration}; + /// # use quinn_proto::{TransportConfig, VarInt, VarIntBoundsExceeded}; + /// # fn main() -> Result<(), VarIntBoundsExceeded> { + /// let mut config = TransportConfig::default(); + /// + /// // Set the idle timeout as `VarInt`-encoded milliseconds + /// config.max_idle_timeout(Some(VarInt::from_u32(10_000).into())); + /// + /// // Set the idle timeout as a `Duration` + /// config.max_idle_timeout(Some(Duration::from_secs(10).try_into()?)); + /// # Ok(()) + /// # } + /// ``` + pub fn max_idle_timeout(&mut self, value: Option) -> &mut Self { + self.max_idle_timeout = value.map(|t| t.0); + self + } + + /// Maximum number of bytes the peer may transmit without acknowledgement on any one stream + /// before becoming blocked. + /// + /// This should be set to at least the expected connection latency multiplied by the maximum + /// desired throughput. Setting this smaller than `receive_window` helps ensure that a single + /// stream doesn't monopolize receive buffers, which may otherwise occur if the application + /// chooses not to read from a large stream for a time while still requiring data on other + /// streams. 
+ pub fn stream_receive_window(&mut self, value: VarInt) -> &mut Self { + self.stream_receive_window = value; + self + } + + /// Maximum number of bytes the peer may transmit across all streams of a connection before + /// becoming blocked. + /// + /// This should be set to at least the expected connection latency multiplied by the maximum + /// desired throughput. Larger values can be useful to allow maximum throughput within a + /// stream while another is blocked. + pub fn receive_window(&mut self, value: VarInt) -> &mut Self { + self.receive_window = value; + self + } + + /// Maximum number of bytes to transmit to a peer without acknowledgment + /// + /// Provides an upper bound on memory when communicating with peers that issue large amounts of + /// flow control credit. Endpoints that wish to handle large numbers of connections robustly + /// should take care to set this low enough to guarantee memory exhaustion does not occur if + /// every connection uses the entire window. + pub fn send_window(&mut self, value: u64) -> &mut Self { + self.send_window = value; + self + } + + /// Whether to implement fair queuing for send streams having the same priority. + /// + /// When enabled, connections schedule data from outgoing streams having the same priority in a + /// round-robin fashion. When disabled, streams are scheduled in the order they are written to. + /// + /// Note that this only affects streams with the same priority. Higher priority streams always + /// take precedence over lower priority streams. + /// + /// Disabling fairness can reduce fragmentation and protocol overhead for workloads that use + /// many small streams. + pub fn send_fairness(&mut self, value: bool) -> &mut Self { + self.send_fairness = value; + self + } + + /// Maximum reordering in packet number space before FACK style loss detection considers a + /// packet lost. Should not be less than 3, per RFC5681. 
+ pub fn packet_threshold(&mut self, value: u32) -> &mut Self { + self.packet_threshold = value; + self + } + + /// Maximum reordering in time space before time based loss detection considers a packet lost, + /// as a factor of RTT + pub fn time_threshold(&mut self, value: f32) -> &mut Self { + self.time_threshold = value; + self + } + + /// The RTT used before an RTT sample is taken + pub fn initial_rtt(&mut self, value: Duration) -> &mut Self { + self.initial_rtt = value; + self + } + + /// The initial value to be used as the maximum UDP payload size before running MTU discovery + /// (see [`TransportConfig::mtu_discovery_config`]). + /// + /// Must be at least 1200, which is the default, and known to be safe for typical internet + /// applications. Larger values are more efficient, but increase the risk of packet loss due to + /// exceeding the network path's IP MTU. If the provided value is higher than what the network + /// path actually supports, packet loss will eventually trigger black hole detection and bring + /// it down to [`TransportConfig::min_mtu`]. + pub fn initial_mtu(&mut self, value: u16) -> &mut Self { + self.initial_mtu = value.max(INITIAL_MTU); + self + } + + pub(crate) fn get_initial_mtu(&self) -> u16 { + self.initial_mtu.max(self.min_mtu) + } + + /// The maximum UDP payload size guaranteed to be supported by the network. + /// + /// Must be at least 1200, which is the default, and lower than or equal to + /// [`TransportConfig::initial_mtu`]. + /// + /// Real-world MTUs can vary according to ISP, VPN, and properties of intermediate network links + /// outside of either endpoint's control. Extreme care should be used when raising this value + /// outside of private networks where these factors are fully controlled. If the provided value + /// is higher than what the network path actually supports, the result will be unpredictable and + /// catastrophic packet loss, without a possibility of repair. 
Prefer + /// [`TransportConfig::initial_mtu`] together with + /// [`TransportConfig::mtu_discovery_config`] to set a maximum UDP payload size that robustly + /// adapts to the network. + pub fn min_mtu(&mut self, value: u16) -> &mut Self { + self.min_mtu = value.max(INITIAL_MTU); + self + } + + /// Specifies the MTU discovery config (see [`MtuDiscoveryConfig`] for details). + /// + /// Enabled by default. + pub fn mtu_discovery_config(&mut self, value: Option) -> &mut Self { + self.mtu_discovery_config = value; + self + } + + /// Pad UDP datagrams carrying application data to current maximum UDP payload size + /// + /// Disabled by default. UDP datagrams containing loss probes are exempt from padding. + /// + /// Enabling this helps mitigate traffic analysis by network observers, but it increases + /// bandwidth usage. Without this mitigation precise plain text size of application datagrams as + /// well as the total size of stream write bursts can be inferred by observers under certain + /// conditions. This analysis requires either an uncongested connection or application datagrams + /// too large to be coalesced. + pub fn pad_to_mtu(&mut self, value: bool) -> &mut Self { + self.pad_to_mtu = value; + self + } + + /// Specifies the ACK frequency config (see [`AckFrequencyConfig`] for details) + /// + /// The provided configuration will be ignored if the peer does not support the acknowledgement + /// frequency QUIC extension. + /// + /// Defaults to `None`, which disables controlling the peer's acknowledgement frequency. Even + /// if set to `None`, the local side still supports the acknowledgement frequency QUIC + /// extension and may use it in other ways. + pub fn ack_frequency_config(&mut self, value: Option) -> &mut Self { + self.ack_frequency_config = value; + self + } + + /// Number of consecutive PTOs after which network is considered to be experiencing persistent congestion. 
+ pub fn persistent_congestion_threshold(&mut self, value: u32) -> &mut Self { + self.persistent_congestion_threshold = value; + self + } + + /// Period of inactivity before sending a keep-alive packet + /// + /// Keep-alive packets prevent an inactive but otherwise healthy connection from timing out. + /// + /// `None` to disable, which is the default. Only one side of any given connection needs keep-alive + /// enabled for the connection to be preserved. Must be set lower than the idle_timeout of both + /// peers to be effective. + pub fn keep_alive_interval(&mut self, value: Option) -> &mut Self { + self.keep_alive_interval = value; + self + } + + /// Maximum quantity of out-of-order crypto layer data to buffer + pub fn crypto_buffer_size(&mut self, value: usize) -> &mut Self { + self.crypto_buffer_size = value; + self + } + + /// Whether the implementation is permitted to set the spin bit on this connection + /// + /// This allows passive observers to easily judge the round trip time of a connection, which can + /// be useful for network administration but sacrifices a small amount of privacy. + pub fn allow_spin(&mut self, value: bool) -> &mut Self { + self.allow_spin = value; + self + } + + /// Maximum number of incoming application datagram bytes to buffer, or None to disable + /// incoming datagrams + /// + /// The peer is forbidden to send single datagrams larger than this size. If the aggregate size + /// of all datagrams that have been received from the peer but not consumed by the application + /// exceeds this value, old datagrams are dropped until it is no longer exceeded. + pub fn datagram_receive_buffer_size(&mut self, value: Option) -> &mut Self { + self.datagram_receive_buffer_size = value; + self + } + + /// Maximum number of outgoing application datagram bytes to buffer + /// + /// While datagrams are sent ASAP, it is possible for an application to generate data faster + /// than the link, or even the underlying hardware, can transmit them. 
This limits the amount of + /// memory that may be consumed in that case. When the send buffer is full and a new datagram is + /// sent, older datagrams are dropped until sufficient space is available. + pub fn datagram_send_buffer_size(&mut self, value: usize) -> &mut Self { + self.datagram_send_buffer_size = value; + self + } + + /// Whether to force every packet number to be used + /// + /// By default, packet numbers are occasionally skipped to ensure peers aren't ACKing packets + /// before they see them. + #[cfg(test)] + pub(crate) fn deterministic_packet_numbers(&mut self, enabled: bool) -> &mut Self { + self.deterministic_packet_numbers = enabled; + self + } + + /// How to construct new `congestion::Controller`s + /// + /// Typically the refcounted configuration of a `congestion::Controller`, + /// e.g. a `congestion::NewRenoConfig`. + /// + /// # Example + /// ``` + /// # use quinn_proto::*; use std::sync::Arc; + /// let mut config = TransportConfig::default(); + /// config.congestion_controller_factory(Arc::new(congestion::NewRenoConfig::default())); + /// ``` + pub fn congestion_controller_factory( + &mut self, + factory: Arc, + ) -> &mut Self { + self.congestion_controller_factory = factory; + self + } + + /// Whether to use "Generic Segmentation Offload" to accelerate transmits, when supported by the + /// environment + /// + /// Defaults to `true`. + /// + /// GSO dramatically reduces CPU consumption when sending large numbers of packets with the same + /// headers, such as when transmitting bulk data on a connection. However, it is not supported + /// by all network interface drivers or packet inspection tools. `quinn-udp` will attempt to + /// disable GSO automatically when unavailable, but this can lead to spurious packet loss at + /// startup, temporarily degrading performance. 
+ pub fn enable_segmentation_offload(&mut self, enabled: bool) -> &mut Self { + self.enable_segmentation_offload = enabled; + self + } + + /// qlog capture configuration to use for a particular connection + #[cfg(feature = "qlog")] + pub fn qlog_stream(&mut self, stream: Option) -> &mut Self { + self.qlog_sink = stream.into(); + self + } +} + +impl Default for TransportConfig { + fn default() -> Self { + const EXPECTED_RTT: u32 = 100; // ms + const MAX_STREAM_BANDWIDTH: u32 = 12500 * 1000; // bytes/s + // Window size needed to avoid pipeline + // stalls + const STREAM_RWND: u32 = MAX_STREAM_BANDWIDTH / 1000 * EXPECTED_RTT; + + Self { + max_concurrent_bidi_streams: 100u32.into(), + max_concurrent_uni_streams: 100u32.into(), + // 30 second default recommended by RFC 9308 § 3.2 + max_idle_timeout: Some(VarInt(30_000)), + stream_receive_window: STREAM_RWND.into(), + receive_window: VarInt::MAX, + send_window: (8 * STREAM_RWND).into(), + send_fairness: true, + + packet_threshold: 3, + time_threshold: 9.0 / 8.0, + initial_rtt: Duration::from_millis(333), // per spec, intentionally distinct from EXPECTED_RTT + initial_mtu: INITIAL_MTU, + min_mtu: INITIAL_MTU, + mtu_discovery_config: Some(MtuDiscoveryConfig::default()), + pad_to_mtu: false, + ack_frequency_config: None, + + persistent_congestion_threshold: 3, + keep_alive_interval: None, + crypto_buffer_size: 16 * 1024, + allow_spin: true, + datagram_receive_buffer_size: Some(STREAM_RWND as usize), + datagram_send_buffer_size: 1024 * 1024, + #[cfg(test)] + deterministic_packet_numbers: false, + + congestion_controller_factory: Arc::new(congestion::CubicConfig::default()), + + enable_segmentation_offload: true, + + qlog_sink: QlogSink::default(), + } + } +} + +impl fmt::Debug for TransportConfig { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let Self { + max_concurrent_bidi_streams, + max_concurrent_uni_streams, + max_idle_timeout, + stream_receive_window, + receive_window, + send_window, + 
send_fairness, + packet_threshold, + time_threshold, + initial_rtt, + initial_mtu, + min_mtu, + mtu_discovery_config, + pad_to_mtu, + ack_frequency_config, + persistent_congestion_threshold, + keep_alive_interval, + crypto_buffer_size, + allow_spin, + datagram_receive_buffer_size, + datagram_send_buffer_size, + #[cfg(test)] + deterministic_packet_numbers: _, + congestion_controller_factory: _, + enable_segmentation_offload, + qlog_sink, + } = self; + let mut s = fmt.debug_struct("TransportConfig"); + + s.field("max_concurrent_bidi_streams", max_concurrent_bidi_streams) + .field("max_concurrent_uni_streams", max_concurrent_uni_streams) + .field("max_idle_timeout", max_idle_timeout) + .field("stream_receive_window", stream_receive_window) + .field("receive_window", receive_window) + .field("send_window", send_window) + .field("send_fairness", send_fairness) + .field("packet_threshold", packet_threshold) + .field("time_threshold", time_threshold) + .field("initial_rtt", initial_rtt) + .field("initial_mtu", initial_mtu) + .field("min_mtu", min_mtu) + .field("mtu_discovery_config", mtu_discovery_config) + .field("pad_to_mtu", pad_to_mtu) + .field("ack_frequency_config", ack_frequency_config) + .field( + "persistent_congestion_threshold", + persistent_congestion_threshold, + ) + .field("keep_alive_interval", keep_alive_interval) + .field("crypto_buffer_size", crypto_buffer_size) + .field("allow_spin", allow_spin) + .field("datagram_receive_buffer_size", datagram_receive_buffer_size) + .field("datagram_send_buffer_size", datagram_send_buffer_size) + // congestion_controller_factory not debug + .field("enable_segmentation_offload", enable_segmentation_offload); + if cfg!(feature = "qlog") { + s.field("qlog_stream", &qlog_sink.is_enabled()); + } + + s.finish_non_exhaustive() + } +} + +/// Parameters for controlling the peer's acknowledgement frequency +/// +/// The parameters provided in this config will be sent to the peer at the beginning of the +/// connection, so it can 
take them into account when sending acknowledgements (see each parameter's +/// description for details on how it influences acknowledgement frequency). +/// +/// Quinn's implementation follows the fourth draft of the +/// [QUIC Acknowledgement Frequency extension](https://datatracker.ietf.org/doc/html/draft-ietf-quic-ack-frequency-04). +/// The defaults produce behavior slightly different than the behavior without this extension, +/// because they change the way reordered packets are handled (see +/// [`AckFrequencyConfig::reordering_threshold`] for details). +#[derive(Clone, Debug)] +pub struct AckFrequencyConfig { + pub(crate) ack_eliciting_threshold: VarInt, + pub(crate) max_ack_delay: Option, + pub(crate) reordering_threshold: VarInt, +} + +impl AckFrequencyConfig { + /// The ack-eliciting threshold we will request the peer to use + /// + /// This threshold represents the number of ack-eliciting packets an endpoint may receive + /// without immediately sending an ACK. + /// + /// The remote peer should send at least one ACK frame when more than this number of + /// ack-eliciting packets have been received. A value of 0 results in a receiver immediately + /// acknowledging every ack-eliciting packet. + /// + /// Defaults to 1, which sends ACK frames for every other ack-eliciting packet. + pub fn ack_eliciting_threshold(&mut self, value: VarInt) -> &mut Self { + self.ack_eliciting_threshold = value; + self + } + + /// The `max_ack_delay` we will request the peer to use + /// + /// This parameter represents the maximum amount of time that an endpoint waits before sending + /// an ACK when the ack-eliciting threshold hasn't been reached. + /// + /// The effective `max_ack_delay` will be clamped to be at least the peer's `min_ack_delay` + /// transport parameter, and at most the greater of the current path RTT or 25ms. + /// + /// Defaults to `None`, in which case the peer's original `max_ack_delay` will be used, as + /// obtained from its transport parameters. 
+ pub fn max_ack_delay(&mut self, value: Option) -> &mut Self { + self.max_ack_delay = value; + self + } + + /// The reordering threshold we will request the peer to use + /// + /// This threshold represents the amount of out-of-order packets that will trigger an endpoint + /// to send an ACK, without waiting for `ack_eliciting_threshold` to be exceeded or for + /// `max_ack_delay` to be elapsed. + /// + /// A value of 0 indicates out-of-order packets do not elicit an immediate ACK. A value of 1 + /// immediately acknowledges any packets that are received out of order (this is also the + /// behavior when the extension is disabled). + /// + /// It is recommended to set this value to [`TransportConfig::packet_threshold`] minus one. + /// Since the default value for [`TransportConfig::packet_threshold`] is 3, this value defaults + /// to 2. + pub fn reordering_threshold(&mut self, value: VarInt) -> &mut Self { + self.reordering_threshold = value; + self + } +} + +impl Default for AckFrequencyConfig { + fn default() -> Self { + Self { + ack_eliciting_threshold: VarInt(1), + max_ack_delay: None, + reordering_threshold: VarInt(2), + } + } +} + +/// Configuration for qlog trace logging +#[cfg(feature = "qlog")] +pub struct QlogConfig { + writer: Option>, + title: Option, + description: Option, + start_time: Instant, +} + +#[cfg(feature = "qlog")] +impl QlogConfig { + /// Where to write a qlog `TraceSeq` + pub fn writer(&mut self, writer: Box) -> &mut Self { + self.writer = Some(writer); + self + } + + /// Title to record in the qlog capture + pub fn title(&mut self, title: Option) -> &mut Self { + self.title = title; + self + } + + /// Description to record in the qlog capture + pub fn description(&mut self, description: Option) -> &mut Self { + self.description = description; + self + } + + /// Epoch qlog event times are recorded relative to + pub fn start_time(&mut self, start_time: Instant) -> &mut Self { + self.start_time = start_time; + self + } + + /// Construct 
the [`QlogStream`] described by this configuration + pub fn into_stream(self) -> Option { + use tracing::warn; + + let writer = self.writer?; + let trace = qlog::TraceSeq::new( + qlog::VantagePoint { + name: None, + ty: qlog::VantagePointType::Unknown, + flow: None, + }, + self.title.clone(), + self.description.clone(), + Some(qlog::Configuration { + time_offset: Some(0.0), + original_uris: None, + }), + None, + ); + + let mut streamer = QlogStreamer::new( + qlog::QLOG_VERSION.into(), + self.title, + self.description, + None, + self.start_time, + trace, + qlog::events::EventImportance::Core, + writer, + ); + + match streamer.start_log() { + Ok(()) => Some(QlogStream(Arc::new(Mutex::new(streamer)))), + Err(e) => { + warn!("could not initialize endpoint qlog streamer: {e}"); + None + } + } + } +} + +#[cfg(feature = "qlog")] +impl Default for QlogConfig { + fn default() -> Self { + Self { + writer: None, + title: None, + description: None, + start_time: Instant::now(), + } + } +} + +/// Parameters governing MTU discovery. +/// +/// # The why of MTU discovery +/// +/// By design, QUIC ensures during the handshake that the network path between the client and the +/// server is able to transmit unfragmented UDP packets with a body of 1200 bytes. In other words, +/// once the connection is established, we know that the network path's maximum transmission unit +/// (MTU) is of at least 1200 bytes (plus IP and UDP headers). Because of this, a QUIC endpoint can +/// split outgoing data in packets of 1200 bytes, with confidence that the network will be able to +/// deliver them (if the endpoint were to send bigger packets, they could prove too big and end up +/// being dropped). +/// +/// There is, however, a significant overhead associated to sending a packet. If the same +/// information can be sent in fewer packets, that results in higher throughput. 
The amount of +/// packets that need to be sent is inversely proportional to the MTU: the higher the MTU, the +/// bigger the packets that can be sent, and the fewer packets that are needed to transmit a given +/// amount of bytes. +/// +/// Most networks have an MTU higher than 1200. Through MTU discovery, endpoints can detect the +/// path's MTU and, if it turns out to be higher, start sending bigger packets. +/// +/// # MTU discovery internals +/// +/// Quinn implements MTU discovery through DPLPMTUD (Datagram Packetization Layer Path MTU +/// Discovery), described in [section 14.3 of RFC +/// 9000](https://www.rfc-editor.org/rfc/rfc9000.html#section-14.3). This method consists of sending +/// QUIC packets padded to a particular size (called PMTU probes), and waiting to see if the remote +/// peer responds with an ACK. If an ACK is received, that means the probe arrived at the remote +/// peer, which in turn means that the network path's MTU is of at least the packet's size. If the +/// probe is lost, it is sent another 2 times before concluding that the MTU is lower than the +/// packet's size. +/// +/// MTU discovery runs on a schedule (e.g. every 600 seconds) specified through +/// [`MtuDiscoveryConfig::interval`]. The first run happens right after the handshake, and +/// subsequent discoveries are scheduled to run when the interval has elapsed, starting from the +/// last time when MTU discovery completed. +/// +/// Since the search space for MTUs is quite big (the smallest possible MTU is 1200, and the highest +/// is 65527), Quinn performs a binary search to keep the number of probes as low as possible. The +/// lower bound of the search is equal to [`TransportConfig::initial_mtu`] in the +/// initial MTU discovery run, and is equal to the currently discovered MTU in subsequent runs. 
The +/// upper bound is determined by the minimum of [`MtuDiscoveryConfig::upper_bound`] and the +/// `max_udp_payload_size` transport parameter received from the peer during the handshake. +/// +/// # Black hole detection +/// +/// If, at some point, the network path no longer accepts packets of the detected size, packet loss +/// will eventually trigger black hole detection and reset the detected MTU to 1200. In that case, +/// MTU discovery will be triggered after [`MtuDiscoveryConfig::black_hole_cooldown`] (ignoring the +/// timer that was set based on [`MtuDiscoveryConfig::interval`]). +/// +/// # Interaction between peers +/// +/// There is no guarantee that the MTU on the path between A and B is the same as the MTU of the +/// path between B and A. Therefore, each peer in the connection needs to run MTU discovery +/// independently in order to discover the path's MTU. +#[derive(Clone, Debug)] +pub struct MtuDiscoveryConfig { + pub(crate) interval: Duration, + pub(crate) upper_bound: u16, + pub(crate) minimum_change: u16, + pub(crate) black_hole_cooldown: Duration, +} + +impl MtuDiscoveryConfig { + /// Specifies the time to wait after completing MTU discovery before starting a new MTU + /// discovery run. + /// + /// Defaults to 600 seconds, as recommended by [RFC + /// 8899](https://www.rfc-editor.org/rfc/rfc8899). + pub fn interval(&mut self, value: Duration) -> &mut Self { + self.interval = value; + self + } + + /// Specifies the upper bound to the max UDP payload size that MTU discovery will search for. + /// + /// Defaults to 1452, to stay within Ethernet's MTU when using IPv4 and IPv6. The highest + /// allowed value is 65527, which corresponds to the maximum permitted UDP payload on IPv6. + /// + /// It is safe to use an arbitrarily high upper bound, regardless of the network path's MTU. The + /// only drawback is that MTU discovery might take more time to finish. 
+ pub fn upper_bound(&mut self, value: u16) -> &mut Self { + self.upper_bound = value.min(MAX_UDP_PAYLOAD); + self + } + + /// Specifies the amount of time that MTU discovery should wait after a black hole was detected + /// before running again. Defaults to one minute. + /// + /// Black hole detection can be spuriously triggered in case of congestion, so it makes sense to + /// try MTU discovery again after a short period of time. + pub fn black_hole_cooldown(&mut self, value: Duration) -> &mut Self { + self.black_hole_cooldown = value; + self + } + + /// Specifies the minimum MTU change to stop the MTU discovery phase. + /// Defaults to 20. + pub fn minimum_change(&mut self, value: u16) -> &mut Self { + self.minimum_change = value; + self + } +} + +impl Default for MtuDiscoveryConfig { + fn default() -> Self { + Self { + interval: Duration::from_secs(600), + upper_bound: 1452, + black_hole_cooldown: Duration::from_secs(60), + minimum_change: 20, + } + } +} + +/// Maximum duration of inactivity to accept before timing out the connection +/// +/// This wraps an underlying [`VarInt`], representing the duration in milliseconds. Values can be +/// constructed by converting directly from `VarInt`, or using `TryFrom`. 
+/// +/// ``` +/// # use std::{convert::TryFrom, time::Duration}; +/// # use quinn_proto::{IdleTimeout, VarIntBoundsExceeded, VarInt}; +/// # fn main() -> Result<(), VarIntBoundsExceeded> { +/// // A `VarInt`-encoded value in milliseconds +/// let timeout = IdleTimeout::from(VarInt::from_u32(10_000)); +/// +/// // Try to convert a `Duration` into a `VarInt`-encoded timeout +/// let timeout = IdleTimeout::try_from(Duration::from_secs(10))?; +/// # Ok(()) +/// # } +/// ``` +#[derive(Default, Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct IdleTimeout(VarInt); + +impl From for IdleTimeout { + fn from(inner: VarInt) -> Self { + Self(inner) + } +} + +impl std::convert::TryFrom for IdleTimeout { + type Error = VarIntBoundsExceeded; + + fn try_from(timeout: Duration) -> Result { + let inner = VarInt::try_from(timeout.as_millis())?; + Ok(Self(inner)) + } +} + +impl fmt::Debug for IdleTimeout { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion.rs new file mode 100644 index 0000000000000000000000000000000000000000..391e4d19c071d7f9cb0a54d74603f5f4610eed3a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion.rs @@ -0,0 +1,105 @@ +//! 
Logic for controlling the rate at which data is sent + +use crate::Instant; +use crate::connection::RttEstimator; +use std::any::Any; +use std::sync::Arc; + +mod bbr; +mod cubic; +mod new_reno; + +pub use bbr::{Bbr, BbrConfig}; +pub use cubic::{Cubic, CubicConfig}; +pub use new_reno::{NewReno, NewRenoConfig}; + +/// Common interface for different congestion controllers +pub trait Controller: Send + Sync { + /// One or more packets were just sent + #[allow(unused_variables)] + fn on_sent(&mut self, now: Instant, bytes: u64, last_packet_number: u64) {} + + /// Packet deliveries were confirmed + /// + /// `app_limited` indicates whether the connection was blocked on outgoing + /// application data prior to receiving these acknowledgements. + #[allow(unused_variables)] + fn on_ack( + &mut self, + now: Instant, + sent: Instant, + bytes: u64, + app_limited: bool, + rtt: &RttEstimator, + ) { + } + + /// Packets are acked in batches, all with the same `now` argument. This indicates one of those batches has completed. + #[allow(unused_variables)] + fn on_end_acks( + &mut self, + now: Instant, + in_flight: u64, + app_limited: bool, + largest_packet_num_acked: Option, + ) { + } + + /// Packets were deemed lost or marked congested + /// + /// `in_persistent_congestion` indicates whether all packets sent within the persistent + /// congestion threshold period ending when the most recent packet in this batch was sent were + /// lost. + /// `lost_bytes` indicates how many bytes were lost. This value will be 0 for ECN triggers. 
+ fn on_congestion_event( + &mut self, + now: Instant, + sent: Instant, + is_persistent_congestion: bool, + lost_bytes: u64, + ); + + /// The known MTU for the current network path has been updated + fn on_mtu_update(&mut self, new_mtu: u16); + + /// Number of ack-eliciting bytes that may be in flight + fn window(&self) -> u64; + + /// Retrieve implementation-specific metrics used to populate `qlog` traces when they are enabled + fn metrics(&self) -> ControllerMetrics { + ControllerMetrics { + congestion_window: self.window(), + ssthresh: None, + pacing_rate: None, + } + } + + /// Duplicate the controller's state + fn clone_box(&self) -> Box; + + /// Initial congestion window + fn initial_window(&self) -> u64; + + /// Returns Self for use in down-casting to extract implementation details + fn into_any(self: Box) -> Box; +} + +/// Common congestion controller metrics +#[derive(Default)] +#[non_exhaustive] +pub struct ControllerMetrics { + /// Congestion window (bytes) + pub congestion_window: u64, + /// Slow start threshold (bytes) + pub ssthresh: Option, + /// Pacing rate (bits/s) + pub pacing_rate: Option, +} + +/// Constructs controllers on demand +pub trait ControllerFactory { + /// Construct a fresh `Controller` + fn build(self: Arc, now: Instant, current_mtu: u16) -> Box; +} + +const BASE_DATAGRAM_SIZE: u64 = 1200; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/bbr/bw_estimation.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/bbr/bw_estimation.rs new file mode 100644 index 0000000000000000000000000000000000000000..84ea4e68755ac13b23dddf220c1439e5b8dcf729 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/bbr/bw_estimation.rs @@ -0,0 +1,101 @@ +use std::fmt::{Debug, Display, Formatter}; + +use super::min_max::MinMax; +use crate::{Duration, Instant}; + +#[derive(Clone, Debug, Default)] +pub(crate) struct 
BandwidthEstimation { + total_acked: u64, + prev_total_acked: u64, + acked_time: Option, + prev_acked_time: Option, + total_sent: u64, + prev_total_sent: u64, + sent_time: Option, + prev_sent_time: Option, + max_filter: MinMax, + acked_at_last_window: u64, +} + +impl BandwidthEstimation { + pub(crate) fn on_sent(&mut self, now: Instant, bytes: u64) { + self.prev_total_sent = self.total_sent; + self.total_sent += bytes; + self.prev_sent_time = self.sent_time; + self.sent_time = Some(now); + } + + pub(crate) fn on_ack( + &mut self, + now: Instant, + _sent: Instant, + bytes: u64, + round: u64, + app_limited: bool, + ) { + self.prev_total_acked = self.total_acked; + self.total_acked += bytes; + self.prev_acked_time = self.acked_time; + self.acked_time = Some(now); + + let prev_sent_time = match self.prev_sent_time { + Some(prev_sent_time) => prev_sent_time, + None => return, + }; + + let send_rate = match self.sent_time { + Some(sent_time) if sent_time > prev_sent_time => Self::bw_from_delta( + self.total_sent - self.prev_total_sent, + sent_time - prev_sent_time, + ) + .unwrap_or(0), + _ => u64::MAX, // will take the min of send and ack, so this is just a skip + }; + + let ack_rate = match self.prev_acked_time { + Some(prev_acked_time) => Self::bw_from_delta( + self.total_acked - self.prev_total_acked, + now - prev_acked_time, + ) + .unwrap_or(0), + None => 0, + }; + + let bandwidth = send_rate.min(ack_rate); + if !app_limited && self.max_filter.get() < bandwidth { + self.max_filter.update_max(round, bandwidth); + } + } + + pub(crate) fn bytes_acked_this_window(&self) -> u64 { + self.total_acked - self.acked_at_last_window + } + + pub(crate) fn end_acks(&mut self, _current_round: u64, _app_limited: bool) { + self.acked_at_last_window = self.total_acked; + } + + pub(crate) fn get_estimate(&self) -> u64 { + self.max_filter.get() + } + + pub(crate) const fn bw_from_delta(bytes: u64, delta: Duration) -> Option { + let window_duration_ns = delta.as_nanos(); + if 
window_duration_ns == 0 { + return None; + } + let b_ns = bytes * 1_000_000_000; + let bytes_per_second = b_ns / (window_duration_ns as u64); + Some(bytes_per_second) + } +} + +impl Display for BandwidthEstimation { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{:.3} MB/s", + self.get_estimate() as f32 / (1024 * 1024) as f32 + ) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/bbr/min_max.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/bbr/min_max.rs new file mode 100644 index 0000000000000000000000000000000000000000..97a701bc370db2dadc779ae4f6b20985ff668db4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/bbr/min_max.rs @@ -0,0 +1,152 @@ +/* + * Based on Google code released under BSD license here: + * https://groups.google.com/forum/#!topic/bbr-dev/3RTgkzi5ZD8 + */ + +/* + * Kathleen Nichols' algorithm for tracking the minimum (or maximum) + * value of a data stream over some fixed time interval. (E.g., + * the minimum RTT over the past five minutes.) It uses constant + * space and constant time per update yet almost always delivers + * the same minimum as an implementation that has to keep all the + * data in the window. + * + * The algorithm keeps track of the best, 2nd best & 3rd best min + * values, maintaining an invariant that the measurement time of + * the n'th best >= n-1'th best. It also makes sure that the three + * values are widely separated in the time window since that bounds + * the worse case error when that data is monotonically increasing + * over the window. + * + * Upon getting a new min, we can forget everything earlier because + * it has no value - the new min is <= everything else in the window + * by definition and it samples the most recent. So we restart fresh on + * every new min and overwrites 2nd & 3rd choices. 
The same property + * holds for 2nd & 3rd best. + */ + +use std::fmt::Debug; + +#[derive(Copy, Clone, Debug)] +pub(super) struct MinMax { + /// round count, not a timestamp + window: u64, + samples: [MinMaxSample; 3], +} + +impl MinMax { + pub(super) fn get(&self) -> u64 { + self.samples[0].value + } + + fn fill(&mut self, sample: MinMaxSample) { + self.samples.fill(sample); + } + + pub(super) fn reset(&mut self) { + self.fill(Default::default()) + } + + /// update_min is also defined in the original source, but removed here since it is not used. + pub(super) fn update_max(&mut self, current_round: u64, measurement: u64) { + let sample = MinMaxSample { + time: current_round, + value: measurement, + }; + + if self.samples[0].value == 0 /* uninitialised */ + || /* found new max? */ sample.value >= self.samples[0].value + || /* nothing left in window? */ sample.time - self.samples[2].time > self.window + { + self.fill(sample); /* forget earlier samples */ + return; + } + + if sample.value >= self.samples[1].value { + self.samples[2] = sample; + self.samples[1] = sample; + } else if sample.value >= self.samples[2].value { + self.samples[2] = sample; + } + + self.subwin_update(sample); + } + + /* As time advances, update the 1st, 2nd, and 3rd choices. */ + fn subwin_update(&mut self, sample: MinMaxSample) { + let dt = sample.time - self.samples[0].time; + if dt > self.window { + /* + * Passed entire window without a new sample so make 2nd + * choice the new sample & 3rd choice the new 2nd choice. + * we may have to iterate this since our 2nd choice + * may also be outside the window (we checked on entry + * that the third choice was in the window). 
+ */ + self.samples[0] = self.samples[1]; + self.samples[1] = self.samples[2]; + self.samples[2] = sample; + if sample.time - self.samples[0].time > self.window { + self.samples[0] = self.samples[1]; + self.samples[1] = self.samples[2]; + self.samples[2] = sample; + } + } else if self.samples[1].time == self.samples[0].time && dt > self.window / 4 { + /* + * We've passed a quarter of the window without a new sample + * so take a 2nd choice from the 2nd quarter of the window. + */ + self.samples[2] = sample; + self.samples[1] = sample; + } else if self.samples[2].time == self.samples[1].time && dt > self.window / 2 { + /* + * We've passed half the window without finding a new sample + * so take a 3rd choice from the last half of the window + */ + self.samples[2] = sample; + } + } +} + +impl Default for MinMax { + fn default() -> Self { + Self { + window: 10, + samples: [Default::default(); 3], + } + } +} + +#[derive(Debug, Copy, Clone, Default)] +struct MinMaxSample { + /// round number, not a timestamp + time: u64, + value: u64, +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test() { + let round = 25; + let mut min_max = MinMax::default(); + min_max.update_max(round + 1, 100); + assert_eq!(100, min_max.get()); + min_max.update_max(round + 3, 120); + assert_eq!(120, min_max.get()); + min_max.update_max(round + 5, 160); + assert_eq!(160, min_max.get()); + min_max.update_max(round + 7, 100); + assert_eq!(160, min_max.get()); + min_max.update_max(round + 10, 100); + assert_eq!(160, min_max.get()); + min_max.update_max(round + 14, 100); + assert_eq!(160, min_max.get()); + min_max.update_max(round + 16, 100); + assert_eq!(100, min_max.get()); + min_max.update_max(round + 18, 130); + assert_eq!(130, min_max.get()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/bbr/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/bbr/mod.rs new file mode 100644 
index 0000000000000000000000000000000000000000..97fb3bffcdc8baa1fe91c27a49ebfe8bd7b6c8aa --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/bbr/mod.rs @@ -0,0 +1,646 @@ +use std::any::Any; +use std::fmt::Debug; +use std::sync::Arc; + +use rand::{Rng, SeedableRng}; + +use crate::congestion::ControllerMetrics; +use crate::congestion::bbr::bw_estimation::BandwidthEstimation; +use crate::congestion::bbr::min_max::MinMax; +use crate::connection::RttEstimator; +use crate::{Duration, Instant}; + +use super::{BASE_DATAGRAM_SIZE, Controller, ControllerFactory}; + +mod bw_estimation; +mod min_max; + +/// Experimental! Use at your own risk. +/// +/// Aims for reduced buffer bloat and improved performance over high bandwidth-delay product networks. +/// Based on google's quiche implementation +/// of BBR . +/// More discussion and links at . +#[derive(Debug, Clone)] +pub struct Bbr { + config: Arc, + current_mtu: u64, + max_bandwidth: BandwidthEstimation, + acked_bytes: u64, + mode: Mode, + loss_state: LossState, + recovery_state: RecoveryState, + recovery_window: u64, + is_at_full_bandwidth: bool, + pacing_gain: f32, + high_gain: f32, + drain_gain: f32, + cwnd_gain: f32, + high_cwnd_gain: f32, + last_cycle_start: Option, + current_cycle_offset: u8, + init_cwnd: u64, + min_cwnd: u64, + prev_in_flight_count: u64, + exit_probe_rtt_at: Option, + probe_rtt_last_started_at: Option, + min_rtt: Duration, + exiting_quiescence: bool, + pacing_rate: u64, + max_acked_packet_number: u64, + max_sent_packet_number: u64, + end_recovery_at_packet_number: u64, + cwnd: u64, + current_round_trip_end_packet_number: u64, + round_count: u64, + bw_at_last_round: u64, + round_wo_bw_gain: u64, + ack_aggregation: AckAggregationState, + random_number_generator: rand::rngs::StdRng, +} + +impl Bbr { + /// Construct a state using the given `config` and current time `now` + pub fn new(config: Arc, current_mtu: u16) -> Self { + let initial_window = 
config.initial_window; + Self { + config, + current_mtu: current_mtu as u64, + max_bandwidth: BandwidthEstimation::default(), + acked_bytes: 0, + mode: Mode::Startup, + loss_state: Default::default(), + recovery_state: RecoveryState::NotInRecovery, + recovery_window: 0, + is_at_full_bandwidth: false, + pacing_gain: K_DEFAULT_HIGH_GAIN, + high_gain: K_DEFAULT_HIGH_GAIN, + drain_gain: 1.0 / K_DEFAULT_HIGH_GAIN, + cwnd_gain: K_DEFAULT_HIGH_GAIN, + high_cwnd_gain: K_DEFAULT_HIGH_GAIN, + last_cycle_start: None, + current_cycle_offset: 0, + init_cwnd: initial_window, + min_cwnd: calculate_min_window(current_mtu as u64), + prev_in_flight_count: 0, + exit_probe_rtt_at: None, + probe_rtt_last_started_at: None, + min_rtt: Default::default(), + exiting_quiescence: false, + pacing_rate: 0, + max_acked_packet_number: 0, + max_sent_packet_number: 0, + end_recovery_at_packet_number: 0, + cwnd: initial_window, + current_round_trip_end_packet_number: 0, + round_count: 0, + bw_at_last_round: 0, + round_wo_bw_gain: 0, + ack_aggregation: AckAggregationState::default(), + random_number_generator: rand::rngs::StdRng::from_os_rng(), + } + } + + fn enter_startup_mode(&mut self) { + self.mode = Mode::Startup; + self.pacing_gain = self.high_gain; + self.cwnd_gain = self.high_cwnd_gain; + } + + fn enter_probe_bandwidth_mode(&mut self, now: Instant) { + self.mode = Mode::ProbeBw; + self.cwnd_gain = K_DERIVED_HIGH_CWNDGAIN; + self.last_cycle_start = Some(now); + // Pick a random offset for the gain cycle out of {0, 2..7} range. 1 is + // excluded because in that case increased gain and decreased gain would not + // follow each other. 
+ let mut rand_index = self + .random_number_generator + .random_range(0..K_PACING_GAIN.len() as u8 - 1); + if rand_index >= 1 { + rand_index += 1; + } + self.current_cycle_offset = rand_index; + self.pacing_gain = K_PACING_GAIN[rand_index as usize]; + } + + fn update_recovery_state(&mut self, is_round_start: bool) { + // Exit recovery when there are no losses for a round. + if self.loss_state.has_losses() { + self.end_recovery_at_packet_number = self.max_sent_packet_number; + } + match self.recovery_state { + // Enter conservation on the first loss. + RecoveryState::NotInRecovery if self.loss_state.has_losses() => { + self.recovery_state = RecoveryState::Conservation; + // This will cause the |recovery_window| to be set to the + // correct value in CalculateRecoveryWindow(). + self.recovery_window = 0; + // Since the conservation phase is meant to be lasting for a whole + // round, extend the current round as if it were started right now. + self.current_round_trip_end_packet_number = self.max_sent_packet_number; + } + RecoveryState::Growth | RecoveryState::Conservation => { + if self.recovery_state == RecoveryState::Conservation && is_round_start { + self.recovery_state = RecoveryState::Growth; + } + // Exit recovery if appropriate. + if !self.loss_state.has_losses() + && self.max_acked_packet_number > self.end_recovery_at_packet_number + { + self.recovery_state = RecoveryState::NotInRecovery; + } + } + _ => {} + } + } + + fn update_gain_cycle_phase(&mut self, now: Instant, in_flight: u64) { + // In most cases, the cycle is advanced after an RTT passes. + let mut should_advance_gain_cycling = self + .last_cycle_start + .map(|last_cycle_start| now.duration_since(last_cycle_start) > self.min_rtt) + .unwrap_or(false); + // If the pacing gain is above 1.0, the connection is trying to probe the + // bandwidth by increasing the number of bytes in flight to at least + // pacing_gain * BDP. 
Make sure that it actually reaches the target, as + // long as there are no losses suggesting that the buffers are not able to + // hold that much. + if self.pacing_gain > 1.0 + && !self.loss_state.has_losses() + && self.prev_in_flight_count < self.get_target_cwnd(self.pacing_gain) + { + should_advance_gain_cycling = false; + } + + // If pacing gain is below 1.0, the connection is trying to drain the extra + // queue which could have been incurred by probing prior to it. If the + // number of bytes in flight falls down to the estimated BDP value earlier, + // conclude that the queue has been successfully drained and exit this cycle + // early. + if self.pacing_gain < 1.0 && in_flight <= self.get_target_cwnd(1.0) { + should_advance_gain_cycling = true; + } + + if should_advance_gain_cycling { + self.current_cycle_offset = (self.current_cycle_offset + 1) % K_PACING_GAIN.len() as u8; + self.last_cycle_start = Some(now); + // Stay in low gain mode until the target BDP is hit. Low gain mode + // will be exited immediately when the target BDP is achieved. 
+ if DRAIN_TO_TARGET + && self.pacing_gain < 1.0 + && (K_PACING_GAIN[self.current_cycle_offset as usize] - 1.0).abs() < f32::EPSILON + && in_flight > self.get_target_cwnd(1.0) + { + return; + } + self.pacing_gain = K_PACING_GAIN[self.current_cycle_offset as usize]; + } + } + + fn maybe_exit_startup_or_drain(&mut self, now: Instant, in_flight: u64) { + if self.mode == Mode::Startup && self.is_at_full_bandwidth { + self.mode = Mode::Drain; + self.pacing_gain = self.drain_gain; + self.cwnd_gain = self.high_cwnd_gain; + } + if self.mode == Mode::Drain && in_flight <= self.get_target_cwnd(1.0) { + self.enter_probe_bandwidth_mode(now); + } + } + + fn is_min_rtt_expired(&self, now: Instant, app_limited: bool) -> bool { + !app_limited + && self + .probe_rtt_last_started_at + .map(|last| now.saturating_duration_since(last) > Duration::from_secs(10)) + .unwrap_or(true) + } + + fn maybe_enter_or_exit_probe_rtt( + &mut self, + now: Instant, + is_round_start: bool, + bytes_in_flight: u64, + app_limited: bool, + ) { + let min_rtt_expired = self.is_min_rtt_expired(now, app_limited); + if min_rtt_expired && !self.exiting_quiescence && self.mode != Mode::ProbeRtt { + self.mode = Mode::ProbeRtt; + self.pacing_gain = 1.0; + // Do not decide on the time to exit ProbeRtt until the + // |bytes_in_flight| is at the target small value. + self.exit_probe_rtt_at = None; + self.probe_rtt_last_started_at = Some(now); + } + + if self.mode == Mode::ProbeRtt { + if self.exit_probe_rtt_at.is_none() { + // If the window has reached the appropriate size, schedule exiting + // ProbeRtt. The CWND during ProbeRtt is + // kMinimumCongestionWindow, but we allow an extra packet since QUIC + // checks CWND before sending a packet. 
+ if bytes_in_flight < self.get_probe_rtt_cwnd() + self.current_mtu { + const K_PROBE_RTT_TIME: Duration = Duration::from_millis(200); + self.exit_probe_rtt_at = Some(now + K_PROBE_RTT_TIME); + } + } else if is_round_start && now >= self.exit_probe_rtt_at.unwrap() { + if !self.is_at_full_bandwidth { + self.enter_startup_mode(); + } else { + self.enter_probe_bandwidth_mode(now); + } + } + } + + self.exiting_quiescence = false; + } + + fn get_target_cwnd(&self, gain: f32) -> u64 { + let bw = self.max_bandwidth.get_estimate(); + let bdp = self.min_rtt.as_micros() as u64 * bw; + let bdpf = bdp as f64; + let cwnd = ((gain as f64 * bdpf) / 1_000_000f64) as u64; + // BDP estimate will be zero if no bandwidth samples are available yet. + if cwnd == 0 { + return self.init_cwnd; + } + cwnd.max(self.min_cwnd) + } + + fn get_probe_rtt_cwnd(&self) -> u64 { + const K_MODERATE_PROBE_RTT_MULTIPLIER: f32 = 0.75; + if PROBE_RTT_BASED_ON_BDP { + return self.get_target_cwnd(K_MODERATE_PROBE_RTT_MULTIPLIER); + } + self.min_cwnd + } + + fn calculate_pacing_rate(&mut self) { + let bw = self.max_bandwidth.get_estimate(); + if bw == 0 { + return; + } + let target_rate = (bw as f64 * self.pacing_gain as f64) as u64; + if self.is_at_full_bandwidth { + self.pacing_rate = target_rate; + return; + } + + // Pace at the rate of initial_window / RTT as soon as RTT measurements are + // available. + if self.pacing_rate == 0 && self.min_rtt.as_nanos() != 0 { + self.pacing_rate = + BandwidthEstimation::bw_from_delta(self.init_cwnd, self.min_rtt).unwrap(); + return; + } + + // Do not decrease the pacing rate during startup. + if self.pacing_rate < target_rate { + self.pacing_rate = target_rate; + } + } + + fn calculate_cwnd(&mut self, bytes_acked: u64, excess_acked: u64) { + if self.mode == Mode::ProbeRtt { + return; + } + let mut target_window = self.get_target_cwnd(self.cwnd_gain); + if self.is_at_full_bandwidth { + // Add the max recently measured ack aggregation to CWND. 
+            target_window += self.ack_aggregation.max_ack_height.get();
+        } else {
+            // Add the most recent excess acked. Because CWND never decreases in
+            // STARTUP, this will automatically create a very localized max filter.
+            target_window += excess_acked;
+        }
+        // Instead of immediately setting the target CWND as the new one, BBR grows
+        // the CWND towards |target_window| by only increasing it |bytes_acked| at a
+        // time.
+        if self.is_at_full_bandwidth {
+            self.cwnd = target_window.min(self.cwnd + bytes_acked);
+        } else if (self.cwnd < target_window) || (self.acked_bytes < self.init_cwnd) {
+            // BUG FIX: this previously compared `self.cwnd_gain` (a dimensionless
+            // factor of ~2.0) against `target_window` (a byte count), which is
+            // effectively always true and let the window grow past the BDP target
+            // on every ack. The intent — mirrored from the reference BBR sender —
+            // is: while still in startup, keep growing until the window itself
+            // reaches the target; never decrease it here.
+            self.cwnd += bytes_acked;
+        }
+
+        // Enforce the limits on the congestion window.
+        if self.cwnd < self.min_cwnd {
+            self.cwnd = self.min_cwnd;
+        }
+    }
+
+    /// Maintains the recovery window while in Conservation/Growth recovery.
+    ///
+    /// Subtracts `bytes_lost`, optionally releases `bytes_acked` (Growth only),
+    /// and clamps so we can always send at least `in_flight + bytes_acked` or
+    /// the minimum window, whichever is larger.
+    fn calculate_recovery_window(&mut self, bytes_acked: u64, bytes_lost: u64, in_flight: u64) {
+        if !self.recovery_state.in_recovery() {
+            return;
+        }
+        // Set up the initial recovery window.
+        if self.recovery_window == 0 {
+            self.recovery_window = self.min_cwnd.max(in_flight + bytes_acked);
+            return;
+        }
+
+        // Remove losses from the recovery window, while accounting for a potential
+        // integer underflow.
+        if self.recovery_window >= bytes_lost {
+            self.recovery_window -= bytes_lost;
+        } else {
+            // k_max_segment_size = current_mtu
+            self.recovery_window = self.current_mtu;
+        }
+        // In CONSERVATION mode, just subtracting losses is sufficient. In GROWTH,
+        // release additional |bytes_acked| to achieve a slow-start-like behavior.
+        if self.recovery_state == RecoveryState::Growth {
+            self.recovery_window += bytes_acked;
+        }
+
+        // Sanity checks. Ensure that we always allow to send at least an MSS or
+        // |bytes_acked| in response, whichever is larger.
+ self.recovery_window = self + .recovery_window + .max(in_flight + bytes_acked) + .max(self.min_cwnd); + } + + /// + fn check_if_full_bw_reached(&mut self, app_limited: bool) { + if app_limited { + return; + } + let target = (self.bw_at_last_round as f64 * K_STARTUP_GROWTH_TARGET as f64) as u64; + let bw = self.max_bandwidth.get_estimate(); + if bw >= target { + self.bw_at_last_round = bw; + self.round_wo_bw_gain = 0; + self.ack_aggregation.max_ack_height.reset(); + return; + } + + self.round_wo_bw_gain += 1; + if self.round_wo_bw_gain >= K_ROUND_TRIPS_WITHOUT_GROWTH_BEFORE_EXITING_STARTUP as u64 + || (self.recovery_state.in_recovery()) + { + self.is_at_full_bandwidth = true; + } + } +} + +impl Controller for Bbr { + fn on_sent(&mut self, now: Instant, bytes: u64, last_packet_number: u64) { + self.max_sent_packet_number = last_packet_number; + self.max_bandwidth.on_sent(now, bytes); + } + + fn on_ack( + &mut self, + now: Instant, + sent: Instant, + bytes: u64, + app_limited: bool, + rtt: &RttEstimator, + ) { + self.max_bandwidth + .on_ack(now, sent, bytes, self.round_count, app_limited); + self.acked_bytes += bytes; + if self.is_min_rtt_expired(now, app_limited) || self.min_rtt > rtt.min() { + self.min_rtt = rtt.min(); + } + } + + fn on_end_acks( + &mut self, + now: Instant, + in_flight: u64, + app_limited: bool, + largest_packet_num_acked: Option, + ) { + let bytes_acked = self.max_bandwidth.bytes_acked_this_window(); + let excess_acked = self.ack_aggregation.update_ack_aggregation_bytes( + bytes_acked, + now, + self.round_count, + self.max_bandwidth.get_estimate(), + ); + self.max_bandwidth.end_acks(self.round_count, app_limited); + if let Some(largest_acked_packet) = largest_packet_num_acked { + self.max_acked_packet_number = largest_acked_packet; + } + + let mut is_round_start = false; + if bytes_acked > 0 { + is_round_start = + self.max_acked_packet_number > self.current_round_trip_end_packet_number; + if is_round_start { + 
self.current_round_trip_end_packet_number = self.max_sent_packet_number; + self.round_count += 1; + } + } + + self.update_recovery_state(is_round_start); + + if self.mode == Mode::ProbeBw { + self.update_gain_cycle_phase(now, in_flight); + } + + if is_round_start && !self.is_at_full_bandwidth { + self.check_if_full_bw_reached(app_limited); + } + + self.maybe_exit_startup_or_drain(now, in_flight); + + self.maybe_enter_or_exit_probe_rtt(now, is_round_start, in_flight, app_limited); + + // After the model is updated, recalculate the pacing rate and congestion window. + self.calculate_pacing_rate(); + self.calculate_cwnd(bytes_acked, excess_acked); + self.calculate_recovery_window(bytes_acked, self.loss_state.lost_bytes, in_flight); + + self.prev_in_flight_count = in_flight; + self.loss_state.reset(); + } + + fn on_congestion_event( + &mut self, + _now: Instant, + _sent: Instant, + _is_persistent_congestion: bool, + lost_bytes: u64, + ) { + self.loss_state.lost_bytes += lost_bytes; + } + + fn on_mtu_update(&mut self, new_mtu: u16) { + self.current_mtu = new_mtu as u64; + self.min_cwnd = calculate_min_window(self.current_mtu); + self.init_cwnd = self.config.initial_window.max(self.min_cwnd); + self.cwnd = self.cwnd.max(self.min_cwnd); + } + + fn window(&self) -> u64 { + if self.mode == Mode::ProbeRtt { + return self.get_probe_rtt_cwnd(); + } else if self.recovery_state.in_recovery() && self.mode != Mode::Startup { + return self.cwnd.min(self.recovery_window); + } + self.cwnd + } + + fn metrics(&self) -> ControllerMetrics { + ControllerMetrics { + congestion_window: self.window(), + ssthresh: None, + pacing_rate: Some(self.pacing_rate * 8), + } + } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } + + fn initial_window(&self) -> u64 { + self.config.initial_window + } + + fn into_any(self: Box) -> Box { + self + } +} + +/// Configuration for the [`Bbr`] congestion controller +#[derive(Debug, Clone)] +pub struct BbrConfig { + initial_window: u64, +} + +impl 
BbrConfig { + /// Default limit on the amount of outstanding data in bytes. + /// + /// Recommended value: `min(10 * max_datagram_size, max(2 * max_datagram_size, 14720))` + pub fn initial_window(&mut self, value: u64) -> &mut Self { + self.initial_window = value; + self + } +} + +impl Default for BbrConfig { + fn default() -> Self { + Self { + initial_window: K_MAX_INITIAL_CONGESTION_WINDOW * BASE_DATAGRAM_SIZE, + } + } +} + +impl ControllerFactory for BbrConfig { + fn build(self: Arc, _now: Instant, current_mtu: u16) -> Box { + Box::new(Bbr::new(self, current_mtu)) + } +} + +#[derive(Debug, Default, Copy, Clone)] +struct AckAggregationState { + max_ack_height: MinMax, + aggregation_epoch_start_time: Option, + aggregation_epoch_bytes: u64, +} + +impl AckAggregationState { + fn update_ack_aggregation_bytes( + &mut self, + newly_acked_bytes: u64, + now: Instant, + round: u64, + max_bandwidth: u64, + ) -> u64 { + // Compute how many bytes are expected to be delivered, assuming max + // bandwidth is correct. + let expected_bytes_acked = max_bandwidth + * now + .saturating_duration_since(self.aggregation_epoch_start_time.unwrap_or(now)) + .as_micros() as u64 + / 1_000_000; + + // Reset the current aggregation epoch as soon as the ack arrival rate is + // less than or equal to the max bandwidth. + if self.aggregation_epoch_bytes <= expected_bytes_acked { + // Reset to start measuring a new aggregation epoch. + self.aggregation_epoch_bytes = newly_acked_bytes; + self.aggregation_epoch_start_time = Some(now); + return 0; + } + + // Compute how many extra bytes were delivered vs max bandwidth. + // Include the bytes most recently acknowledged to account for stretch acks. + self.aggregation_epoch_bytes += newly_acked_bytes; + let diff = self.aggregation_epoch_bytes - expected_bytes_acked; + self.max_ack_height.update_max(round, diff); + diff + } +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +enum Mode { + // Startup phase of the connection. 
+ Startup, + // After achieving the highest possible bandwidth during the startup, lower + // the pacing rate in order to drain the queue. + Drain, + // Cruising mode. + ProbeBw, + // Temporarily slow down sending in order to empty the buffer and measure + // the real minimum RTT. + ProbeRtt, +} + +// Indicates how the congestion control limits the amount of bytes in flight. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +enum RecoveryState { + // Do not limit. + NotInRecovery, + // Allow an extra outstanding byte for each byte acknowledged. + Conservation, + // Allow two extra outstanding bytes for each byte acknowledged (slow + // start). + Growth, +} + +impl RecoveryState { + pub(super) fn in_recovery(&self) -> bool { + !matches!(self, Self::NotInRecovery) + } +} + +#[derive(Debug, Clone, Default)] +struct LossState { + lost_bytes: u64, +} + +impl LossState { + pub(super) fn reset(&mut self) { + self.lost_bytes = 0; + } + + pub(super) fn has_losses(&self) -> bool { + self.lost_bytes != 0 + } +} + +fn calculate_min_window(current_mtu: u64) -> u64 { + 4 * current_mtu +} + +// The gain used for the STARTUP, equal to 2/ln(2). +const K_DEFAULT_HIGH_GAIN: f32 = 2.885; +// The newly derived CWND gain for STARTUP, 2. +const K_DERIVED_HIGH_CWNDGAIN: f32 = 2.0; +// The cycle of gains used during the ProbeBw stage. +const K_PACING_GAIN: [f32; 8] = [1.25, 0.75, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]; + +const K_STARTUP_GROWTH_TARGET: f32 = 1.25; +const K_ROUND_TRIPS_WITHOUT_GROWTH_BEFORE_EXITING_STARTUP: u8 = 3; + +// Do not allow initial congestion window to be greater than 200 packets. 
+const K_MAX_INITIAL_CONGESTION_WINDOW: u64 = 200; + +const PROBE_RTT_BASED_ON_BDP: bool = true; +const DRAIN_TO_TARGET: bool = true; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/cubic.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/cubic.rs new file mode 100644 index 0000000000000000000000000000000000000000..1bd9c69de6fbec643d630378c8509f0cbc49e47e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/cubic.rs @@ -0,0 +1,272 @@ +use std::any::Any; +use std::cmp; +use std::sync::Arc; + +use super::{BASE_DATAGRAM_SIZE, Controller, ControllerFactory}; +use crate::connection::RttEstimator; +use crate::{Duration, Instant}; + +/// CUBIC Constants. +/// +/// These are recommended value in RFC8312. +const BETA_CUBIC: f64 = 0.7; + +const C: f64 = 0.4; + +/// CUBIC State Variables. +/// +/// We need to keep those variables across the connection. +/// k, w_max are described in the RFC. +#[derive(Debug, Default, Clone)] +pub(super) struct State { + k: f64, + + w_max: f64, + + // Store cwnd increment during congestion avoidance. + cwnd_inc: u64, +} + +/// CUBIC Functions. +/// +/// Note that these calculations are based on a count of cwnd as bytes, +/// not packets. +/// Unit of t (duration) and RTT are based on seconds (f64). +impl State { + // K = cbrt(w_max * (1 - beta_cubic) / C) (Eq. 2) + fn cubic_k(&self, max_datagram_size: u64) -> f64 { + let w_max = self.w_max / max_datagram_size as f64; + (w_max * (1.0 - BETA_CUBIC) / C).cbrt() + } + + // W_cubic(t) = C * (t - K)^3 - w_max (Eq. 1) + fn w_cubic(&self, t: Duration, max_datagram_size: u64) -> f64 { + let w_max = self.w_max / max_datagram_size as f64; + + (C * (t.as_secs_f64() - self.k).powi(3) + w_max) * max_datagram_size as f64 + } + + // W_est(t) = w_max * beta_cubic + 3 * (1 - beta_cubic) / (1 + beta_cubic) * + // (t / RTT) (Eq. 
4) + fn w_est(&self, t: Duration, rtt: Duration, max_datagram_size: u64) -> f64 { + let w_max = self.w_max / max_datagram_size as f64; + (w_max * BETA_CUBIC + + 3.0 * (1.0 - BETA_CUBIC) / (1.0 + BETA_CUBIC) * t.as_secs_f64() / rtt.as_secs_f64()) + * max_datagram_size as f64 + } +} + +/// The RFC8312 congestion controller, as widely used for TCP +#[derive(Debug, Clone)] +pub struct Cubic { + config: Arc, + /// Maximum number of bytes in flight that may be sent. + window: u64, + /// Slow start threshold in bytes. When the congestion window is below ssthresh, the mode is + /// slow start and the window grows by the number of bytes acknowledged. + ssthresh: u64, + /// The time when QUIC first detects a loss, causing it to enter recovery. When a packet sent + /// after this time is acknowledged, QUIC exits recovery. + recovery_start_time: Option, + cubic_state: State, + current_mtu: u64, +} + +impl Cubic { + /// Construct a state using the given `config` and current time `now` + pub fn new(config: Arc, _now: Instant, current_mtu: u16) -> Self { + Self { + window: config.initial_window, + ssthresh: u64::MAX, + recovery_start_time: None, + config, + cubic_state: Default::default(), + current_mtu: current_mtu as u64, + } + } + + fn minimum_window(&self) -> u64 { + 2 * self.current_mtu + } +} + +impl Controller for Cubic { + fn on_ack( + &mut self, + now: Instant, + sent: Instant, + bytes: u64, + app_limited: bool, + rtt: &RttEstimator, + ) { + if app_limited + || self + .recovery_start_time + .map(|recovery_start_time| sent <= recovery_start_time) + .unwrap_or(false) + { + return; + } + + if self.window < self.ssthresh { + // Slow start + self.window += bytes; + } else { + // Congestion avoidance. + let ca_start_time; + + match self.recovery_start_time { + Some(t) => ca_start_time = t, + None => { + // When we come here without congestion_event() triggered, + // initialize congestion_recovery_start_time, w_max and k. 
+ ca_start_time = now; + self.recovery_start_time = Some(now); + + self.cubic_state.w_max = self.window as f64; + self.cubic_state.k = 0.0; + } + } + + let t = now - ca_start_time; + + // w_cubic(t + rtt) + let w_cubic = self.cubic_state.w_cubic(t + rtt.get(), self.current_mtu); + + // w_est(t) + let w_est = self.cubic_state.w_est(t, rtt.get(), self.current_mtu); + + let mut cubic_cwnd = self.window; + + if w_cubic < w_est { + // TCP friendly region. + cubic_cwnd = cmp::max(cubic_cwnd, w_est as u64); + } else if cubic_cwnd < w_cubic as u64 { + // Concave region or convex region use same increment. + let cubic_inc = + (w_cubic - cubic_cwnd as f64) / cubic_cwnd as f64 * self.current_mtu as f64; + + cubic_cwnd += cubic_inc as u64; + } + + // Update the increment and increase cwnd by MSS. + self.cubic_state.cwnd_inc += cubic_cwnd - self.window; + + // cwnd_inc can be more than 1 MSS in the late stage of max probing. + // however RFC9002 §7.3.3 (Congestion Avoidance) limits + // the increase of cwnd to 1 max_datagram_size per cwnd acknowledged. 
+ if self.cubic_state.cwnd_inc >= self.current_mtu { + self.window += self.current_mtu; + self.cubic_state.cwnd_inc = 0; + } + } + } + + fn on_congestion_event( + &mut self, + now: Instant, + sent: Instant, + is_persistent_congestion: bool, + _lost_bytes: u64, + ) { + if self + .recovery_start_time + .map(|recovery_start_time| sent <= recovery_start_time) + .unwrap_or(false) + { + return; + } + + self.recovery_start_time = Some(now); + + // Fast convergence + if (self.window as f64) < self.cubic_state.w_max { + self.cubic_state.w_max = self.window as f64 * (1.0 + BETA_CUBIC) / 2.0; + } else { + self.cubic_state.w_max = self.window as f64; + } + + self.ssthresh = cmp::max( + (self.cubic_state.w_max * BETA_CUBIC) as u64, + self.minimum_window(), + ); + self.window = self.ssthresh; + self.cubic_state.k = self.cubic_state.cubic_k(self.current_mtu); + + self.cubic_state.cwnd_inc = (self.cubic_state.cwnd_inc as f64 * BETA_CUBIC) as u64; + + if is_persistent_congestion { + self.recovery_start_time = None; + self.cubic_state.w_max = self.window as f64; + + // 4.7 Timeout - reduce ssthresh based on BETA_CUBIC + self.ssthresh = cmp::max( + (self.window as f64 * BETA_CUBIC) as u64, + self.minimum_window(), + ); + + self.cubic_state.cwnd_inc = 0; + + self.window = self.minimum_window(); + } + } + + fn on_mtu_update(&mut self, new_mtu: u16) { + self.current_mtu = new_mtu as u64; + self.window = self.window.max(self.minimum_window()); + } + + fn window(&self) -> u64 { + self.window + } + + fn metrics(&self) -> super::ControllerMetrics { + super::ControllerMetrics { + congestion_window: self.window(), + ssthresh: Some(self.ssthresh), + pacing_rate: None, + } + } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } + + fn initial_window(&self) -> u64 { + self.config.initial_window + } + + fn into_any(self: Box) -> Box { + self + } +} + +/// Configuration for the `Cubic` congestion controller +#[derive(Debug, Clone)] +pub struct CubicConfig { + initial_window: u64, +} + 
+impl CubicConfig { + /// Default limit on the amount of outstanding data in bytes. + /// + /// Recommended value: `min(10 * max_datagram_size, max(2 * max_datagram_size, 14720))` + pub fn initial_window(&mut self, value: u64) -> &mut Self { + self.initial_window = value; + self + } +} + +impl Default for CubicConfig { + fn default() -> Self { + Self { + initial_window: 14720.clamp(2 * BASE_DATAGRAM_SIZE, 10 * BASE_DATAGRAM_SIZE), + } + } +} + +impl ControllerFactory for CubicConfig { + fn build(self: Arc, now: Instant, current_mtu: u16) -> Box { + Box::new(Cubic::new(self, now, current_mtu)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/new_reno.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/new_reno.rs new file mode 100644 index 0000000000000000000000000000000000000000..7bc61c666a6d6264f5206167f8674db981fa3c23 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/congestion/new_reno.rs @@ -0,0 +1,172 @@ +use std::any::Any; +use std::sync::Arc; + +use super::{BASE_DATAGRAM_SIZE, Controller, ControllerFactory}; +use crate::Instant; +use crate::connection::RttEstimator; + +/// A simple, standard congestion controller +#[derive(Debug, Clone)] +pub struct NewReno { + config: Arc, + current_mtu: u64, + /// Maximum number of bytes in flight that may be sent. + window: u64, + /// Slow start threshold in bytes. When the congestion window is below ssthresh, the mode is + /// slow start and the window grows by the number of bytes acknowledged. + ssthresh: u64, + /// The time when QUIC first detects a loss, causing it to enter recovery. When a packet sent + /// after this time is acknowledged, QUIC exits recovery. 
+ recovery_start_time: Instant, + /// Bytes which had been acked by the peer since leaving slow start + bytes_acked: u64, +} + +impl NewReno { + /// Construct a state using the given `config` and current time `now` + pub fn new(config: Arc, now: Instant, current_mtu: u16) -> Self { + Self { + window: config.initial_window, + ssthresh: u64::MAX, + recovery_start_time: now, + current_mtu: current_mtu as u64, + config, + bytes_acked: 0, + } + } + + fn minimum_window(&self) -> u64 { + 2 * self.current_mtu + } +} + +impl Controller for NewReno { + fn on_ack( + &mut self, + _now: Instant, + sent: Instant, + bytes: u64, + app_limited: bool, + _rtt: &RttEstimator, + ) { + if app_limited || sent <= self.recovery_start_time { + return; + } + + if self.window < self.ssthresh { + // Slow start + self.window += bytes; + + if self.window >= self.ssthresh { + // Exiting slow start + // Initialize `bytes_acked` for congestion avoidance. The idea + // here is that any bytes over `sshthresh` will already be counted + // towards the congestion avoidance phase - independent of when + // how close to `sshthresh` the `window` was when switching states, + // and independent of datagram sizes. + self.bytes_acked = self.window - self.ssthresh; + } + } else { + // Congestion avoidance + // This implementation uses the method which does not require + // floating point math, which also increases the window by 1 datagram + // for every round trip. 
+ // This mechanism is called Appropriate Byte Counting in + // https://tools.ietf.org/html/rfc3465 + self.bytes_acked += bytes; + + if self.bytes_acked >= self.window { + self.bytes_acked -= self.window; + self.window += self.current_mtu; + } + } + } + + fn on_congestion_event( + &mut self, + now: Instant, + sent: Instant, + is_persistent_congestion: bool, + _lost_bytes: u64, + ) { + if sent <= self.recovery_start_time { + return; + } + + self.recovery_start_time = now; + self.window = (self.window as f32 * self.config.loss_reduction_factor) as u64; + self.window = self.window.max(self.minimum_window()); + self.ssthresh = self.window; + + if is_persistent_congestion { + self.window = self.minimum_window(); + } + } + + fn on_mtu_update(&mut self, new_mtu: u16) { + self.current_mtu = new_mtu as u64; + self.window = self.window.max(self.minimum_window()); + } + + fn window(&self) -> u64 { + self.window + } + + fn metrics(&self) -> super::ControllerMetrics { + super::ControllerMetrics { + congestion_window: self.window(), + ssthresh: Some(self.ssthresh), + pacing_rate: None, + } + } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } + + fn initial_window(&self) -> u64 { + self.config.initial_window + } + + fn into_any(self: Box) -> Box { + self + } +} + +/// Configuration for the `NewReno` congestion controller +#[derive(Debug, Clone)] +pub struct NewRenoConfig { + initial_window: u64, + loss_reduction_factor: f32, +} + +impl NewRenoConfig { + /// Default limit on the amount of outstanding data in bytes. + /// + /// Recommended value: `min(10 * max_datagram_size, max(2 * max_datagram_size, 14720))` + pub fn initial_window(&mut self, value: u64) -> &mut Self { + self.initial_window = value; + self + } + + /// Reduction in congestion window when a new loss event is detected. 
+ pub fn loss_reduction_factor(&mut self, value: f32) -> &mut Self { + self.loss_reduction_factor = value; + self + } +} + +impl Default for NewRenoConfig { + fn default() -> Self { + Self { + initial_window: 14720.clamp(2 * BASE_DATAGRAM_SIZE, 10 * BASE_DATAGRAM_SIZE), + loss_reduction_factor: 0.5, + } + } +} + +impl ControllerFactory for NewRenoConfig { + fn build(self: Arc, now: Instant, current_mtu: u16) -> Box { + Box::new(NewReno::new(self, now, current_mtu)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/ack_frequency.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/ack_frequency.rs new file mode 100644 index 0000000000000000000000000000000000000000..8de43d7c9da50d68ccc5bb6142e7c049d7d1ed14 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/ack_frequency.rs @@ -0,0 +1,155 @@ +use crate::Duration; +use crate::connection::spaces::PendingAcks; +use crate::frame::AckFrequency; +use crate::transport_parameters::TransportParameters; +use crate::{AckFrequencyConfig, TIMER_GRANULARITY, TransportError, VarInt}; + +/// State associated to ACK frequency +pub(super) struct AckFrequencyState { + // + // Sending ACK_FREQUENCY frames + // + in_flight_ack_frequency_frame: Option<(u64, Duration)>, + next_outgoing_sequence_number: VarInt, + pub(super) peer_max_ack_delay: Duration, + + // + // Receiving ACK_FREQUENCY frames + // + last_ack_frequency_frame: Option, + pub(super) max_ack_delay: Duration, +} + +impl AckFrequencyState { + pub(super) fn new(default_max_ack_delay: Duration) -> Self { + Self { + in_flight_ack_frequency_frame: None, + next_outgoing_sequence_number: VarInt(0), + peer_max_ack_delay: default_max_ack_delay, + + last_ack_frequency_frame: None, + max_ack_delay: default_max_ack_delay, + } + } + + /// Returns the `max_ack_delay` that should be requested of the peer when sending an + /// ACK_FREQUENCY 
frame + pub(super) fn candidate_max_ack_delay( + &self, + rtt: Duration, + config: &AckFrequencyConfig, + peer_params: &TransportParameters, + ) -> Duration { + // Use the peer's max_ack_delay if no custom max_ack_delay was provided in the config + let min_ack_delay = + Duration::from_micros(peer_params.min_ack_delay.map_or(0, |x| x.into())); + config + .max_ack_delay + .unwrap_or(self.peer_max_ack_delay) + .clamp(min_ack_delay, rtt.max(MIN_AUTOMATIC_ACK_DELAY)) + } + + /// Returns the `max_ack_delay` for the purposes of calculating the PTO + /// + /// This `max_ack_delay` is defined as the maximum of the peer's current `max_ack_delay` and all + /// in-flight `max_ack_delay`s (i.e. proposed values that haven't been acknowledged yet, but + /// might be already in use by the peer). + pub(super) fn max_ack_delay_for_pto(&self) -> Duration { + // Note: we have at most one in-flight ACK_FREQUENCY frame + if let Some((_, max_ack_delay)) = self.in_flight_ack_frequency_frame { + self.peer_max_ack_delay.max(max_ack_delay) + } else { + self.peer_max_ack_delay + } + } + + /// Returns the next sequence number for an ACK_FREQUENCY frame + pub(super) fn next_sequence_number(&mut self) -> VarInt { + assert!(self.next_outgoing_sequence_number <= VarInt::MAX); + + let seq = self.next_outgoing_sequence_number; + self.next_outgoing_sequence_number.0 += 1; + seq + } + + /// Returns true if we should send an ACK_FREQUENCY frame + pub(super) fn should_send_ack_frequency( + &self, + rtt: Duration, + config: &AckFrequencyConfig, + peer_params: &TransportParameters, + ) -> bool { + if self.next_outgoing_sequence_number.0 == 0 { + // Always send at startup + return true; + } + let current = self + .in_flight_ack_frequency_frame + .map_or(self.peer_max_ack_delay, |(_, pending)| pending); + let desired = self.candidate_max_ack_delay(rtt, config, peer_params); + let error = (desired.as_secs_f32() / current.as_secs_f32()) - 1.0; + error.abs() > MAX_RTT_ERROR + } + + /// Notifies the 
[`AckFrequencyState`] that a packet containing an ACK_FREQUENCY frame was sent + pub(super) fn ack_frequency_sent(&mut self, pn: u64, requested_max_ack_delay: Duration) { + self.in_flight_ack_frequency_frame = Some((pn, requested_max_ack_delay)); + } + + /// Notifies the [`AckFrequencyState`] that a packet has been ACKed + pub(super) fn on_acked(&mut self, pn: u64) { + match self.in_flight_ack_frequency_frame { + Some((number, requested_max_ack_delay)) if number == pn => { + self.in_flight_ack_frequency_frame = None; + self.peer_max_ack_delay = requested_max_ack_delay; + } + _ => {} + } + } + + /// Notifies the [`AckFrequencyState`] that an ACK_FREQUENCY frame was received + /// + /// Updates the endpoint's params according to the payload of the ACK_FREQUENCY frame, or + /// returns an error in case the requested `max_ack_delay` is invalid. + /// + /// Returns `true` if the frame was processed and `false` if it was ignored because of being + /// stale. + pub(super) fn ack_frequency_received( + &mut self, + frame: &AckFrequency, + pending_acks: &mut PendingAcks, + ) -> Result { + if self + .last_ack_frequency_frame + .is_some_and(|highest_sequence_nr| frame.sequence.into_inner() <= highest_sequence_nr) + { + return Ok(false); + } + + self.last_ack_frequency_frame = Some(frame.sequence.into_inner()); + + // Update max_ack_delay + let max_ack_delay = Duration::from_micros(frame.request_max_ack_delay.into_inner()); + if max_ack_delay < TIMER_GRANULARITY { + return Err(TransportError::PROTOCOL_VIOLATION( + "Requested Max Ack Delay in ACK_FREQUENCY frame is less than min_ack_delay", + )); + } + self.max_ack_delay = max_ack_delay; + + // Update the rest of the params + pending_acks.set_ack_frequency_params(frame); + + Ok(true) + } +} + +/// Maximum proportion difference between the most recently requested max ACK delay and the +/// currently desired one before a new request is sent, when the peer supports the ACK frequency +/// extension and an explicit max ACK delay is 
not configured. +const MAX_RTT_ERROR: f32 = 0.2; + +/// Minimum value to request the peer set max ACK delay to when the peer supports the ACK frequency +/// extension and an explicit max ACK delay is not configured. +// Keep in sync with `AckFrequencyConfig::max_ack_delay` documentation +const MIN_AUTOMATIC_ACK_DELAY: Duration = Duration::from_millis(25); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/assembler.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/assembler.rs new file mode 100644 index 0000000000000000000000000000000000000000..2288f5ecc4874c917c3752464acb3fdba53e1136 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/assembler.rs @@ -0,0 +1,663 @@ +use std::{ + cmp::Ordering, + collections::{BinaryHeap, binary_heap::PeekMut}, + mem, +}; + +use bytes::{Buf, Bytes, BytesMut}; + +use crate::range_set::RangeSet; + +/// Helper to assemble unordered stream frames into an ordered stream +#[derive(Debug, Default)] +pub(super) struct Assembler { + state: State, + data: BinaryHeap, + /// Total number of buffered bytes, including duplicates in ordered mode. + buffered: usize, + /// Estimated number of allocated bytes, will never be less than `buffered`. + allocated: usize, + /// Number of bytes read by the application. When only ordered reads have been used, this is the + /// length of the contiguous prefix of the stream which has been consumed by the application, + /// aka the stream offset. 
+ bytes_read: u64, + end: u64, +} + +impl Assembler { + pub(super) fn new() -> Self { + Self::default() + } + + /// Reset to the initial state + pub(super) fn reinit(&mut self) { + let old_data = mem::take(&mut self.data); + *self = Self::default(); + self.data = old_data; + self.data.clear(); + } + + pub(super) fn ensure_ordering(&mut self, ordered: bool) -> Result<(), IllegalOrderedRead> { + if ordered && !self.state.is_ordered() { + return Err(IllegalOrderedRead); + } else if !ordered && self.state.is_ordered() { + // Enter unordered mode + if !self.data.is_empty() { + // Get rid of possible duplicates + self.defragment(); + } + let mut recvd = RangeSet::new(); + recvd.insert(0..self.bytes_read); + for chunk in &self.data { + recvd.insert(chunk.offset..chunk.offset + chunk.bytes.len() as u64); + } + self.state = State::Unordered { recvd }; + } + Ok(()) + } + + /// Get the the next chunk + pub(super) fn read(&mut self, max_length: usize, ordered: bool) -> Option { + loop { + let mut chunk = self.data.peek_mut()?; + + if ordered { + if chunk.offset > self.bytes_read { + // Next chunk is after current read index + return None; + } else if (chunk.offset + chunk.bytes.len() as u64) <= self.bytes_read { + // Next chunk is useless as the read index is beyond its end + self.buffered -= chunk.bytes.len(); + self.allocated -= chunk.allocation_size; + PeekMut::pop(chunk); + continue; + } + + // Determine `start` and `len` of the slice of useful data in chunk + let start = (self.bytes_read - chunk.offset) as usize; + if start > 0 { + chunk.bytes.advance(start); + chunk.offset += start as u64; + self.buffered -= start; + } + } + + return Some(if max_length < chunk.bytes.len() { + self.bytes_read += max_length as u64; + let offset = chunk.offset; + chunk.offset += max_length as u64; + self.buffered -= max_length; + Chunk::new(offset, chunk.bytes.split_to(max_length)) + } else { + self.bytes_read += chunk.bytes.len() as u64; + self.buffered -= chunk.bytes.len(); + 
self.allocated -= chunk.allocation_size; + let chunk = PeekMut::pop(chunk); + Chunk::new(chunk.offset, chunk.bytes) + }); + } + } + + /// Copy fragmented chunk data to new chunks backed by a single buffer + /// + /// This makes sure we're not unnecessarily holding on to many larger allocations. + /// We merge contiguous chunks in the process of doing so. + fn defragment(&mut self) { + let new = BinaryHeap::with_capacity(self.data.len()); + let old = mem::replace(&mut self.data, new); + let mut buffers = old.into_sorted_vec(); + self.buffered = 0; + let mut fragmented_buffered = 0; + let mut offset = 0; + for chunk in buffers.iter_mut().rev() { + chunk.try_mark_defragment(offset); + let size = chunk.bytes.len(); + offset = chunk.offset + size as u64; + self.buffered += size; + if !chunk.defragmented { + fragmented_buffered += size; + } + } + self.allocated = self.buffered; + let mut buffer = BytesMut::with_capacity(fragmented_buffered); + let mut offset = 0; + for chunk in buffers.into_iter().rev() { + if chunk.defragmented { + // bytes might be empty after try_mark_defragment + if !chunk.bytes.is_empty() { + self.data.push(chunk); + } + continue; + } + // Overlap is resolved by try_mark_defragment + if chunk.offset != offset + (buffer.len() as u64) { + if !buffer.is_empty() { + self.data + .push(Buffer::new_defragmented(offset, buffer.split().freeze())); + } + offset = chunk.offset; + } + buffer.extend_from_slice(&chunk.bytes); + } + if !buffer.is_empty() { + self.data + .push(Buffer::new_defragmented(offset, buffer.split().freeze())); + } + } + + // Note: If a packet contains many frames from the same stream, the estimated over-allocation + // will be much higher because we are counting the same allocation multiple times. 
+ pub(super) fn insert(&mut self, mut offset: u64, mut bytes: Bytes, allocation_size: usize) { + debug_assert!( + bytes.len() <= allocation_size, + "allocation_size less than bytes.len(): {:?} < {:?}", + allocation_size, + bytes.len() + ); + self.end = self.end.max(offset + bytes.len() as u64); + if let State::Unordered { ref mut recvd } = self.state { + // Discard duplicate data + for duplicate in recvd.replace(offset..offset + bytes.len() as u64) { + if duplicate.start > offset { + let buffer = Buffer::new( + offset, + bytes.split_to((duplicate.start - offset) as usize), + allocation_size, + ); + self.buffered += buffer.bytes.len(); + self.allocated += buffer.allocation_size; + self.data.push(buffer); + offset = duplicate.start; + } + bytes.advance((duplicate.end - offset) as usize); + offset = duplicate.end; + } + } else if offset < self.bytes_read { + if (offset + bytes.len() as u64) <= self.bytes_read { + return; + } else { + let diff = self.bytes_read - offset; + offset += diff; + bytes.advance(diff as usize); + } + } + + if bytes.is_empty() { + return; + } + let buffer = Buffer::new(offset, bytes, allocation_size); + self.buffered += buffer.bytes.len(); + self.allocated += buffer.allocation_size; + self.data.push(buffer); + // `self.buffered` also counts duplicate bytes, therefore we use + // `self.end - self.bytes_read` as an upper bound of buffered unique + // bytes. This will cause a defragmentation if the amount of duplicate + // bytes exceedes a proportion of the receive window size. + let buffered = self.buffered.min((self.end - self.bytes_read) as usize); + let over_allocation = self.allocated - buffered; + // Rationale: on the one hand, we want to defragment rarely, ideally never + // in non-pathological scenarios. However, a pathological or malicious + // peer could send us one-byte frames, and since we use reference-counted + // buffers in order to prevent copying, this could result in keeping a lot + // of memory allocated. 
This limits over-allocation in proportion to the + // buffered data. The constants are chosen somewhat arbitrarily and try to + // balance between defragmentation overhead and over-allocation. + let threshold = 32768.max(buffered * 3 / 2); + if over_allocation > threshold { + self.defragment() + } + } + + /// Number of bytes consumed by the application + pub(super) fn bytes_read(&self) -> u64 { + self.bytes_read + } + + /// Discard all buffered data + pub(super) fn clear(&mut self) { + self.data.clear(); + self.buffered = 0; + self.allocated = 0; + } +} + +/// A chunk of data from the receive stream +#[derive(Debug, PartialEq, Eq)] +pub struct Chunk { + /// The offset in the stream + pub offset: u64, + /// The contents of the chunk + pub bytes: Bytes, +} + +impl Chunk { + fn new(offset: u64, bytes: Bytes) -> Self { + Self { offset, bytes } + } +} + +#[derive(Debug, Eq)] +struct Buffer { + offset: u64, + bytes: Bytes, + /// Size of the allocation behind `bytes`, if `defragmented == false`. + /// Otherwise this will be set to `bytes.len()` by `try_mark_defragment`. + /// Will never be less than `bytes.len()`. 
+ allocation_size: usize, + defragmented: bool, +} + +impl Buffer { + /// Constructs a new fragmented Buffer + fn new(offset: u64, bytes: Bytes, allocation_size: usize) -> Self { + Self { + offset, + bytes, + allocation_size, + defragmented: false, + } + } + + /// Constructs a new defragmented Buffer + fn new_defragmented(offset: u64, bytes: Bytes) -> Self { + let allocation_size = bytes.len(); + Self { + offset, + bytes, + allocation_size, + defragmented: true, + } + } + + /// Discards data before `offset` and flags `self` as defragmented if it has good utilization + fn try_mark_defragment(&mut self, offset: u64) { + let duplicate = offset.saturating_sub(self.offset) as usize; + self.offset = self.offset.max(offset); + if duplicate >= self.bytes.len() { + // All bytes are duplicate + self.bytes = Bytes::new(); + self.defragmented = true; + self.allocation_size = 0; + return; + } + self.bytes.advance(duplicate); + // Make sure that fragmented buffers with high utilization become defragmented and + // defragmented buffers remain defragmented + self.defragmented = self.defragmented || self.bytes.len() * 6 / 5 >= self.allocation_size; + if self.defragmented { + // Make sure that defragmented buffers do not contribute to over-allocation + self.allocation_size = self.bytes.len(); + } + } +} + +impl Ord for Buffer { + // Invert ordering based on offset (max-heap, min offset first), + // prioritize longer chunks at the same offset. 
+ fn cmp(&self, other: &Self) -> Ordering { + self.offset + .cmp(&other.offset) + .reverse() + .then(self.bytes.len().cmp(&other.bytes.len())) + } +} + +impl PartialOrd for Buffer { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for Buffer { + fn eq(&self, other: &Self) -> bool { + (self.offset, self.bytes.len()) == (other.offset, other.bytes.len()) + } +} + +#[derive(Debug)] +enum State { + Ordered, + Unordered { + /// The set of offsets that have been received from the peer, including portions not yet + /// read by the application. + recvd: RangeSet, + }, +} + +impl State { + fn is_ordered(&self) -> bool { + matches!(self, Self::Ordered) + } +} + +impl Default for State { + fn default() -> Self { + Self::Ordered + } +} + +/// Error indicating that an ordered read was performed on a stream after an unordered read +#[derive(Debug)] +pub struct IllegalOrderedRead; + +#[cfg(test)] +mod test { + use super::*; + use assert_matches::assert_matches; + + #[test] + fn assemble_ordered() { + let mut x = Assembler::new(); + assert_matches!(next(&mut x, 32), None); + x.insert(0, Bytes::from_static(b"123"), 3); + assert_matches!(next(&mut x, 1), Some(ref y) if &y[..] == b"1"); + assert_matches!(next(&mut x, 3), Some(ref y) if &y[..] == b"23"); + x.insert(3, Bytes::from_static(b"456"), 3); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"456"); + x.insert(6, Bytes::from_static(b"789"), 3); + x.insert(9, Bytes::from_static(b"10"), 2); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"789"); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"10"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_unordered() { + let mut x = Assembler::new(); + x.ensure_ordering(false).unwrap(); + x.insert(3, Bytes::from_static(b"456"), 3); + assert_matches!(next(&mut x, 32), None); + x.insert(0, Bytes::from_static(b"123"), 3); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] 
== b"123"); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"456"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_duplicate() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"123"), 3); + x.insert(0, Bytes::from_static(b"123"), 3); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"123"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_duplicate_compact() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"123"), 3); + x.insert(0, Bytes::from_static(b"123"), 3); + x.defragment(); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"123"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_contained() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"12345"), 5); + x.insert(1, Bytes::from_static(b"234"), 3); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"12345"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_contained_compact() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"12345"), 5); + x.insert(1, Bytes::from_static(b"234"), 3); + x.defragment(); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"12345"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_contains() { + let mut x = Assembler::new(); + x.insert(1, Bytes::from_static(b"234"), 3); + x.insert(0, Bytes::from_static(b"12345"), 5); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"12345"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_contains_compact() { + let mut x = Assembler::new(); + x.insert(1, Bytes::from_static(b"234"), 3); + x.insert(0, Bytes::from_static(b"12345"), 5); + x.defragment(); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] 
== b"12345"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_overlapping() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"123"), 3); + x.insert(1, Bytes::from_static(b"234"), 3); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"123"); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"4"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_overlapping_compact() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"123"), 4); + x.insert(1, Bytes::from_static(b"234"), 4); + x.defragment(); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"1234"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_complex() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"1"), 1); + x.insert(2, Bytes::from_static(b"3"), 1); + x.insert(4, Bytes::from_static(b"5"), 1); + x.insert(0, Bytes::from_static(b"123456"), 6); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"123456"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_complex_compact() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"1"), 1); + x.insert(2, Bytes::from_static(b"3"), 1); + x.insert(4, Bytes::from_static(b"5"), 1); + x.insert(0, Bytes::from_static(b"123456"), 6); + x.defragment(); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] == b"123456"); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn assemble_old() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"1234"), 4); + assert_matches!(next(&mut x, 32), Some(ref y) if &y[..] 
== b"1234"); + x.insert(0, Bytes::from_static(b"1234"), 4); + assert_matches!(next(&mut x, 32), None); + } + + #[test] + fn compact() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"abc"), 4); + x.insert(3, Bytes::from_static(b"def"), 4); + x.insert(9, Bytes::from_static(b"jkl"), 4); + x.insert(12, Bytes::from_static(b"mno"), 4); + x.defragment(); + assert_eq!( + next_unordered(&mut x), + Chunk::new(0, Bytes::from_static(b"abcdef")) + ); + assert_eq!( + next_unordered(&mut x), + Chunk::new(9, Bytes::from_static(b"jklmno")) + ); + } + + #[test] + fn defrag_with_missing_prefix() { + let mut x = Assembler::new(); + x.insert(3, Bytes::from_static(b"def"), 3); + x.defragment(); + assert_eq!( + next_unordered(&mut x), + Chunk::new(3, Bytes::from_static(b"def")) + ); + } + + #[test] + fn defrag_read_chunk() { + let mut x = Assembler::new(); + x.insert(3, Bytes::from_static(b"def"), 4); + x.insert(0, Bytes::from_static(b"abc"), 4); + x.insert(7, Bytes::from_static(b"hij"), 4); + x.insert(11, Bytes::from_static(b"lmn"), 4); + x.defragment(); + assert_matches!(x.read(usize::MAX, true), Some(ref y) if &y.bytes[..] == b"abcdef"); + x.insert(5, Bytes::from_static(b"fghijklmn"), 9); + assert_matches!(x.read(usize::MAX, true), Some(ref y) if &y.bytes[..] == b"ghijklmn"); + x.insert(13, Bytes::from_static(b"nopq"), 4); + assert_matches!(x.read(usize::MAX, true), Some(ref y) if &y.bytes[..] == b"opq"); + x.insert(15, Bytes::from_static(b"pqrs"), 4); + assert_matches!(x.read(usize::MAX, true), Some(ref y) if &y.bytes[..] 
== b"rs"); + assert_matches!(x.read(usize::MAX, true), None); + } + + #[test] + fn unordered_happy_path() { + let mut x = Assembler::new(); + x.ensure_ordering(false).unwrap(); + x.insert(0, Bytes::from_static(b"abc"), 3); + assert_eq!( + next_unordered(&mut x), + Chunk::new(0, Bytes::from_static(b"abc")) + ); + assert_eq!(x.read(usize::MAX, false), None); + x.insert(3, Bytes::from_static(b"def"), 3); + assert_eq!( + next_unordered(&mut x), + Chunk::new(3, Bytes::from_static(b"def")) + ); + assert_eq!(x.read(usize::MAX, false), None); + } + + #[test] + fn unordered_dedup() { + let mut x = Assembler::new(); + x.ensure_ordering(false).unwrap(); + x.insert(3, Bytes::from_static(b"def"), 3); + assert_eq!( + next_unordered(&mut x), + Chunk::new(3, Bytes::from_static(b"def")) + ); + assert_eq!(x.read(usize::MAX, false), None); + x.insert(0, Bytes::from_static(b"a"), 1); + x.insert(0, Bytes::from_static(b"abcdefghi"), 9); + x.insert(0, Bytes::from_static(b"abcd"), 4); + assert_eq!( + next_unordered(&mut x), + Chunk::new(0, Bytes::from_static(b"a")) + ); + assert_eq!( + next_unordered(&mut x), + Chunk::new(1, Bytes::from_static(b"bc")) + ); + assert_eq!( + next_unordered(&mut x), + Chunk::new(6, Bytes::from_static(b"ghi")) + ); + assert_eq!(x.read(usize::MAX, false), None); + x.insert(8, Bytes::from_static(b"ijkl"), 4); + assert_eq!( + next_unordered(&mut x), + Chunk::new(9, Bytes::from_static(b"jkl")) + ); + assert_eq!(x.read(usize::MAX, false), None); + x.insert(12, Bytes::from_static(b"mno"), 3); + assert_eq!( + next_unordered(&mut x), + Chunk::new(12, Bytes::from_static(b"mno")) + ); + assert_eq!(x.read(usize::MAX, false), None); + x.insert(2, Bytes::from_static(b"cde"), 3); + assert_eq!(x.read(usize::MAX, false), None); + } + + #[test] + fn chunks_dedup() { + let mut x = Assembler::new(); + x.insert(3, Bytes::from_static(b"def"), 3); + assert_eq!(x.read(usize::MAX, true), None); + x.insert(0, Bytes::from_static(b"a"), 1); + x.insert(1, Bytes::from_static(b"bcdefghi"), 
9); + x.insert(0, Bytes::from_static(b"abcd"), 4); + assert_eq!( + x.read(usize::MAX, true), + Some(Chunk::new(0, Bytes::from_static(b"abcd"))) + ); + assert_eq!( + x.read(usize::MAX, true), + Some(Chunk::new(4, Bytes::from_static(b"efghi"))) + ); + assert_eq!(x.read(usize::MAX, true), None); + x.insert(8, Bytes::from_static(b"ijkl"), 4); + assert_eq!( + x.read(usize::MAX, true), + Some(Chunk::new(9, Bytes::from_static(b"jkl"))) + ); + assert_eq!(x.read(usize::MAX, true), None); + x.insert(12, Bytes::from_static(b"mno"), 3); + assert_eq!( + x.read(usize::MAX, true), + Some(Chunk::new(12, Bytes::from_static(b"mno"))) + ); + assert_eq!(x.read(usize::MAX, true), None); + x.insert(2, Bytes::from_static(b"cde"), 3); + assert_eq!(x.read(usize::MAX, true), None); + } + + #[test] + fn ordered_eager_discard() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"abc"), 3); + assert_eq!(x.data.len(), 1); + assert_eq!( + x.read(usize::MAX, true), + Some(Chunk::new(0, Bytes::from_static(b"abc"))) + ); + x.insert(0, Bytes::from_static(b"ab"), 2); + assert_eq!(x.data.len(), 0); + x.insert(2, Bytes::from_static(b"cd"), 2); + assert_eq!( + x.data.peek(), + Some(&Buffer::new(3, Bytes::from_static(b"d"), 2)) + ); + } + + #[test] + fn ordered_insert_unordered_read() { + let mut x = Assembler::new(); + x.insert(0, Bytes::from_static(b"abc"), 3); + x.insert(0, Bytes::from_static(b"abc"), 3); + x.ensure_ordering(false).unwrap(); + assert_eq!( + x.read(3, false), + Some(Chunk::new(0, Bytes::from_static(b"abc"))) + ); + assert_eq!(x.read(3, false), None); + } + + fn next_unordered(x: &mut Assembler) -> Chunk { + x.read(usize::MAX, false).unwrap() + } + + fn next(x: &mut Assembler, size: usize) -> Option { + x.read(size, true).map(|chunk| chunk.bytes) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/cid_state.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/cid_state.rs new 
file mode 100644 index 0000000000000000000000000000000000000000..08ad20cb4262ae92f7b5f69dd865c6364019bb6f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/cid_state.rs @@ -0,0 +1,223 @@ +//! Maintain the state of local connection IDs +use std::collections::VecDeque; + +use rustc_hash::FxHashSet; +use tracing::{debug, trace}; + +use crate::{Duration, Instant, TransportError, shared::IssuedCid}; + +/// Local connection ID management +pub(super) struct CidState { + /// Timestamp when issued cids should be retired + retire_timestamp: VecDeque, + /// Number of local connection IDs that have been issued in NEW_CONNECTION_ID frames. + issued: u64, + /// Sequence numbers of local connection IDs not yet retired by the peer + active_seq: FxHashSet, + /// Sequence number the peer has already retired all CIDs below at our request via `retire_prior_to` + prev_retire_seq: u64, + /// Sequence number to set in retire_prior_to field in NEW_CONNECTION_ID frame + retire_seq: u64, + /// cid length used to decode short packet + cid_len: usize, + //// cid lifetime + cid_lifetime: Option, +} + +impl CidState { + pub(crate) fn new( + cid_len: usize, + cid_lifetime: Option, + now: Instant, + issued: u64, + ) -> Self { + let mut active_seq = FxHashSet::default(); + // Add sequence number of CIDs used in handshaking into tracking set + for seq in 0..issued { + active_seq.insert(seq); + } + let mut this = Self { + retire_timestamp: VecDeque::new(), + issued, + active_seq, + prev_retire_seq: 0, + retire_seq: 0, + cid_len, + cid_lifetime, + }; + // Track lifetime of CIDs used in handshaking + for seq in 0..issued { + this.track_lifetime(seq, now); + } + this + } + + /// Find the next timestamp when previously issued CID should be retired + pub(crate) fn next_timeout(&mut self) -> Option { + self.retire_timestamp.front().map(|nc| { + trace!("CID {} will expire at {:?}", nc.sequence, nc.timestamp); + nc.timestamp + }) + } + + /// Track 
the lifetime of issued cids in `retire_timestamp` + fn track_lifetime(&mut self, new_cid_seq: u64, now: Instant) { + let lifetime = match self.cid_lifetime { + Some(lifetime) => lifetime, + None => return, + }; + + let expire_timestamp = now.checked_add(lifetime); + let expire_at = match expire_timestamp { + Some(expire_at) => expire_at, + None => return, + }; + + let last_record = self.retire_timestamp.back_mut(); + if let Some(last) = last_record { + // Compare the timestamp with the last inserted record + // Combine into a single batch if timestamp of current cid is same as the last record + if expire_at == last.timestamp { + debug_assert!(new_cid_seq > last.sequence); + last.sequence = new_cid_seq; + return; + } + } + + self.retire_timestamp.push_back(CidTimestamp { + sequence: new_cid_seq, + timestamp: expire_at, + }); + } + + /// Update local CID state when previously issued CID is retired + /// + /// Return whether a new CID needs to be pushed that notifies remote peer to respond `RETIRE_CONNECTION_ID` + pub(crate) fn on_cid_timeout(&mut self) -> bool { + // Whether the peer hasn't retired all the CIDs we asked it to yet + let unretired_ids_found = + (self.prev_retire_seq..self.retire_seq).any(|seq| self.active_seq.contains(&seq)); + + let current_retire_prior_to = self.retire_seq; + let next_retire_sequence = self + .retire_timestamp + .pop_front() + .map(|seq| seq.sequence + 1); + + // According to RFC: + // Endpoints SHOULD NOT issue updates of the Retire Prior To field + // before receiving RETIRE_CONNECTION_ID frames that retire all + // connection IDs indicated by the previous Retire Prior To value. 
+ // https://tools.ietf.org/html/draft-ietf-quic-transport-29#section-5.1.2 + if !unretired_ids_found { + // All Cids are retired, `prev_retire_cid_seq` can be assigned to `retire_cid_seq` + self.prev_retire_seq = self.retire_seq; + // Advance `retire_seq` if next cid that needs to be retired exists + if let Some(next_retire_prior_to) = next_retire_sequence { + self.retire_seq = next_retire_prior_to; + } + } + + // Check if retirement of all CIDs that reach their lifetime is still needed + // According to RFC: + // An endpoint MUST NOT + // provide more connection IDs than the peer's limit. An endpoint MAY + // send connection IDs that temporarily exceed a peer's limit if the + // NEW_CONNECTION_ID frame also requires the retirement of any excess, + // by including a sufficiently large value in the Retire Prior To field. + // + // If yes (return true), a new CID must be pushed with updated `retire_prior_to` field to remote peer. + // If no (return false), it means CIDs that reach the end of lifetime have been retired already. Do not push a new CID in order to avoid violating above RFC. + (current_retire_prior_to..self.retire_seq).any(|seq| self.active_seq.contains(&seq)) + } + + /// Update cid state when `NewIdentifiers` event is received + pub(crate) fn new_cids(&mut self, ids: &[IssuedCid], now: Instant) { + // `ids` could be `None` once active_connection_id_limit is set to 1 by peer + let last_cid = match ids.last() { + Some(cid) => cid, + None => return, + }; + self.issued += ids.len() as u64; + // Record the timestamp of CID with the largest seq number + let sequence = last_cid.sequence; + ids.iter().for_each(|frame| { + self.active_seq.insert(frame.sequence); + }); + self.track_lifetime(sequence, now); + } + + /// Update CidState for receipt of a `RETIRE_CONNECTION_ID` frame + /// + /// Returns whether a new CID can be issued, or an error if the frame was illegal. 
+ pub(crate) fn on_cid_retirement( + &mut self, + sequence: u64, + limit: u64, + ) -> Result { + if self.cid_len == 0 { + return Err(TransportError::PROTOCOL_VIOLATION( + "RETIRE_CONNECTION_ID when CIDs aren't in use", + )); + } + if sequence > self.issued { + debug!( + sequence, + "got RETIRE_CONNECTION_ID for unissued sequence number" + ); + return Err(TransportError::PROTOCOL_VIOLATION( + "RETIRE_CONNECTION_ID for unissued sequence number", + )); + } + self.active_seq.remove(&sequence); + // Consider a scenario where peer A has active remote cid 0,1,2. + // Peer B first send a NEW_CONNECTION_ID with cid 3 and retire_prior_to set to 1. + // Peer A processes this NEW_CONNECTION_ID frame; update remote cid to 1,2,3 + // and meanwhile send a RETIRE_CONNECTION_ID to retire cid 0 to peer B. + // If peer B doesn't check the cid limit here and send a new cid again, peer A will then face CONNECTION_ID_LIMIT_ERROR + Ok(limit > self.active_seq.len() as u64) + } + + /// Length of local Connection IDs + pub(crate) fn cid_len(&self) -> usize { + self.cid_len + } + + /// The value for `retire_prior_to` field in `NEW_CONNECTION_ID` frame + pub(crate) fn retire_prior_to(&self) -> u64 { + self.retire_seq + } + + #[cfg(test)] + pub(crate) fn active_seq(&self) -> (u64, u64) { + let mut min = u64::MAX; + let mut max = u64::MIN; + for n in self.active_seq.iter() { + if n < &min { + min = *n; + } + if n > &max { + max = *n; + } + } + (min, max) + } + + #[cfg(test)] + pub(crate) fn assign_retire_seq(&mut self, v: u64) -> u64 { + // Cannot retire more CIDs than what have been issued + debug_assert!(v <= *self.active_seq.iter().max().unwrap() + 1); + let n = v.checked_sub(self.retire_seq).unwrap(); + self.retire_seq = v; + n + } +} + +/// Data structure that records when issued cids should be retired +#[derive(Copy, Clone, Eq, PartialEq)] +struct CidTimestamp { + /// Highest cid sequence number created in a batch + sequence: u64, + /// Timestamp when cid needs to be retired + timestamp: 
Instant, +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/datagrams.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/datagrams.rs new file mode 100644 index 0000000000000000000000000000000000000000..c22e8d71552653224f3cc9b8d58adad25e58665c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/datagrams.rs @@ -0,0 +1,211 @@ +use std::collections::VecDeque; + +use bytes::Bytes; +use thiserror::Error; +use tracing::{debug, trace}; + +use super::Connection; +use crate::{ + TransportError, + frame::{Datagram, FrameStruct}, +}; + +/// API to control datagram traffic +pub struct Datagrams<'a> { + pub(super) conn: &'a mut Connection, +} + +impl Datagrams<'_> { + /// Queue an unreliable, unordered datagram for immediate transmission + /// + /// If `drop` is true, previously queued datagrams which are still unsent may be discarded to + /// make space for this datagram, in order of oldest to newest. If `drop` is false, and there + /// isn't enough space due to previously queued datagrams, this function will return + /// `SendDatagramError::Blocked`. `Event::DatagramsUnblocked` will be emitted once datagrams + /// have been sent. + /// + /// Returns `Err` iff a `len`-byte datagram cannot currently be sent. 
+ pub fn send(&mut self, data: Bytes, drop: bool) -> Result<(), SendDatagramError> { + if self.conn.config.datagram_receive_buffer_size.is_none() { + return Err(SendDatagramError::Disabled); + } + let max = self + .max_size() + .ok_or(SendDatagramError::UnsupportedByPeer)?; + if data.len() > max { + return Err(SendDatagramError::TooLarge); + } + if drop { + while self.conn.datagrams.outgoing_total > self.conn.config.datagram_send_buffer_size { + let prev = self + .conn + .datagrams + .outgoing + .pop_front() + .expect("datagrams.outgoing_total desynchronized"); + trace!(len = prev.data.len(), "dropping outgoing datagram"); + self.conn.datagrams.outgoing_total -= prev.data.len(); + } + } else if self.conn.datagrams.outgoing_total + data.len() + > self.conn.config.datagram_send_buffer_size + { + self.conn.datagrams.send_blocked = true; + return Err(SendDatagramError::Blocked(data)); + } + self.conn.datagrams.outgoing_total += data.len(); + self.conn.datagrams.outgoing.push_back(Datagram { data }); + Ok(()) + } + + /// Compute the maximum size of datagrams that may passed to `send_datagram` + /// + /// Returns `None` if datagrams are unsupported by the peer or disabled locally. + /// + /// This may change over the lifetime of a connection according to variation in the path MTU + /// estimate. The peer can also enforce an arbitrarily small fixed limit, but if the peer's + /// limit is large this is guaranteed to be a little over a kilobyte at minimum. + /// + /// Not necessarily the maximum size of received datagrams. + pub fn max_size(&self) -> Option { + // We use the conservative overhead bound for any packet number, reducing the budget by at + // most 3 bytes, so that PN size fluctuations don't cause users sending maximum-size + // datagrams to suffer avoidable packet loss. + let max_size = self.conn.path.current_mtu() as usize + - self.conn.predict_1rtt_overhead(None) + - Datagram::SIZE_BOUND; + let limit = self + .conn + .peer_params + .max_datagram_frame_size? 
+ .into_inner() + .saturating_sub(Datagram::SIZE_BOUND as u64); + Some(limit.min(max_size as u64) as usize) + } + + /// Receive an unreliable, unordered datagram + pub fn recv(&mut self) -> Option { + self.conn.datagrams.recv() + } + + /// Bytes available in the outgoing datagram buffer + /// + /// When greater than zero, [`send`](Self::send)ing a datagram of at most this size is + /// guaranteed not to cause older datagrams to be dropped. + pub fn send_buffer_space(&self) -> usize { + self.conn + .config + .datagram_send_buffer_size + .saturating_sub(self.conn.datagrams.outgoing_total) + } +} + +#[derive(Default)] +pub(super) struct DatagramState { + /// Number of bytes of datagrams that have been received by the local transport but not + /// delivered to the application + pub(super) recv_buffered: usize, + pub(super) incoming: VecDeque, + pub(super) outgoing: VecDeque, + pub(super) outgoing_total: usize, + pub(super) send_blocked: bool, +} + +impl DatagramState { + pub(super) fn received( + &mut self, + datagram: Datagram, + window: &Option, + ) -> Result { + let window = match window { + None => { + return Err(TransportError::PROTOCOL_VIOLATION( + "unexpected DATAGRAM frame", + )); + } + Some(x) => *x, + }; + + if datagram.data.len() > window { + return Err(TransportError::PROTOCOL_VIOLATION("oversized datagram")); + } + + let was_empty = self.recv_buffered == 0; + while datagram.data.len() + self.recv_buffered > window { + debug!("dropping stale datagram"); + self.recv(); + } + + self.recv_buffered += datagram.data.len(); + self.incoming.push_back(datagram); + Ok(was_empty) + } + + /// Discard outgoing datagrams with a payload larger than `max_payload` bytes + /// + /// Used to ensure that reductions in MTU don't get us stuck in a state where we have a datagram + /// queued but can't send it. 
+ pub(super) fn drop_oversized(&mut self, max_payload: usize) { + self.outgoing.retain(|datagram| { + let result = datagram.data.len() < max_payload; + if !result { + trace!( + "dropping {} byte datagram violating {} byte limit", + datagram.data.len(), + max_payload + ); + self.outgoing_total -= datagram.data.len(); + } + result + }); + } + + /// Attempt to write a datagram frame into `buf`, consuming it from `self.outgoing` + /// + /// Returns whether a frame was written. At most `max_size` bytes will be written, including + /// framing. + pub(super) fn write(&mut self, buf: &mut Vec, max_size: usize) -> bool { + let datagram = match self.outgoing.pop_front() { + Some(x) => x, + None => return false, + }; + + if buf.len() + datagram.size(true) > max_size { + // Future work: we could be more clever about cramming small datagrams into + // mostly-full packets when a larger one is queued first + self.outgoing.push_front(datagram); + return false; + } + + trace!(len = datagram.data.len(), "DATAGRAM"); + + self.outgoing_total -= datagram.data.len(); + datagram.encode(true, buf); + true + } + + pub(super) fn recv(&mut self) -> Option { + let x = self.incoming.pop_front()?.data; + self.recv_buffered -= x.len(); + Some(x) + } +} + +/// Errors that can arise when sending a datagram +#[derive(Debug, Error, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum SendDatagramError { + /// The peer does not support receiving datagram frames + #[error("datagrams not supported by peer")] + UnsupportedByPeer, + /// Datagram support is disabled locally + #[error("datagram support disabled")] + Disabled, + /// The datagram is larger than the connection can currently accommodate + /// + /// Indicates that the path MTU minus overhead or the limit advertised by the peer has been + /// exceeded. 
+ #[error("datagram too large")] + TooLarge, + /// Send would block + #[error("datagram send blocked")] + Blocked(Bytes), +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..38ea8fbb43c450fccbfd3e924a76f8d19534bd18 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/mod.rs @@ -0,0 +1,4102 @@ +use std::{ + cmp, + collections::VecDeque, + convert::TryFrom, + fmt, io, mem, + net::{IpAddr, SocketAddr}, + sync::Arc, +}; + +use bytes::{Bytes, BytesMut}; +use frame::StreamMetaVec; + +use rand::{Rng, SeedableRng, rngs::StdRng}; +use thiserror::Error; +use tracing::{debug, error, trace, trace_span, warn}; + +use crate::{ + Dir, Duration, EndpointConfig, Frame, INITIAL_MTU, Instant, MAX_CID_SIZE, MAX_STREAM_COUNT, + MIN_INITIAL_SIZE, Side, StreamId, TIMER_GRANULARITY, TokenStore, Transmit, TransportError, + TransportErrorCode, VarInt, + cid_generator::ConnectionIdGenerator, + cid_queue::CidQueue, + coding::BufMutExt, + config::{ServerConfig, TransportConfig}, + crypto::{self, KeyPair, Keys, PacketKey}, + frame::{self, Close, Datagram, FrameStruct, NewConnectionId, NewToken}, + packet::{ + FixedLengthConnectionIdParser, Header, InitialHeader, InitialPacket, LongType, Packet, + PacketNumber, PartialDecode, SpaceId, + }, + range_set::ArrayRangeSet, + shared::{ + ConnectionEvent, ConnectionEventInner, ConnectionId, DatagramConnectionEvent, EcnCodepoint, + EndpointEvent, EndpointEventInner, + }, + token::{ResetToken, Token, TokenPayload}, + transport_parameters::TransportParameters, +}; + +mod ack_frequency; +use ack_frequency::AckFrequencyState; + +mod assembler; +pub use assembler::Chunk; + +mod cid_state; +use cid_state::CidState; + +mod datagrams; +use datagrams::DatagramState; +pub use 
datagrams::{Datagrams, SendDatagramError}; + +mod mtud; +mod pacing; + +mod packet_builder; +use packet_builder::PacketBuilder; + +mod packet_crypto; +use packet_crypto::{PrevCrypto, ZeroRttCrypto}; + +mod paths; +pub use paths::RttEstimator; +use paths::{PathData, PathResponses}; + +pub(crate) mod qlog; + +mod send_buffer; + +mod spaces; +#[cfg(fuzzing)] +pub use spaces::Retransmits; +#[cfg(not(fuzzing))] +use spaces::Retransmits; +use spaces::{PacketNumberFilter, PacketSpace, SendableFrames, SentPacket, ThinRetransmits}; + +mod stats; +pub use stats::{ConnectionStats, FrameStats, PathStats, UdpStats}; + +mod streams; +#[cfg(fuzzing)] +pub use streams::StreamsState; +#[cfg(not(fuzzing))] +use streams::StreamsState; +pub use streams::{ + Chunks, ClosedStream, FinishError, ReadError, ReadableError, RecvStream, SendStream, + ShouldTransmit, StreamEvent, Streams, WriteError, Written, +}; + +mod timer; +use crate::congestion::Controller; +use timer::{Timer, TimerTable}; + +/// Protocol state and logic for a single QUIC connection +/// +/// Objects of this type receive [`ConnectionEvent`]s and emit [`EndpointEvent`]s and application +/// [`Event`]s to make progress. To handle timeouts, a `Connection` returns timer updates and +/// expects timeouts through various methods. A number of simple getter methods are exposed +/// to allow callers to inspect some of the connection state. +/// +/// `Connection` has roughly 4 types of methods: +/// +/// - A. Simple getters, taking `&self` +/// - B. Handlers for incoming events from the network or system, named `handle_*`. +/// - C. State machine mutators, for incoming commands from the application. For convenience we +/// refer to this as "performing I/O" below, however as per the design of this library none of the +/// functions actually perform system-level I/O. For example, [`read`](RecvStream::read) and +/// [`write`](SendStream::write), but also things like [`reset`](SendStream::reset). +/// - D. 
Polling functions for outgoing events or actions for the caller to +/// take, named `poll_*`. +/// +/// The simplest way to use this API correctly is to call (B) and (C) whenever +/// appropriate, then after each of those calls, as soon as feasible call all +/// polling methods (D) and deal with their outputs appropriately, e.g. by +/// passing it to the application or by making a system-level I/O call. You +/// should call the polling functions in this order: +/// +/// 1. [`poll_transmit`](Self::poll_transmit) +/// 2. [`poll_timeout`](Self::poll_timeout) +/// 3. [`poll_endpoint_events`](Self::poll_endpoint_events) +/// 4. [`poll`](Self::poll) +/// +/// Currently the only actual dependency is from (2) to (1), however additional +/// dependencies may be added in future, so the above order is recommended. +/// +/// (A) may be called whenever desired. +/// +/// Care should be made to ensure that the input events represent monotonically +/// increasing time. Specifically, calling [`handle_timeout`](Self::handle_timeout) +/// with events of the same [`Instant`] may be interleaved in any order with a +/// call to [`handle_event`](Self::handle_event) at that same instant; however +/// events or timeouts with different instants must not be interleaved. +pub struct Connection { + endpoint_config: Arc, + config: Arc, + rng: StdRng, + crypto: Box, + /// The CID we initially chose, for use during the handshake + handshake_cid: ConnectionId, + /// The CID the peer initially chose, for use during the handshake + rem_handshake_cid: ConnectionId, + /// The "real" local IP address which was was used to receive the initial packet. 
+ /// This is only populated for the server case, and if known + local_ip: Option, + path: PathData, + /// Incremented every time we see a new path + /// + /// Stored separately from `path.generation` to account for aborted migrations + path_counter: u64, + /// Whether MTU detection is supported in this environment + allow_mtud: bool, + prev_path: Option<(ConnectionId, PathData)>, + state: State, + side: ConnectionSide, + /// Whether or not 0-RTT was enabled during the handshake. Does not imply acceptance. + zero_rtt_enabled: bool, + /// Set if 0-RTT is supported, then cleared when no longer needed. + zero_rtt_crypto: Option, + key_phase: bool, + /// How many packets are in the current key phase. Used only for `Data` space. + key_phase_size: u64, + /// Transport parameters set by the peer + peer_params: TransportParameters, + /// Source ConnectionId of the first packet received from the peer + orig_rem_cid: ConnectionId, + /// Destination ConnectionId sent by the client on the first Initial + initial_dst_cid: ConnectionId, + /// The value that the server included in the Source Connection ID field of a Retry packet, if + /// one was received + retry_src_cid: Option, + events: VecDeque, + endpoint_events: VecDeque, + /// Whether the spin bit is in use for this connection + spin_enabled: bool, + /// Outgoing spin bit state + spin: bool, + /// Packet number spaces: initial, handshake, 1-RTT + spaces: [PacketSpace; 3], + /// Highest usable packet number space + highest_space: SpaceId, + /// 1-RTT keys used prior to a key update + prev_crypto: Option, + /// 1-RTT keys to be used for the next key update + /// + /// These are generated in advance to prevent timing attacks and/or DoS by third-party attackers + /// spoofing key updates. + next_crypto: Option>>, + accepted_0rtt: bool, + /// Whether the idle timer should be reset the next time an ack-eliciting packet is transmitted. 
+ permit_idle_reset: bool, + /// Negotiated idle timeout + idle_timeout: Option, + timers: TimerTable, + /// Number of packets received which could not be authenticated + authentication_failures: u64, + /// Why the connection was lost, if it has been + error: Option, + /// Identifies Data-space packet numbers to skip. Not used in earlier spaces. + packet_number_filter: PacketNumberFilter, + + // + // Queued non-retransmittable 1-RTT data + // + /// Responses to PATH_CHALLENGE frames + path_responses: PathResponses, + close: bool, + + // + // ACK frequency + // + ack_frequency: AckFrequencyState, + + // + // Loss Detection + // + /// The number of times a PTO has been sent without receiving an ack. + pto_count: u32, + + // + // Congestion Control + // + /// Whether the most recently received packet had an ECN codepoint set + receiving_ecn: bool, + /// Number of packets authenticated + total_authed_packets: u64, + /// Whether the last `poll_transmit` call yielded no data because there was + /// no outgoing application data. + app_limited: bool, + + streams: StreamsState, + /// Surplus remote CIDs for future use on new paths + rem_cids: CidQueue, + // Attributes of CIDs generated by local peer + local_cid_state: CidState, + /// State of the unreliable datagram extension + datagrams: DatagramState, + /// Connection level statistics + stats: ConnectionStats, + /// QUIC version used for the connection. 
+ version: u32, +} + +impl Connection { + pub(crate) fn new( + endpoint_config: Arc, + config: Arc, + init_cid: ConnectionId, + loc_cid: ConnectionId, + rem_cid: ConnectionId, + remote: SocketAddr, + local_ip: Option, + crypto: Box, + cid_gen: &dyn ConnectionIdGenerator, + now: Instant, + version: u32, + allow_mtud: bool, + rng_seed: [u8; 32], + side_args: SideArgs, + ) -> Self { + let pref_addr_cid = side_args.pref_addr_cid(); + let path_validated = side_args.path_validated(); + let connection_side = ConnectionSide::from(side_args); + let side = connection_side.side(); + let initial_space = PacketSpace { + crypto: Some(crypto.initial_keys(&init_cid, side)), + ..PacketSpace::new(now) + }; + let state = State::Handshake(state::Handshake { + rem_cid_set: side.is_server(), + expected_token: Bytes::new(), + client_hello: None, + }); + let mut rng = StdRng::from_seed(rng_seed); + let mut this = Self { + endpoint_config, + crypto, + handshake_cid: loc_cid, + rem_handshake_cid: rem_cid, + local_cid_state: CidState::new( + cid_gen.cid_len(), + cid_gen.cid_lifetime(), + now, + if pref_addr_cid.is_some() { 2 } else { 1 }, + ), + path: PathData::new(remote, allow_mtud, None, 0, now, &config), + path_counter: 0, + allow_mtud, + local_ip, + prev_path: None, + state, + side: connection_side, + zero_rtt_enabled: false, + zero_rtt_crypto: None, + key_phase: false, + // A small initial key phase size ensures peers that don't handle key updates correctly + // fail sooner rather than later. It's okay for both peers to do this, as the first one + // to perform an update will reset the other's key phase size in `update_keys`, and a + // simultaneous key update by both is just like a regular key update with a really fast + // response. Inspired by quic-go's similar behavior of performing the first key update + // at the 100th short-header packet. 
+ key_phase_size: rng.random_range(10..1000), + peer_params: TransportParameters::default(), + orig_rem_cid: rem_cid, + initial_dst_cid: init_cid, + retry_src_cid: None, + events: VecDeque::new(), + endpoint_events: VecDeque::new(), + spin_enabled: config.allow_spin && rng.random_ratio(7, 8), + spin: false, + spaces: [initial_space, PacketSpace::new(now), PacketSpace::new(now)], + highest_space: SpaceId::Initial, + prev_crypto: None, + next_crypto: None, + accepted_0rtt: false, + permit_idle_reset: true, + idle_timeout: match config.max_idle_timeout { + None | Some(VarInt(0)) => None, + Some(dur) => Some(Duration::from_millis(dur.0)), + }, + timers: TimerTable::default(), + authentication_failures: 0, + error: None, + #[cfg(test)] + packet_number_filter: match config.deterministic_packet_numbers { + false => PacketNumberFilter::new(&mut rng), + true => PacketNumberFilter::disabled(), + }, + #[cfg(not(test))] + packet_number_filter: PacketNumberFilter::new(&mut rng), + + path_responses: PathResponses::default(), + close: false, + + ack_frequency: AckFrequencyState::new(get_max_ack_delay( + &TransportParameters::default(), + )), + + pto_count: 0, + + app_limited: false, + receiving_ecn: false, + total_authed_packets: 0, + + streams: StreamsState::new( + side, + config.max_concurrent_uni_streams, + config.max_concurrent_bidi_streams, + config.send_window, + config.receive_window, + config.stream_receive_window, + ), + datagrams: DatagramState::default(), + config, + rem_cids: CidQueue::new(rem_cid), + rng, + stats: ConnectionStats::default(), + version, + }; + if path_validated { + this.on_path_validated(); + } + if side.is_client() { + // Kick off the connection + this.write_crypto(); + this.init_0rtt(); + } + this + } + + /// Returns the next time at which `handle_timeout` should be called + /// + /// The value returned may change after: + /// - the application performed some I/O on the connection + /// - a call was made to `handle_event` + /// - a call to 
`poll_transmit` returned `Some` + /// - a call was made to `handle_timeout` + #[must_use] + pub fn poll_timeout(&mut self) -> Option { + self.timers.next_timeout() + } + + /// Returns application-facing events + /// + /// Connections should be polled for events after: + /// - a call was made to `handle_event` + /// - a call was made to `handle_timeout` + #[must_use] + pub fn poll(&mut self) -> Option { + if let Some(x) = self.events.pop_front() { + return Some(x); + } + + if let Some(event) = self.streams.poll() { + return Some(Event::Stream(event)); + } + + if let Some(err) = self.error.take() { + return Some(Event::ConnectionLost { reason: err }); + } + + None + } + + /// Return endpoint-facing events + #[must_use] + pub fn poll_endpoint_events(&mut self) -> Option { + self.endpoint_events.pop_front().map(EndpointEvent) + } + + /// Provide control over streams + #[must_use] + pub fn streams(&mut self) -> Streams<'_> { + Streams { + state: &mut self.streams, + conn_state: &self.state, + } + } + + /// Provide control over streams + #[must_use] + pub fn recv_stream(&mut self, id: StreamId) -> RecvStream<'_> { + assert!(id.dir() == Dir::Bi || id.initiator() != self.side.side()); + RecvStream { + id, + state: &mut self.streams, + pending: &mut self.spaces[SpaceId::Data].pending, + } + } + + /// Provide control over streams + #[must_use] + pub fn send_stream(&mut self, id: StreamId) -> SendStream<'_> { + assert!(id.dir() == Dir::Bi || id.initiator() == self.side.side()); + SendStream { + id, + state: &mut self.streams, + pending: &mut self.spaces[SpaceId::Data].pending, + conn_state: &self.state, + } + } + + /// Returns packets to transmit + /// + /// Connections should be polled for transmit after: + /// - the application performed some I/O on the connection + /// - a call was made to `handle_event` + /// - a call was made to `handle_timeout` + /// + /// `max_datagrams` specifies how many datagrams can be returned inside a + /// single Transmit using GSO. 
This must be at least 1. + #[must_use] + pub fn poll_transmit( + &mut self, + now: Instant, + max_datagrams: usize, + buf: &mut Vec, + ) -> Option { + assert!(max_datagrams != 0); + let max_datagrams = match self.config.enable_segmentation_offload { + false => 1, + true => max_datagrams, + }; + + let mut num_datagrams = 0; + // Position in `buf` of the first byte of the current UDP datagram. When coalescing QUIC + // packets, this can be earlier than the start of the current QUIC packet. + let mut datagram_start = 0; + let mut segment_size = usize::from(self.path.current_mtu()); + + if let Some(challenge) = self.send_path_challenge(now, buf) { + return Some(challenge); + } + + // If we need to send a probe, make sure we have something to send. + for space in SpaceId::iter() { + let request_immediate_ack = + space == SpaceId::Data && self.peer_supports_ack_frequency(); + self.spaces[space].maybe_queue_probe(request_immediate_ack, &self.streams); + } + + // Check whether we need to send a close message + let close = match self.state { + State::Drained => { + self.app_limited = true; + return None; + } + State::Draining | State::Closed(_) => { + // self.close is only reset once the associated packet had been + // encoded successfully + if !self.close { + self.app_limited = true; + return None; + } + true + } + _ => false, + }; + + // Check whether we need to send an ACK_FREQUENCY frame + if let Some(config) = &self.config.ack_frequency_config { + self.spaces[SpaceId::Data].pending.ack_frequency = self + .ack_frequency + .should_send_ack_frequency(self.path.rtt.get(), config, &self.peer_params) + && self.highest_space == SpaceId::Data + && self.peer_supports_ack_frequency(); + } + + // Reserving capacity can provide more capacity than we asked for. However, we are not + // allowed to write more than `segment_size`. Therefore the maximum capacity is tracked + // separately. 
+ let mut buf_capacity = 0; + + let mut coalesce = true; + let mut builder_storage: Option = None; + let mut sent_frames = None; + let mut pad_datagram = false; + let mut pad_datagram_to_mtu = false; + let mut congestion_blocked = false; + + // Iterate over all spaces and find data to send + let mut space_idx = 0; + let spaces = [SpaceId::Initial, SpaceId::Handshake, SpaceId::Data]; + // This loop will potentially spend multiple iterations in the same `SpaceId`, + // so we cannot trivially rewrite it to take advantage of `SpaceId::iter()`. + while space_idx < spaces.len() { + let space_id = spaces[space_idx]; + // Number of bytes available for frames if this is a 1-RTT packet. We're guaranteed to + // be able to send an individual frame at least this large in the next 1-RTT + // packet. This could be generalized to support every space, but it's only needed to + // handle large fixed-size frames, which only exist in 1-RTT (application datagrams). We + // don't account for coalesced packets potentially occupying space because frames can + // always spill into the next datagram. + let pn = self.packet_number_filter.peek(&self.spaces[SpaceId::Data]); + let frame_space_1rtt = + segment_size.saturating_sub(self.predict_1rtt_overhead(Some(pn))); + + // Is there data or a close message to send in this space? + let can_send = self.space_can_send(space_id, frame_space_1rtt); + if can_send.is_empty() && (!close || self.spaces[space_id].crypto.is_none()) { + space_idx += 1; + continue; + } + + let mut ack_eliciting = !self.spaces[space_id].pending.is_empty(&self.streams) + || self.spaces[space_id].ping_pending + || self.spaces[space_id].immediate_ack_pending; + if space_id == SpaceId::Data { + ack_eliciting |= self.can_send_1rtt(frame_space_1rtt); + } + + pad_datagram_to_mtu |= space_id == SpaceId::Data && self.config.pad_to_mtu; + + // Can we append more data into the current buffer? 
+ // It is not safe to assume that `buf.len()` is the end of the data, + // since the last packet might not have been finished. + let buf_end = if let Some(builder) = &builder_storage { + buf.len().max(builder.min_size) + builder.tag_len + } else { + buf.len() + }; + + let tag_len = if let Some(ref crypto) = self.spaces[space_id].crypto { + crypto.packet.local.tag_len() + } else if space_id == SpaceId::Data { + self.zero_rtt_crypto.as_ref().expect( + "sending packets in the application data space requires known 0-RTT or 1-RTT keys", + ).packet.tag_len() + } else { + unreachable!("tried to send {:?} packet without keys", space_id) + }; + if !coalesce || buf_capacity - buf_end < MIN_PACKET_SPACE + tag_len { + // We need to send 1 more datagram and extend the buffer for that. + + // Is 1 more datagram allowed? + if num_datagrams >= max_datagrams { + // No more datagrams allowed + break; + } + + // Anti-amplification is only based on `total_sent`, which gets + // updated at the end of this method. Therefore we pass the amount + // of bytes for datagrams that are already created, as well as 1 byte + // for starting another datagram. 
If there is any anti-amplification + // budget left, we always allow a full MTU to be sent + // (see https://github.com/quinn-rs/quinn/issues/1082) + if self + .path + .anti_amplification_blocked(segment_size as u64 * (num_datagrams as u64) + 1) + { + trace!("blocked by anti-amplification"); + break; + } + + // Congestion control and pacing checks + // Tail loss probes must not be blocked by congestion, or a deadlock could arise + if ack_eliciting && self.spaces[space_id].loss_probes == 0 { + // Assume the current packet will get padded to fill the segment + let untracked_bytes = if let Some(builder) = &builder_storage { + buf_capacity - builder.partial_encode.start + } else { + 0 + } as u64; + debug_assert!(untracked_bytes <= segment_size as u64); + + let bytes_to_send = segment_size as u64 + untracked_bytes; + if self.path.in_flight.bytes + bytes_to_send >= self.path.congestion.window() { + space_idx += 1; + congestion_blocked = true; + // We continue instead of breaking here in order to avoid + // blocking loss probes queued for higher spaces. + trace!("blocked by congestion control"); + continue; + } + + // Check whether the next datagram is blocked by pacing + let smoothed_rtt = self.path.rtt.get(); + if let Some(delay) = self.path.pacing.delay( + smoothed_rtt, + bytes_to_send, + self.path.current_mtu(), + self.path.congestion.window(), + now, + ) { + self.timers.set(Timer::Pacing, delay); + congestion_blocked = true; + // Loss probes should be subject to pacing, even though + // they are not congestion controlled. + trace!("blocked by pacing"); + break; + } + } + + // Finish current packet + if let Some(mut builder) = builder_storage.take() { + if pad_datagram { + builder.pad_to(MIN_INITIAL_SIZE); + } + + if num_datagrams > 1 || pad_datagram_to_mtu { + // If too many padding bytes would be required to continue the GSO batch + // after this packet, end the GSO batch here. Ensures that fixed-size frames + // with heterogeneous sizes (e.g. 
application datagrams) won't inadvertently + // waste large amounts of bandwidth. The exact threshold is a bit arbitrary + // and might benefit from further tuning, though there's no universally + // optimal value. + // + // Additionally, if this datagram is a loss probe and `segment_size` is + // larger than `INITIAL_MTU`, then padding it to `segment_size` to continue + // the GSO batch would risk failure to recover from a reduction in path + // MTU. Loss probes are the only packets for which we might grow + // `buf_capacity` by less than `segment_size`. + const MAX_PADDING: usize = 16; + let packet_len_unpadded = cmp::max(builder.min_size, buf.len()) + - datagram_start + + builder.tag_len; + if (packet_len_unpadded + MAX_PADDING < segment_size + && !pad_datagram_to_mtu) + || datagram_start + segment_size > buf_capacity + { + trace!( + "GSO truncated by demand for {} padding bytes or loss probe", + segment_size - packet_len_unpadded + ); + builder_storage = Some(builder); + break; + } + + // Pad the current datagram to GSO segment size so it can be included in the + // GSO batch. + builder.pad_to(segment_size as u16); + } + + builder.finish_and_track(now, self, sent_frames.take(), buf); + + if num_datagrams == 1 { + // Set the segment size for this GSO batch to the size of the first UDP + // datagram in the batch. Larger data that cannot be fragmented + // (e.g. application datagrams) will be included in a future batch. When + // sending large enough volumes of data for GSO to be useful, we expect + // packet sizes to usually be consistent, e.g. populated by max-size STREAM + // frames or uniformly sized datagrams. + segment_size = buf.len(); + // Clip the unused capacity out of the buffer so future packets don't + // overrun + buf_capacity = buf.len(); + + // Check whether the data we planned to send will fit in the reduced segment + // size. If not, bail out and leave it for the next GSO batch so we don't + // end up trying to send an empty packet. 
We can't easily compute the right + // segment size before the original call to `space_can_send`, because at + // that time we haven't determined whether we're going to coalesce with the + // first datagram or potentially pad it to `MIN_INITIAL_SIZE`. + if space_id == SpaceId::Data { + let frame_space_1rtt = + segment_size.saturating_sub(self.predict_1rtt_overhead(Some(pn))); + if self.space_can_send(space_id, frame_space_1rtt).is_empty() { + break; + } + } + } + } + + // Allocate space for another datagram + let next_datagram_size_limit = match self.spaces[space_id].loss_probes { + 0 => segment_size, + _ => { + self.spaces[space_id].loss_probes -= 1; + // Clamp the datagram to at most the minimum MTU to ensure that loss probes + // can get through and enable recovery even if the path MTU has shrank + // unexpectedly. + std::cmp::min(segment_size, usize::from(INITIAL_MTU)) + } + }; + buf_capacity += next_datagram_size_limit; + if buf.capacity() < buf_capacity { + // We reserve the maximum space for sending `max_datagrams` upfront + // to avoid any reallocations if more datagrams have to be appended later on. + // Benchmarks have shown shown a 5-10% throughput improvement + // compared to continuously resizing the datagram buffer. + // While this will lead to over-allocation for small transmits + // (e.g. purely containing ACKs), modern memory allocators + // (e.g. mimalloc and jemalloc) will pool certain allocation sizes + // and therefore this is still rather efficient. + buf.reserve(max_datagrams * segment_size); + } + num_datagrams += 1; + coalesce = true; + pad_datagram = false; + datagram_start = buf.len(); + + debug_assert_eq!( + datagram_start % segment_size, + 0, + "datagrams in a GSO batch must be aligned to the segment size" + ); + } else { + // We can append/coalesce the next packet into the current + // datagram. 
+ // Finish current packet without adding extra padding + if let Some(builder) = builder_storage.take() { + builder.finish_and_track(now, self, sent_frames.take(), buf); + } + } + + debug_assert!(buf_capacity - buf.len() >= MIN_PACKET_SPACE); + + // + // From here on, we've determined that a packet will definitely be sent. + // + + if self.spaces[SpaceId::Initial].crypto.is_some() + && space_id == SpaceId::Handshake + && self.side.is_client() + { + // A client stops both sending and processing Initial packets when it + // sends its first Handshake packet. + self.discard_space(now, SpaceId::Initial); + } + if let Some(ref mut prev) = self.prev_crypto { + prev.update_unacked = false; + } + + debug_assert!( + builder_storage.is_none() && sent_frames.is_none(), + "Previous packet must have been finished" + ); + + let builder = builder_storage.insert(PacketBuilder::new( + now, + space_id, + self.rem_cids.active(), + buf, + buf_capacity, + datagram_start, + ack_eliciting, + self, + )?); + coalesce = coalesce && !builder.short_header; + + // https://tools.ietf.org/html/draft-ietf-quic-transport-34#section-14.1 + pad_datagram |= + space_id == SpaceId::Initial && (self.side.is_client() || ack_eliciting); + + if close { + trace!("sending CONNECTION_CLOSE"); + // Encode ACKs before the ConnectionClose message, to give the receiver + // a better approximate on what data has been processed. This is + // especially important with ack delay, since the peer might not + // have gotten any other ACK for the data earlier on. + if !self.spaces[space_id].pending_acks.ranges().is_empty() { + Self::populate_acks( + now, + self.receiving_ecn, + &mut SentFrames::default(), + &mut self.spaces[space_id], + buf, + &mut self.stats, + ); + } + + // Since there only 64 ACK frames there will always be enough space + // to encode the ConnectionClose frame too. However we still have the + // check here to prevent crashes if something changes. 
+ debug_assert!( + buf.len() + frame::ConnectionClose::SIZE_BOUND < builder.max_size, + "ACKs should leave space for ConnectionClose" + ); + if buf.len() + frame::ConnectionClose::SIZE_BOUND < builder.max_size { + let max_frame_size = builder.max_size - buf.len(); + match self.state { + State::Closed(state::Closed { ref reason }) => { + if space_id == SpaceId::Data || reason.is_transport_layer() { + reason.encode(buf, max_frame_size) + } else { + frame::ConnectionClose { + error_code: TransportErrorCode::APPLICATION_ERROR, + frame_type: None, + reason: Bytes::new(), + } + .encode(buf, max_frame_size) + } + } + State::Draining => frame::ConnectionClose { + error_code: TransportErrorCode::NO_ERROR, + frame_type: None, + reason: Bytes::new(), + } + .encode(buf, max_frame_size), + _ => unreachable!( + "tried to make a close packet when the connection wasn't closed" + ), + } + } + if space_id == self.highest_space { + // Don't send another close packet + self.close = false; + // `CONNECTION_CLOSE` is the final packet + break; + } else { + // Send a close frame in every possible space for robustness, per RFC9000 + // "Immediate Close during the Handshake". Don't bother trying to send anything + // else. + space_idx += 1; + continue; + } + } + + // Send an off-path PATH_RESPONSE. Prioritized over on-path data to ensure that path + // validation can occur while the link is saturated. + if space_id == SpaceId::Data && num_datagrams == 1 { + if let Some((token, remote)) = self.path_responses.pop_off_path(self.path.remote) { + // `unwrap` guaranteed to succeed because `builder_storage` was populated just + // above. 
+ let mut builder = builder_storage.take().unwrap(); + trace!("PATH_RESPONSE {:08x} (off-path)", token); + buf.write(frame::FrameType::PATH_RESPONSE); + buf.write(token); + self.stats.frame_tx.path_response += 1; + builder.pad_to(MIN_INITIAL_SIZE); + builder.finish_and_track( + now, + self, + Some(SentFrames { + non_retransmits: true, + ..SentFrames::default() + }), + buf, + ); + self.stats.udp_tx.on_sent(1, buf.len()); + return Some(Transmit { + destination: remote, + size: buf.len(), + ecn: None, + segment_size: None, + src_ip: self.local_ip, + }); + } + } + + let sent = + self.populate_packet(now, space_id, buf, builder.max_size, builder.exact_number); + + // ACK-only packets should only be sent when explicitly allowed. If we write them due to + // any other reason, there is a bug which leads to one component announcing write + // readiness while not writing any data. This degrades performance. The condition is + // only checked if the full MTU is available and when potentially large fixed-size + // frames aren't queued, so that lack of space in the datagram isn't the reason for just + // writing ACKs. + debug_assert!( + !(sent.is_ack_only(&self.streams) + && !can_send.acks + && can_send.other + && (buf_capacity - builder.datagram_start) == self.path.current_mtu() as usize + && self.datagrams.outgoing.is_empty()), + "SendableFrames was {can_send:?}, but only ACKs have been written" + ); + pad_datagram |= sent.requires_padding; + + if sent.largest_acked.is_some() { + self.spaces[space_id].pending_acks.acks_sent(); + self.timers.stop(Timer::MaxAckDelay); + } + + // Keep information about the packet around until it gets finalized + sent_frames = Some(sent); + + // Don't increment space_idx. + // We stay in the current space and check if there is more data to send. 
+ } + + // Finish the last packet + if let Some(mut builder) = builder_storage { + if pad_datagram { + builder.pad_to(MIN_INITIAL_SIZE); + } + + // If this datagram is a loss probe and `segment_size` is larger than `INITIAL_MTU`, + // then padding it to `segment_size` would risk failure to recover from a reduction in + // path MTU. + // Loss probes are the only packets for which we might grow `buf_capacity` + // by less than `segment_size`. + if pad_datagram_to_mtu && buf_capacity >= datagram_start + segment_size { + builder.pad_to(segment_size as u16); + } + + let last_packet_number = builder.exact_number; + builder.finish_and_track(now, self, sent_frames, buf); + self.path + .congestion + .on_sent(now, buf.len() as u64, last_packet_number); + + self.config.qlog_sink.emit_recovery_metrics( + self.pto_count, + &mut self.path, + now, + self.orig_rem_cid, + ); + } + + self.app_limited = buf.is_empty() && !congestion_blocked; + + // Send MTU probe if necessary + if buf.is_empty() && self.state.is_established() { + let space_id = SpaceId::Data; + let probe_size = self + .path + .mtud + .poll_transmit(now, self.packet_number_filter.peek(&self.spaces[space_id]))?; + + let buf_capacity = probe_size as usize; + buf.reserve(buf_capacity); + + let mut builder = PacketBuilder::new( + now, + space_id, + self.rem_cids.active(), + buf, + buf_capacity, + 0, + true, + self, + )?; + + // We implement MTU probes as ping packets padded up to the probe size + buf.write(frame::FrameType::PING); + self.stats.frame_tx.ping += 1; + + // If supported by the peer, we want no delays to the probe's ACK + if self.peer_supports_ack_frequency() { + buf.write(frame::FrameType::IMMEDIATE_ACK); + self.stats.frame_tx.immediate_ack += 1; + } + + builder.pad_to(probe_size); + let sent_frames = SentFrames { + non_retransmits: true, + ..Default::default() + }; + builder.finish_and_track(now, self, Some(sent_frames), buf); + + self.stats.path.sent_plpmtud_probes += 1; + num_datagrams = 1; + + 
            trace!(?probe_size, "writing MTUD probe");
        }

        if buf.is_empty() {
            return None;
        }

        trace!("sending {} bytes in {} datagrams", buf.len(), num_datagrams);
        self.path.total_sent = self.path.total_sent.saturating_add(buf.len() as u64);

        self.stats.udp_tx.on_sent(num_datagrams as u64, buf.len());

        Some(Transmit {
            destination: self.path.remote,
            size: buf.len(),
            ecn: if self.path.sending_ecn {
                Some(EcnCodepoint::Ect0)
            } else {
                None
            },
            // A single datagram needs no segmentation hint; GSO batches report their size
            segment_size: match num_datagrams {
                1 => None,
                _ => Some(segment_size),
            },
            src_ip: self.local_ip,
        })
    }

    /// Send PATH_CHALLENGE for a previous path if necessary
    ///
    /// Returns a `Transmit` carrying a padded PATH_CHALLENGE addressed to the previous
    /// path's remote, or `None` when no challenge is pending for a previous path.
    fn send_path_challenge(&mut self, now: Instant, buf: &mut Vec<u8>) -> Option<Transmit> {
        let (prev_cid, prev_path) = self.prev_path.as_mut()?;
        if !prev_path.challenge_pending {
            return None;
        }
        prev_path.challenge_pending = false;
        let token = prev_path
            .challenge
            .expect("previous path challenge pending without token");
        let destination = prev_path.remote;
        debug_assert_eq!(
            self.highest_space,
            SpaceId::Data,
            "PATH_CHALLENGE queued without 1-RTT keys"
        );
        buf.reserve(MIN_INITIAL_SIZE as usize);

        let buf_capacity = buf.capacity();

        // Use the previous CID to avoid linking the new path with the previous path. We
        // don't bother accounting for possible retirement of that prev_cid because this is
        // sent once, immediately after migration, when the CID is known to be valid. Even
        // if a post-migration packet caused the CID to be retired, it's fair to pretend
        // this is sent first.
        let mut builder = PacketBuilder::new(
            now,
            SpaceId::Data,
            *prev_cid,
            buf,
            buf_capacity,
            0,
            false,
            self,
        )?;
        trace!("validating previous path with PATH_CHALLENGE {:08x}", token);
        buf.write(frame::FrameType::PATH_CHALLENGE);
        buf.write(token);
        self.stats.frame_tx.path_challenge += 1;

        // An endpoint MUST expand datagrams that contain a PATH_CHALLENGE frame
        // to at least the smallest allowed maximum datagram size of 1200 bytes,
        // unless the anti-amplification limit for the path does not permit
        // sending a datagram of this size
        builder.pad_to(MIN_INITIAL_SIZE);

        builder.finish(self, now, buf);
        self.stats.udp_tx.on_sent(1, buf.len());

        Some(Transmit {
            destination,
            size: buf.len(),
            ecn: None,
            segment_size: None,
            src_ip: self.local_ip,
        })
    }

    /// Indicate what types of frames are ready to send for the given space
    fn space_can_send(&self, space_id: SpaceId, frame_space_1rtt: usize) -> SendableFrames {
        if self.spaces[space_id].crypto.is_none()
            && (space_id != SpaceId::Data
                || self.zero_rtt_crypto.is_none()
                || self.side.is_server())
        {
            // No keys available for this space
            return SendableFrames::empty();
        }
        let mut can_send = self.spaces[space_id].can_send(&self.streams);
        if space_id == SpaceId::Data {
            can_send.other |= self.can_send_1rtt(frame_space_1rtt);
        }
        can_send
    }

    /// Process `ConnectionEvent`s generated by the associated `Endpoint`
    ///
    /// Will execute protocol logic upon receipt of a connection event, in turn preparing signals
    /// (including application `Event`s, `EndpointEvent`s and outgoing datagrams) that should be
    /// extracted through the relevant methods.
    pub fn handle_event(&mut self, event: ConnectionEvent) {
        use ConnectionEventInner::*;
        match event.0 {
            Datagram(DatagramConnectionEvent {
                now,
                remote,
                ecn,
                first_decode,
                remaining,
            }) => {
                // If this packet could initiate a migration and we're a client or a server that
                // forbids migration, drop the datagram. This could be relaxed to heuristically
                // permit NAT-rebinding-like migration.
                if remote != self.path.remote && !self.side.remote_may_migrate() {
                    trace!("discarding packet from unrecognized peer {}", remote);
                    return;
                }

                let was_anti_amplification_blocked = self.path.anti_amplification_blocked(1);

                self.stats.udp_rx.datagrams += 1;
                self.stats.udp_rx.bytes += first_decode.len() as u64;
                // Capture the length before `first_decode` is consumed below
                let data_len = first_decode.len();

                self.handle_decode(now, remote, ecn, first_decode);
                // The current `path` might have changed inside `handle_decode`,
                // since the packet could have triggered a migration. Make sure
                // the data received is accounted for the most recent path by accessing
                // `path` after `handle_decode`.
                self.path.total_recvd = self.path.total_recvd.saturating_add(data_len as u64);

                // Any coalesced packets that followed the first one in the datagram
                if let Some(data) = remaining {
                    self.stats.udp_rx.bytes += data.len() as u64;
                    self.handle_coalesced(now, remote, ecn, data);
                }

                self.config.qlog_sink.emit_recovery_metrics(
                    self.pto_count,
                    &mut self.path,
                    now,
                    self.orig_rem_cid,
                );

                if was_anti_amplification_blocked {
                    // A prior attempt to set the loss detection timer may have failed due to
                    // anti-amplification, so ensure it's set now. Prevents a handshake deadlock if
                    // the server's first flight is lost.
                    self.set_loss_detection_timer(now);
                }
            }
            NewIdentifiers(ids, now) => {
                self.local_cid_state.new_cids(&ids, now);
                ids.into_iter().rev().for_each(|frame| {
                    self.spaces[SpaceId::Data].pending.new_cids.push(frame);
                });
                // Update Timer::PushNewCid
                if self
                    .timers
                    .get(Timer::PushNewCid)
                    .map_or(true, |x| x <= now)
                {
                    self.reset_cid_retirement();
                }
            }
        }
    }

    /// Process timer expirations
    ///
    /// Executes protocol logic, potentially preparing signals (including application `Event`s,
    /// `EndpointEvent`s and outgoing datagrams) that should be extracted through the relevant
    /// methods.
    ///
    /// It is most efficient to call this immediately after the system clock reaches the latest
    /// `Instant` that was output by `poll_timeout`; however spurious extra calls will simply
    /// no-op and therefore are safe.
    pub fn handle_timeout(&mut self, now: Instant) {
        for &timer in &Timer::VALUES {
            if !self.timers.is_expired(timer, now) {
                continue;
            }
            self.timers.stop(timer);
            trace!(timer = ?timer, "timeout");
            match timer {
                Timer::Close => {
                    self.state = State::Drained;
                    self.endpoint_events.push_back(EndpointEventInner::Drained);
                }
                Timer::Idle => {
                    self.kill(ConnectionError::TimedOut);
                }
                Timer::KeepAlive => {
                    trace!("sending keep-alive");
                    self.ping();
                }
                Timer::LossDetection => {
                    self.on_loss_detection_timeout(now);

                    self.config.qlog_sink.emit_recovery_metrics(
                        self.pto_count,
                        &mut self.path,
                        now,
                        self.orig_rem_cid,
                    );
                }
                Timer::KeyDiscard => {
                    // Drop 0-RTT keys and the previous 1-RTT key generation
                    self.zero_rtt_crypto = None;
                    self.prev_crypto = None;
                }
                Timer::PathValidation => {
                    debug!("path validation failed");
                    // Fall back to the previous path, if one was saved
                    if let Some((_, prev)) = self.prev_path.take() {
                        self.path = prev;
                    }
                    self.path.challenge = None;
                    self.path.challenge_pending = false;
                }
                Timer::Pacing => trace!("pacing timer expired"),
                Timer::PushNewCid => {
                    // Update `retire_prior_to` field in NEW_CONNECTION_ID frame
                    let num_new_cid = self.local_cid_state.on_cid_timeout().into();
                    if !self.state.is_closed() {
                        trace!(
                            "push a new cid to peer RETIRE_PRIOR_TO field {}",
                            self.local_cid_state.retire_prior_to()
                        );
                        self.endpoint_events
                            .push_back(EndpointEventInner::NeedIdentifiers(now, num_new_cid));
                    }
                }
                Timer::MaxAckDelay => {
                    trace!("max ack delay reached");
                    // This timer is only armed in the Data space
                    self.spaces[SpaceId::Data]
                        .pending_acks
                        .on_max_ack_delay_timeout()
                }
            }
        }
    }

    /// Close a connection immediately
    ///
    /// This does not ensure delivery of outstanding data. It is the application's responsibility to
    /// call this only when all important communications have been completed, e.g. by calling
    /// [`SendStream::finish`] on outstanding streams and waiting for the corresponding
    /// [`StreamEvent::Finished`] event.
    ///
    /// If [`Streams::send_streams`] returns 0, all outstanding stream data has been
    /// delivered. There may still be data from the peer that has not been received.
    ///
    /// [`StreamEvent::Finished`]: crate::StreamEvent::Finished
    pub fn close(&mut self, now: Instant, error_code: VarInt, reason: Bytes) {
        self.close_inner(
            now,
            Close::Application(frame::ApplicationClose { error_code, reason }),
        )
    }

    /// Transition to the closed state with `reason` and arm the close timer,
    /// unless the connection is already closed (in which case this is a no-op).
    fn close_inner(&mut self, now: Instant, reason: Close) {
        let was_closed = self.state.is_closed();
        if !was_closed {
            self.close_common();
            self.set_close_timer(now);
            // Flag that a CONNECTION_CLOSE still needs to be transmitted
            self.close = true;
            self.state = State::Closed(state::Closed { reason });
        }
    }

    /// Control datagrams
    pub fn datagrams(&mut self) -> Datagrams<'_> {
        Datagrams { conn: self }
    }

    /// Returns connection statistics
    pub fn stats(&self) -> ConnectionStats {
        // Copy the accumulated counters, then fill in the live path metrics
        let mut stats = self.stats;
        stats.path.rtt = self.path.rtt.get();
        stats.path.cwnd = self.path.congestion.window();
        stats.path.current_mtu = self.path.mtud.current_mtu();

        stats
    }

    /// Ping the remote endpoint
    ///
    /// Causes an ACK-eliciting packet to be transmitted.
    pub fn ping(&mut self) {
        self.spaces[self.highest_space].ping_pending = true;
    }

    /// Update traffic keys spontaneously
    ///
    /// This can be useful for testing key updates, as they otherwise only happen infrequently.
    pub fn force_key_update(&mut self) {
        if !self.state.is_established() {
            debug!("ignoring forced key update in illegal state");
            return;
        }
        if self.prev_crypto.is_some() {
            // We already just updated, or are currently updating, the keys. Concurrent key updates
            // are illegal.
            debug!("ignoring redundant forced key update");
            return;
        }
        self.update_keys(None, false);
    }

    // Compatibility wrapper for quinn < 0.11.7. Remove for 0.12.
    #[doc(hidden)]
    #[deprecated]
    pub fn initiate_key_update(&mut self) {
        self.force_key_update();
    }

    /// Get a session reference
    pub fn crypto_session(&self) -> &dyn crypto::Session {
        &*self.crypto
    }

    /// Whether the connection is in the process of being established
    ///
    /// If this returns `false`, the connection may be either established or closed, signaled by the
    /// emission of a `Connected` or `ConnectionLost` message respectively.
    pub fn is_handshaking(&self) -> bool {
        self.state.is_handshake()
    }

    /// Whether the connection is closed
    ///
    /// Closed connections cannot transport any further data. A connection becomes closed when
    /// either peer application intentionally closes it, or when either transport layer detects an
    /// error such as a time-out or certificate validation failure.
    ///
    /// A `ConnectionLost` event is emitted with details when the connection becomes closed.
    pub fn is_closed(&self) -> bool {
        self.state.is_closed()
    }

    /// Whether there is no longer any need to keep the connection around
    ///
    /// Closed connections become drained after a brief timeout to absorb any remaining in-flight
    /// packets from the peer. All drained connections have been closed.
    pub fn is_drained(&self) -> bool {
        self.state.is_drained()
    }

    /// For clients, if the peer accepted the 0-RTT data packets
    ///
    /// The value is meaningless until after the handshake completes.
    pub fn accepted_0rtt(&self) -> bool {
        self.accepted_0rtt
    }

    /// Whether 0-RTT is/was possible during the handshake
    pub fn has_0rtt(&self) -> bool {
        self.zero_rtt_enabled
    }

    /// Whether there are any pending retransmits
    pub fn has_pending_retransmits(&self) -> bool {
        !self.spaces[SpaceId::Data].pending.is_empty(&self.streams)
    }

    /// Look up whether we're the client or server of this Connection
    pub fn side(&self) -> Side {
        self.side.side()
    }

    /// The latest socket address for this connection's peer
    pub fn remote_address(&self) -> SocketAddr {
        self.path.remote
    }

    /// The local IP address which was used when the peer established
    /// the connection
    ///
    /// This can be different from the address the endpoint is bound to, in case
    /// the endpoint is bound to a wildcard address like `0.0.0.0` or `::`.
    ///
    /// This will return `None` for clients, or when no `local_ip` was passed to
    /// [`Endpoint::handle()`](crate::Endpoint::handle) for the datagrams establishing this
    /// connection.
    pub fn local_ip(&self) -> Option<IpAddr> {
        self.local_ip
    }

    /// Current best estimate of this connection's latency (round-trip-time)
    pub fn rtt(&self) -> Duration {
        self.path.rtt.get()
    }

    /// Current state of this connection's congestion controller, for debugging purposes
    pub fn congestion_state(&self) -> &dyn Controller {
        self.path.congestion.as_ref()
    }

    /// Resets path-specific settings.
    ///
    /// This will force-reset several subsystems related to a specific network path.
    /// Currently this is the congestion controller, round-trip estimator, and the MTU
    /// discovery.
    ///
    /// This is useful when it is known the underlying network path has changed and the old
    /// state of these subsystems is no longer valid or optimal. In this case it might be
    /// faster or reduce loss to settle on optimal values by restarting from the initial
    /// configuration in the [`TransportConfig`].
    pub fn path_changed(&mut self, now: Instant) {
        self.path.reset(now, &self.config);
    }

    /// Modify the number of remotely initiated streams that may be concurrently open
    ///
    /// No streams may be opened by the peer unless fewer than `count` are already open. Large
    /// `count`s increase both minimum and worst-case memory consumption.
    pub fn set_max_concurrent_streams(&mut self, dir: Dir, count: VarInt) {
        self.streams.set_max_concurrent(dir, count);
        // If the limit was reduced, then a flow control update previously deemed insignificant may
        // now be significant.
        let pending = &mut self.spaces[SpaceId::Data].pending;
        self.streams.queue_max_stream_id(pending);
    }

    /// Current number of remotely initiated streams that may be concurrently open
    ///
    /// If the target for this limit is reduced using [`set_max_concurrent_streams`](Self::set_max_concurrent_streams),
    /// it will not change immediately, even if fewer streams are open. Instead, it will
    /// decrement by one for each time a remotely initiated stream of matching directionality is closed.
    pub fn max_concurrent_streams(&self, dir: Dir) -> u64 {
        self.streams.max_concurrent(dir)
    }

    /// See [`TransportConfig::send_window()`]
    pub fn set_send_window(&mut self, send_window: u64) {
        self.streams.set_send_window(send_window);
    }

    /// See [`TransportConfig::receive_window()`]
    pub fn set_receive_window(&mut self, receive_window: VarInt) {
        if self.streams.set_receive_window(receive_window) {
            self.spaces[SpaceId::Data].pending.max_data = true;
        }
    }

    /// Process an ACK frame received in `space`
    ///
    /// Updates RTT estimates, notifies congestion control and MTU discovery, triggers
    /// loss detection, and validates ECN feedback. Returns a `PROTOCOL_VIOLATION`
    /// error if the peer acknowledges a packet number we never sent.
    fn on_ack_received(
        &mut self,
        now: Instant,
        space: SpaceId,
        ack: frame::Ack,
    ) -> Result<(), TransportError> {
        if ack.largest >= self.spaces[space].next_packet_number {
            return Err(TransportError::PROTOCOL_VIOLATION("unsent packet acked"));
        }
        let new_largest = {
            let space = &mut self.spaces[space];
            if space
                .largest_acked_packet
                .map_or(true, |pn| ack.largest > pn)
            {
                space.largest_acked_packet = Some(ack.largest);
                if let Some(info) = space.sent_packets.get(&ack.largest) {
                    // This should always succeed, but a misbehaving peer might ACK a packet we
                    // haven't sent. At worst, that will result in us spuriously reducing the
                    // congestion window.
                    space.largest_acked_packet_sent = info.time_sent;
                }
                true
            } else {
                false
            }
        };

        // Avoid DoS from unreasonably huge ack ranges by filtering out just the new acks.
        let mut newly_acked = ArrayRangeSet::new();
        for range in ack.iter() {
            self.packet_number_filter.check_ack(space, range.clone())?;
            for (&pn, _) in self.spaces[space].sent_packets.range(range) {
                newly_acked.insert_one(pn);
            }
        }

        if newly_acked.is_empty() {
            return Ok(());
        }

        let mut ack_eliciting_acked = false;
        for packet in newly_acked.elts() {
            if let Some(info) = self.spaces[space].take(packet) {
                if let Some(acked) = info.largest_acked {
                    // Assume ACKs for all packets below the largest acknowledged in `packet` have
                    // been received. This can cause the peer to spuriously retransmit if some of
                    // our earlier ACKs were lost, but allows for simpler state tracking. See
                    // discussion at
                    // https://www.rfc-editor.org/rfc/rfc9000.html#name-limiting-ranges-by-tracking
                    self.spaces[space].pending_acks.subtract_below(acked);
                }
                ack_eliciting_acked |= info.ack_eliciting;

                // Notify MTU discovery that a packet was acked, because it might be an MTU probe
                let mtu_updated = self.path.mtud.on_acked(space, packet, info.size);
                if mtu_updated {
                    self.path
                        .congestion
                        .on_mtu_update(self.path.mtud.current_mtu());
                }

                // Notify ack frequency that a packet was acked, because it might contain an ACK_FREQUENCY frame
                self.ack_frequency.on_acked(packet);

                self.on_packet_acked(now, info);
            }
        }

        self.path.congestion.on_end_acks(
            now,
            self.path.in_flight.bytes,
            self.app_limited,
            self.spaces[space].largest_acked_packet,
        );

        if new_largest && ack_eliciting_acked {
            // Only ACKs of the new largest packet yield a valid RTT sample
            let ack_delay = if space != SpaceId::Data {
                Duration::from_micros(0)
            } else {
                cmp::min(
                    self.ack_frequency.peer_max_ack_delay,
                    Duration::from_micros(ack.delay << self.peer_params.ack_delay_exponent.0),
                )
            };
            let rtt = instant_saturating_sub(now, self.spaces[space].largest_acked_packet_sent);
            self.path.rtt.update(ack_delay, rtt);
            if self.path.first_packet_after_rtt_sample.is_none() {
                self.path.first_packet_after_rtt_sample =
                    Some((space, self.spaces[space].next_packet_number));
            }
        }

        // Must be called before crypto/pto_count are clobbered
        self.detect_lost_packets(now, space, true);

        if self.peer_completed_address_validation() {
            self.pto_count = 0;
        }

        // Explicit congestion notification
        if self.path.sending_ecn {
            if let Some(ecn) = ack.ecn {
                // We only examine ECN counters from ACKs that we are certain we received in transmit
                // order, allowing us to compute an increase in ECN counts to compare against the number
                // of newly acked packets that remains well-defined in the presence of arbitrary packet
                // reordering.
                if new_largest {
                    let sent = self.spaces[space].largest_acked_packet_sent;
                    self.process_ecn(now, space, newly_acked.len() as u64, ecn, sent);
                }
            } else {
                // We always start out sending ECN, so any ack that doesn't acknowledge it disables it.
                debug!("ECN not acknowledged by peer");
                self.path.sending_ecn = false;
            }
        }

        self.set_loss_detection_timer(now);
        Ok(())
    }

    /// Process a new ECN block from an in-order ACK
    fn process_ecn(
        &mut self,
        now: Instant,
        space: SpaceId,
        newly_acked: u64,
        ecn: frame::EcnCounts,
        largest_sent_time: Instant,
    ) {
        match self.spaces[space].detect_ecn(newly_acked, ecn) {
            Err(e) => {
                debug!("halting ECN due to verification failure: {}", e);
                self.path.sending_ecn = false;
                // Wipe out the existing value because it might be garbage and could interfere with
                // future attempts to use ECN on new paths.
                self.spaces[space].ecn_feedback = frame::EcnCounts::ZERO;
            }
            Ok(false) => {}
            Ok(true) => {
                self.stats.path.congestion_events += 1;
                self.path
                    .congestion
                    .on_congestion_event(now, largest_sent_time, false, 0);
            }
        }
    }

    // Not timing-aware, so it's safe to call this for inferred acks, such as arise from
    // high-latency handshakes
    fn on_packet_acked(&mut self, now: Instant, info: SentPacket) {
        self.remove_in_flight(&info);
        if info.ack_eliciting && self.path.challenge.is_none() {
            // Only pass ACKs to the congestion controller if we are not validating the current
            // path, so as to ignore any ACKs from older paths still coming in.
            self.path.congestion.on_ack(
                now,
                info.time_sent,
                info.size.into(),
                self.app_limited,
                &self.path.rtt,
            );
        }

        // Update state for confirmed delivery of frames
        if let Some(retransmits) = info.retransmits.get() {
            for (id, _) in retransmits.reset_stream.iter() {
                self.streams.reset_acked(*id);
            }
        }

        for frame in info.stream_frames {
            self.streams.received_ack_of(frame);
        }
    }

    /// Arm `Timer::KeyDiscard` to drop superseded keys three PTOs from now
    /// (or from when the key update was acknowledged, for 1-RTT key updates)
    fn set_key_discard_timer(&mut self, now: Instant, space: SpaceId) {
        let start = if self.zero_rtt_crypto.is_some() {
            now
        } else {
            self.prev_crypto
                .as_ref()
                .expect("no previous keys")
                .end_packet
                .as_ref()
                .expect("update not acknowledged yet")
                .1
        };
        self.timers
            .set(Timer::KeyDiscard, start + self.pto(space) * 3);
    }

    /// Handle expiry of `Timer::LossDetection`: declare time-threshold losses if a loss
    /// time was pending, otherwise fire a probe timeout (PTO) and queue loss probes
    fn on_loss_detection_timeout(&mut self, now: Instant) {
        if let Some((_, pn_space)) = self.loss_time_and_space() {
            // Time threshold loss Detection
            self.detect_lost_packets(now, pn_space, false);
            self.set_loss_detection_timer(now);
            return;
        }

        let (_, space) = match self.pto_time_and_space(now) {
            Some(x) => x,
            None => {
                error!("PTO expired while unset");
                return;
            }
        };
        trace!(
            in_flight = self.path.in_flight.bytes,
            count = self.pto_count,
            ?space,
            "PTO fired"
        );

        let count = match self.path.in_flight.ack_eliciting {
            // A PTO when we're not expecting any ACKs must be due to handshake anti-amplification
            // deadlock preventions
            0 => {
                debug_assert!(!self.peer_completed_address_validation());
                1
            }
            // Conventional loss probe
            _ => 2,
        };
        self.spaces[space].loss_probes = self.spaces[space].loss_probes.saturating_add(count);
        self.pto_count = self.pto_count.saturating_add(1);
        self.set_loss_detection_timer(now);
    }

    /// Declare packets in `pn_space` lost according to the time and packet-reordering
    /// thresholds, queue their frames for retransmission, and detect persistent
    /// congestion. `due_to_ack` is true when triggered by ACK receipt rather than timer.
    fn detect_lost_packets(&mut self, now: Instant, pn_space: SpaceId, due_to_ack: bool) {
        let mut lost_packets = Vec::<u64>::new();
        let mut lost_mtu_probe = None;
        let in_flight_mtu_probe = self.path.mtud.in_flight_mtu_probe();
        let rtt = self.path.rtt.conservative();
        let loss_delay = cmp::max(rtt.mul_f32(self.config.time_threshold), TIMER_GRANULARITY);

        // Packets sent before this time are deemed lost.
        let lost_send_time = now.checked_sub(loss_delay).unwrap();
        let largest_acked_packet = self.spaces[pn_space].largest_acked_packet.unwrap();
        let packet_threshold = self.config.packet_threshold as u64;
        let mut size_of_lost_packets = 0u64;

        // InPersistentCongestion: Determine if all packets in the time period before the newest
        // lost packet, including the edges, are marked lost. PTO computation must always
        // include max ACK delay, i.e. operate as if in Data space (see RFC9001 §7.6.1).
        let congestion_period =
            self.pto(SpaceId::Data) * self.config.persistent_congestion_threshold;
        let mut persistent_congestion_start: Option<Instant> = None;
        let mut prev_packet = None;
        let mut in_persistent_congestion = false;

        let space = &mut self.spaces[pn_space];
        space.loss_time = None;

        for (&packet, info) in space.sent_packets.range(0..largest_acked_packet) {
            if prev_packet != Some(packet.wrapping_sub(1)) {
                // An intervening packet was acknowledged
                persistent_congestion_start = None;
            }

            if info.time_sent <= lost_send_time || largest_acked_packet >= packet + packet_threshold
            {
                if Some(packet) == in_flight_mtu_probe {
                    // Lost MTU probes are not included in `lost_packets`, because they should not
                    // trigger a congestion control response
                    lost_mtu_probe = in_flight_mtu_probe;
                } else {
                    lost_packets.push(packet);
                    size_of_lost_packets += info.size as u64;
                    if info.ack_eliciting && due_to_ack {
                        match persistent_congestion_start {
                            // Two ACK-eliciting packets lost more than congestion_period apart, with no
                            // ACKed packets in between
                            Some(start) if info.time_sent - start > congestion_period => {
                                in_persistent_congestion = true;
                            }
                            // Persistent congestion must start after the first RTT sample
                            None if self
                                .path
                                .first_packet_after_rtt_sample
                                .is_some_and(|x| x < (pn_space, packet)) =>
                            {
                                persistent_congestion_start = Some(info.time_sent);
                            }
                            _ => {}
                        }
                    }
                }
            } else {
                // Not yet lost; remember when it would cross the time threshold
                let next_loss_time = info.time_sent + loss_delay;
                space.loss_time = Some(
                    space
                        .loss_time
                        .map_or(next_loss_time, |x| cmp::min(x, next_loss_time)),
                );
                persistent_congestion_start = None;
            }

            prev_packet = Some(packet);
        }

        // OnPacketsLost
        if let Some(largest_lost) = lost_packets.last().cloned() {
            let old_bytes_in_flight = self.path.in_flight.bytes;
            let largest_lost_sent = self.spaces[pn_space].sent_packets[&largest_lost].time_sent;
            self.stats.path.lost_packets += lost_packets.len() as u64;
            self.stats.path.lost_bytes += size_of_lost_packets;
            trace!(
                "packets lost: {:?}, bytes lost: {}",
                lost_packets, size_of_lost_packets
            );

            for &packet in &lost_packets {
                let info = self.spaces[pn_space].take(packet).unwrap(); // safe: lost_packets is populated just above
                self.config.qlog_sink.emit_packet_lost(
                    packet,
                    &info,
                    lost_send_time,
                    pn_space,
                    now,
                    self.orig_rem_cid,
                );
                self.remove_in_flight(&info);
                for frame in info.stream_frames {
                    self.streams.retransmit(frame);
                }
                self.spaces[pn_space].pending |= info.retransmits;
                self.path.mtud.on_non_probe_lost(packet, info.size);
            }

            if self.path.mtud.black_hole_detected(now) {
                self.stats.path.black_holes_detected += 1;
                self.path
                    .congestion
                    .on_mtu_update(self.path.mtud.current_mtu());
                if let Some(max_datagram_size) = self.datagrams().max_size() {
                    self.datagrams.drop_oversized(max_datagram_size);
                }
            }

            // Don't apply congestion penalty for lost ack-only packets
            let lost_ack_eliciting = old_bytes_in_flight != self.path.in_flight.bytes;

            if lost_ack_eliciting {
                self.stats.path.congestion_events += 1;
                self.path.congestion.on_congestion_event(
                    now,
                    largest_lost_sent,
                    in_persistent_congestion,
                    size_of_lost_packets,
                );
            }
        }

        // Handle a lost MTU probe
        if let Some(packet) = lost_mtu_probe {
            let info = self.spaces[SpaceId::Data].take(packet).unwrap(); // safe: lost_mtu_probe is omitted from lost_packets, and therefore must not have been removed yet
            self.remove_in_flight(&info);
            self.path.mtud.on_probe_lost();
            self.stats.path.lost_plpmtud_probes += 1;
        }
    }

    /// Earliest pending time-threshold loss deadline across all spaces, if any
    fn loss_time_and_space(&self) -> Option<(Instant, SpaceId)> {
        SpaceId::iter()
            .filter_map(|id| Some((self.spaces[id].loss_time?, id)))
            .min_by_key(|&(time, _)| time)
    }

    /// Next probe-timeout expiry and the packet number space it applies to, if any
    fn pto_time_and_space(&self, now: Instant) -> Option<(Instant, SpaceId)> {
        let backoff = 2u32.pow(self.pto_count.min(MAX_BACKOFF_EXPONENT));
        let mut duration = self.path.rtt.pto_base() * backoff;

        if self.path.in_flight.ack_eliciting == 0 {
            debug_assert!(!self.peer_completed_address_validation());
            let space = match self.highest_space {
                SpaceId::Handshake => SpaceId::Handshake,
                _ => SpaceId::Initial,
            };
            return Some((now + duration, space));
        }

        let mut result = None;
        for space in SpaceId::iter() {
            if !self.spaces[space].has_in_flight() {
                continue;
            }
            if space == SpaceId::Data {
                // Skip ApplicationData until handshake completes.
                if self.is_handshaking() {
                    return result;
                }
                // Include max_ack_delay and backoff for ApplicationData.
                duration += self.ack_frequency.max_ack_delay_for_pto() * backoff;
            }
            let last_ack_eliciting = match self.spaces[space].time_of_last_ack_eliciting_packet {
                Some(time) => time,
                None => continue,
            };
            let pto = last_ack_eliciting + duration;
            if result.map_or(true, |(earliest_pto, _)| pto < earliest_pto) {
                result = Some((pto, space));
            }
        }
        result
    }

    /// Whether the peer is known to have validated our address
    /// (always true for servers and for closed connections)
    fn peer_completed_address_validation(&self) -> bool {
        if self.side.is_server() || self.state.is_closed() {
            return true;
        }
        // The server is guaranteed to have validated our address if any of our handshake or 1-RTT
        // packets are acknowledged or we've seen HANDSHAKE_DONE and discarded handshake keys.
        self.spaces[SpaceId::Handshake]
            .largest_acked_packet
            .is_some()
            || self.spaces[SpaceId::Data].largest_acked_packet.is_some()
            || (self.spaces[SpaceId::Data].crypto.is_some()
                && self.spaces[SpaceId::Handshake].crypto.is_none())
    }

    /// (Re)arm or stop `Timer::LossDetection` based on pending loss times,
    /// the anti-amplification limit, and the next probe timeout
    fn set_loss_detection_timer(&mut self, now: Instant) {
        if self.state.is_closed() {
            // No loss detection takes place on closed connections, and `close_common` already
            // stopped time timer. Ensure we don't restart it inadvertently, e.g. in response to a
            // reordered packet being handled by state-insensitive code.
            return;
        }

        if let Some((loss_time, _)) = self.loss_time_and_space() {
            // Time threshold loss detection.
            self.timers.set(Timer::LossDetection, loss_time);
            return;
        }

        if self.path.anti_amplification_blocked(1) {
            // We wouldn't be able to send anything, so don't bother.
            self.timers.stop(Timer::LossDetection);
            return;
        }

        if self.path.in_flight.ack_eliciting == 0 && self.peer_completed_address_validation() {
            // There is nothing to detect lost, so no timer is set. However, the client needs to arm
            // the timer if the server might be blocked by the anti-amplification limit.
            self.timers.stop(Timer::LossDetection);
            return;
        }

        // Determine which PN space to arm PTO for.
        // Calculate PTO duration
        if let Some((timeout, _)) = self.pto_time_and_space(now) {
            self.timers.set(Timer::LossDetection, timeout);
        } else {
            self.timers.stop(Timer::LossDetection);
        }
    }

    /// Probe Timeout
    fn pto(&self, space: SpaceId) -> Duration {
        let max_ack_delay = match space {
            SpaceId::Initial | SpaceId::Handshake => Duration::ZERO,
            SpaceId::Data => self.ack_frequency.max_ack_delay_for_pto(),
        };
        self.path.rtt.pto_base() + max_ack_delay
    }

    /// Bookkeeping after a packet is authenticated: reset keep-alive/idle timers,
    /// record ECN counters, queue an ACK, update the spin bit, and (for servers)
    /// discard Initial/0-RTT keys when appropriate
    fn on_packet_authenticated(
        &mut self,
        now: Instant,
        space_id: SpaceId,
        ecn: Option<EcnCodepoint>,
        packet: Option<u64>,
        spin: bool,
        is_1rtt: bool,
    ) {
        self.total_authed_packets += 1;
        self.reset_keep_alive(now);
        self.reset_idle_timeout(now, space_id);
        self.permit_idle_reset = true;
        self.receiving_ecn |= ecn.is_some();
        if let Some(x) = ecn {
            let space = &mut self.spaces[space_id];
            space.ecn_counters += x;

            if x.is_ce() {
                space.pending_acks.set_immediate_ack_required();
            }
        }

        let packet = match packet {
            Some(x) => x,
            None => return,
        };
        if self.side.is_server() {
            if self.spaces[SpaceId::Initial].crypto.is_some() && space_id == SpaceId::Handshake {
                // A server stops sending and processing Initial packets when it receives its first Handshake packet.
                self.discard_space(now, SpaceId::Initial);
            }
            if self.zero_rtt_crypto.is_some() && is_1rtt {
                // Discard 0-RTT keys soon after receiving a 1-RTT packet
                self.set_key_discard_timer(now, space_id)
            }
        }
        let space = &mut self.spaces[space_id];
        space.pending_acks.insert_one(packet, now);
        if packet >= space.rx_packet {
            space.rx_packet = packet;
            // Update outgoing spin bit, inverting iff we're the client
            self.spin = self.side.is_client() ^ spin;
        }

        self.config.qlog_sink.emit_packet_received(
            packet,
            space_id,
            !is_1rtt,
            now,
            self.orig_rem_cid,
        );
    }

    /// Re-arm the idle timer (if configured), clamped to at least 3 PTOs
    fn reset_idle_timeout(&mut self, now: Instant, space: SpaceId) {
        let timeout = match self.idle_timeout {
            None => return,
            Some(dur) => dur,
        };
        if self.state.is_closed() {
            self.timers.stop(Timer::Idle);
            return;
        }
        let dt = cmp::max(timeout, 3 * self.pto(space));
        self.timers.set(Timer::Idle, now + dt);
    }

    /// Re-arm the keep-alive timer, if configured and the connection is established
    fn reset_keep_alive(&mut self, now: Instant) {
        let interval = match self.config.keep_alive_interval {
            Some(x) if self.state.is_established() => x,
            _ => return,
        };
        self.timers.set(Timer::KeepAlive, now + interval);
    }

    /// Arm `Timer::PushNewCid` for the next local CID expiry, if any
    fn reset_cid_retirement(&mut self) {
        if let Some(t) = self.local_cid_state.next_timeout() {
            self.timers.set(Timer::PushNewCid, t);
        }
    }

    /// Handle the already-decrypted first packet from the client
    ///
    /// Decrypting the first packet in the `Endpoint` allows stateless packet handling to be more
    /// efficient.
    pub(crate) fn handle_first_packet(
        &mut self,
        now: Instant,
        remote: SocketAddr,
        ecn: Option<EcnCodepoint>,
        packet_number: u64,
        packet: InitialPacket,
        remaining: Option<BytesMut>,
    ) -> Result<(), ConnectionError> {
        let span = trace_span!("first recv");
        let _guard = span.enter();
        debug_assert!(self.side.is_server());
        let len = packet.header_data.len() + packet.payload.len();
        self.path.total_recvd = len as u64;

        match self.state {
            State::Handshake(ref mut state) => {
                state.expected_token = packet.header.token.clone();
            }
            _ => unreachable!("first packet must be delivered in Handshake state"),
        }

        self.on_packet_authenticated(
            now,
            SpaceId::Initial,
            ecn,
            Some(packet_number),
            false,
            false,
        );

        self.process_decrypted_packet(now, remote, Some(packet_number), packet.into())?;
        // Process any coalesced packets that followed the Initial in the same datagram
        if let Some(data) = remaining {
            self.handle_coalesced(now, remote, ecn, data);
        }

        self.config.qlog_sink.emit_recovery_metrics(
            self.pto_count,
            &mut self.path,
            now,
            self.orig_rem_cid,
        );

        Ok(())
    }

    /// Enable 0-RTT if the crypto layer offers early keys, restoring cached
    /// transport parameters on the client (with non-cacheable values reset)
    fn init_0rtt(&mut self) {
        let (header, packet) = match self.crypto.early_crypto() {
            Some(x) => x,
            None => return,
        };
        if self.side.is_client() {
            match self.crypto.transport_parameters() {
                Ok(params) => {
                    let params = params
                        .expect("crypto layer didn't supply transport parameters with ticket");
                    // Certain values must not be cached
                    let params = TransportParameters {
                        initial_src_cid: None,
                        original_dst_cid: None,
                        preferred_address: None,
                        retry_src_cid: None,
                        stateless_reset_token: None,
                        min_ack_delay: None,
                        ack_delay_exponent: TransportParameters::default().ack_delay_exponent,
                        max_ack_delay: TransportParameters::default().max_ack_delay,
                        ..params
                    };
                    self.set_peer_params(params);
                }
                Err(e) => {
                    error!("session ticket has malformed transport parameters: {}", e);
                    return;
                }
            }
        }
        trace!("0-RTT enabled");
        self.zero_rtt_enabled = true;
        self.zero_rtt_crypto = Some(ZeroRttCrypto { header, packet });
+ } + + fn read_crypto( + &mut self, + space: SpaceId, + crypto: &frame::Crypto, + payload_len: usize, + ) -> Result<(), TransportError> { + let expected = if !self.state.is_handshake() { + SpaceId::Data + } else if self.highest_space == SpaceId::Initial { + SpaceId::Initial + } else { + // On the server, self.highest_space can be Data after receiving the client's first + // flight, but we expect Handshake CRYPTO until the handshake is complete. + SpaceId::Handshake + }; + // We can't decrypt Handshake packets when highest_space is Initial, CRYPTO frames in 0-RTT + // packets are illegal, and we don't process 1-RTT packets until the handshake is + // complete. Therefore, we will never see CRYPTO data from a later-than-expected space. + debug_assert!(space <= expected, "received out-of-order CRYPTO data"); + + let end = crypto.offset + crypto.data.len() as u64; + if space < expected && end > self.spaces[space].crypto_stream.bytes_read() { + warn!( + "received new {:?} CRYPTO data when expecting {:?}", + space, expected + ); + return Err(TransportError::PROTOCOL_VIOLATION( + "new data at unexpected encryption level", + )); + } + + let space = &mut self.spaces[space]; + let max = end.saturating_sub(space.crypto_stream.bytes_read()); + if max > self.config.crypto_buffer_size as u64 { + return Err(TransportError::CRYPTO_BUFFER_EXCEEDED("")); + } + + space + .crypto_stream + .insert(crypto.offset, crypto.data.clone(), payload_len); + while let Some(chunk) = space.crypto_stream.read(usize::MAX, true) { + trace!("consumed {} CRYPTO bytes", chunk.bytes.len()); + if self.crypto.read_handshake(&chunk.bytes)? 
{ + self.events.push_back(Event::HandshakeDataReady); + } + } + + Ok(()) + } + + fn write_crypto(&mut self) { + loop { + let space = self.highest_space; + let mut outgoing = Vec::new(); + if let Some(crypto) = self.crypto.write_handshake(&mut outgoing) { + match space { + SpaceId::Initial => { + self.upgrade_crypto(SpaceId::Handshake, crypto); + } + SpaceId::Handshake => { + self.upgrade_crypto(SpaceId::Data, crypto); + } + _ => unreachable!("got updated secrets during 1-RTT"), + } + } + if outgoing.is_empty() { + if space == self.highest_space { + break; + } else { + // Keys updated, check for more data to send + continue; + } + } + let offset = self.spaces[space].crypto_offset; + let outgoing = Bytes::from(outgoing); + if let State::Handshake(ref mut state) = self.state { + if space == SpaceId::Initial && offset == 0 && self.side.is_client() { + state.client_hello = Some(outgoing.clone()); + } + } + self.spaces[space].crypto_offset += outgoing.len() as u64; + trace!("wrote {} {:?} CRYPTO bytes", outgoing.len(), space); + self.spaces[space].pending.crypto.push_back(frame::Crypto { + offset, + data: outgoing, + }); + } + } + + /// Switch to stronger cryptography during handshake + fn upgrade_crypto(&mut self, space: SpaceId, crypto: Keys) { + debug_assert!( + self.spaces[space].crypto.is_none(), + "already reached packet space {space:?}" + ); + trace!("{:?} keys ready", space); + if space == SpaceId::Data { + // Precompute the first key update + self.next_crypto = Some( + self.crypto + .next_1rtt_keys() + .expect("handshake should be complete"), + ); + } + + self.spaces[space].crypto = Some(crypto); + debug_assert!(space as usize > self.highest_space as usize); + self.highest_space = space; + if space == SpaceId::Data && self.side.is_client() { + // Discard 0-RTT keys because 1-RTT keys are available. 
+ self.zero_rtt_crypto = None; + } + } + + fn discard_space(&mut self, now: Instant, space_id: SpaceId) { + debug_assert!(space_id != SpaceId::Data); + trace!("discarding {:?} keys", space_id); + if space_id == SpaceId::Initial { + // No longer needed + if let ConnectionSide::Client { token, .. } = &mut self.side { + *token = Bytes::new(); + } + } + let space = &mut self.spaces[space_id]; + space.crypto = None; + space.time_of_last_ack_eliciting_packet = None; + space.loss_time = None; + let sent_packets = mem::take(&mut space.sent_packets); + for packet in sent_packets.into_values() { + self.remove_in_flight(&packet); + } + self.set_loss_detection_timer(now) + } + + fn handle_coalesced( + &mut self, + now: Instant, + remote: SocketAddr, + ecn: Option, + data: BytesMut, + ) { + self.path.total_recvd = self.path.total_recvd.saturating_add(data.len() as u64); + let mut remaining = Some(data); + while let Some(data) = remaining { + match PartialDecode::new( + data, + &FixedLengthConnectionIdParser::new(self.local_cid_state.cid_len()), + &[self.version], + self.endpoint_config.grease_quic_bit, + ) { + Ok((partial_decode, rest)) => { + remaining = rest; + self.handle_decode(now, remote, ecn, partial_decode); + } + Err(e) => { + trace!("malformed header: {}", e); + return; + } + } + } + } + + fn handle_decode( + &mut self, + now: Instant, + remote: SocketAddr, + ecn: Option, + partial_decode: PartialDecode, + ) { + if let Some(decoded) = packet_crypto::unprotect_header( + partial_decode, + &self.spaces, + self.zero_rtt_crypto.as_ref(), + self.peer_params.stateless_reset_token, + ) { + self.handle_packet(now, remote, ecn, decoded.packet, decoded.stateless_reset); + } + } + + fn handle_packet( + &mut self, + now: Instant, + remote: SocketAddr, + ecn: Option, + packet: Option, + stateless_reset: bool, + ) { + self.stats.udp_rx.ios += 1; + if let Some(ref packet) = packet { + trace!( + "got {:?} packet ({} bytes) from {} using id {}", + packet.header.space(), + 
packet.payload.len() + packet.header_data.len(), + remote, + packet.header.dst_cid(), + ); + } + + if self.is_handshaking() && remote != self.path.remote { + debug!("discarding packet with unexpected remote during handshake"); + return; + } + + let was_closed = self.state.is_closed(); + let was_drained = self.state.is_drained(); + + let decrypted = match packet { + None => Err(None), + Some(mut packet) => self + .decrypt_packet(now, &mut packet) + .map(move |number| (packet, number)), + }; + let result = match decrypted { + _ if stateless_reset => { + debug!("got stateless reset"); + Err(ConnectionError::Reset) + } + Err(Some(e)) => { + warn!("illegal packet: {}", e); + Err(e.into()) + } + Err(None) => { + debug!("failed to authenticate packet"); + self.authentication_failures += 1; + let integrity_limit = self.spaces[self.highest_space] + .crypto + .as_ref() + .unwrap() + .packet + .local + .integrity_limit(); + if self.authentication_failures > integrity_limit { + Err(TransportError::AEAD_LIMIT_REACHED("integrity limit violated").into()) + } else { + return; + } + } + Ok((packet, number)) => { + let span = match number { + Some(pn) => trace_span!("recv", space = ?packet.header.space(), pn), + None => trace_span!("recv", space = ?packet.header.space()), + }; + let _guard = span.enter(); + + let is_duplicate = |n| self.spaces[packet.header.space()].dedup.insert(n); + if number.is_some_and(is_duplicate) { + debug!("discarding possible duplicate packet"); + return; + } else if self.state.is_handshake() && packet.header.is_short() { + // TODO: SHOULD buffer these to improve reordering tolerance. + trace!("dropping short packet during handshake"); + return; + } else { + if let Header::Initial(InitialHeader { ref token, .. }) = packet.header { + if let State::Handshake(ref hs) = self.state { + if self.side.is_server() && token != &hs.expected_token { + // Clients must send the same retry token in every Initial. 
Initial + // packets can be spoofed, so we discard rather than killing the + // connection. + warn!("discarding Initial with invalid retry token"); + return; + } + } + } + + if !self.state.is_closed() { + let spin = match packet.header { + Header::Short { spin, .. } => spin, + _ => false, + }; + self.on_packet_authenticated( + now, + packet.header.space(), + ecn, + number, + spin, + packet.header.is_1rtt(), + ); + } + + self.process_decrypted_packet(now, remote, number, packet) + } + } + }; + + // State transitions for error cases + if let Err(conn_err) = result { + self.error = Some(conn_err.clone()); + self.state = match conn_err { + ConnectionError::ApplicationClosed(reason) => State::closed(reason), + ConnectionError::ConnectionClosed(reason) => State::closed(reason), + ConnectionError::Reset + | ConnectionError::TransportError(TransportError { + code: TransportErrorCode::AEAD_LIMIT_REACHED, + .. + }) => State::Drained, + ConnectionError::TimedOut => { + unreachable!("timeouts aren't generated by packet processing"); + } + ConnectionError::TransportError(err) => { + debug!("closing connection due to transport error: {}", err); + State::closed(err) + } + ConnectionError::VersionMismatch => State::Draining, + ConnectionError::LocallyClosed => { + unreachable!("LocallyClosed isn't generated by packet processing"); + } + ConnectionError::CidsExhausted => { + unreachable!("CidsExhausted isn't generated by packet processing"); + } + }; + } + + if !was_closed && self.state.is_closed() { + self.close_common(); + if !self.state.is_drained() { + self.set_close_timer(now); + } + } + if !was_drained && self.state.is_drained() { + self.endpoint_events.push_back(EndpointEventInner::Drained); + // Close timer may have been started previously, e.g. 
if we sent a close and got a + // stateless reset in response + self.timers.stop(Timer::Close); + } + + // Transmit CONNECTION_CLOSE if necessary + if let State::Closed(_) = self.state { + self.close = remote == self.path.remote; + } + } + + fn process_decrypted_packet( + &mut self, + now: Instant, + remote: SocketAddr, + number: Option, + packet: Packet, + ) -> Result<(), ConnectionError> { + let state = match self.state { + State::Established => { + match packet.header.space() { + SpaceId::Data => self.process_payload(now, remote, number.unwrap(), packet)?, + _ if packet.header.has_frames() => self.process_early_payload(now, packet)?, + _ => { + trace!("discarding unexpected pre-handshake packet"); + } + } + return Ok(()); + } + State::Closed(_) => { + for result in frame::Iter::new(packet.payload.freeze())? { + let frame = match result { + Ok(frame) => frame, + Err(err) => { + debug!("frame decoding error: {err:?}"); + continue; + } + }; + + if let Frame::Padding = frame { + continue; + }; + + self.stats.frame_rx.record(&frame); + + if let Frame::Close(_) = frame { + trace!("draining"); + self.state = State::Draining; + break; + } + } + return Ok(()); + } + State::Draining | State::Drained => return Ok(()), + State::Handshake(ref mut state) => state, + }; + + match packet.header { + Header::Retry { + src_cid: rem_cid, .. + } => { + if self.side.is_server() { + return Err(TransportError::PROTOCOL_VIOLATION("client sent Retry").into()); + } + + if self.total_authed_packets > 1 + || packet.payload.len() <= 16 // token + 16 byte tag + || !self.crypto.is_valid_retry( + &self.rem_cids.active(), + &packet.header_data, + &packet.payload, + ) + { + trace!("discarding invalid Retry"); + // - After the client has received and processed an Initial or Retry + // packet from the server, it MUST discard any subsequent Retry + // packets that it receives. + // - A client MUST discard a Retry packet with a zero-length Retry Token + // field. 
+ // - Clients MUST discard Retry packets that have a Retry Integrity Tag + // that cannot be validated + return Ok(()); + } + + trace!("retrying with CID {}", rem_cid); + let client_hello = state.client_hello.take().unwrap(); + self.retry_src_cid = Some(rem_cid); + self.rem_cids.update_initial_cid(rem_cid); + self.rem_handshake_cid = rem_cid; + + let space = &mut self.spaces[SpaceId::Initial]; + if let Some(info) = space.take(0) { + self.on_packet_acked(now, info); + }; + + self.discard_space(now, SpaceId::Initial); // Make sure we clean up after any retransmitted Initials + self.spaces[SpaceId::Initial] = PacketSpace { + crypto: Some(self.crypto.initial_keys(&rem_cid, self.side.side())), + next_packet_number: self.spaces[SpaceId::Initial].next_packet_number, + crypto_offset: client_hello.len() as u64, + ..PacketSpace::new(now) + }; + self.spaces[SpaceId::Initial] + .pending + .crypto + .push_back(frame::Crypto { + offset: 0, + data: client_hello, + }); + + // Retransmit all 0-RTT data + let zero_rtt = mem::take(&mut self.spaces[SpaceId::Data].sent_packets); + for info in zero_rtt.into_values() { + self.remove_in_flight(&info); + self.spaces[SpaceId::Data].pending |= info.retransmits; + } + self.streams.retransmit_all_for_0rtt(); + + let token_len = packet.payload.len() - 16; + let ConnectionSide::Client { ref mut token, .. } = self.side else { + unreachable!("we already short-circuited if we're server"); + }; + *token = packet.payload.freeze().split_to(token_len); + self.state = State::Handshake(state::Handshake { + expected_token: Bytes::new(), + rem_cid_set: false, + client_hello: None, + }); + Ok(()) + } + Header::Long { + ty: LongType::Handshake, + src_cid: rem_cid, + .. 
+ } => { + if rem_cid != self.rem_handshake_cid { + debug!( + "discarding packet with mismatched remote CID: {} != {}", + self.rem_handshake_cid, rem_cid + ); + return Ok(()); + } + self.on_path_validated(); + + self.process_early_payload(now, packet)?; + if self.state.is_closed() { + return Ok(()); + } + + if self.crypto.is_handshaking() { + trace!("handshake ongoing"); + return Ok(()); + } + + if self.side.is_client() { + // Client-only because server params were set from the client's Initial + let params = + self.crypto + .transport_parameters()? + .ok_or_else(|| TransportError { + code: TransportErrorCode::crypto(0x6d), + frame: None, + reason: "transport parameters missing".into(), + })?; + + if self.has_0rtt() { + if !self.crypto.early_data_accepted().unwrap() { + debug_assert!(self.side.is_client()); + debug!("0-RTT rejected"); + self.accepted_0rtt = false; + self.streams.zero_rtt_rejected(); + + // Discard already-queued frames + self.spaces[SpaceId::Data].pending = Retransmits::default(); + + // Discard 0-RTT packets + let sent_packets = + mem::take(&mut self.spaces[SpaceId::Data].sent_packets); + for packet in sent_packets.into_values() { + self.remove_in_flight(&packet); + } + } else { + self.accepted_0rtt = true; + params.validate_resumption_from(&self.peer_params)?; + } + } + if let Some(token) = params.stateless_reset_token { + self.endpoint_events + .push_back(EndpointEventInner::ResetToken(self.path.remote, token)); + } + self.handle_peer_params(params)?; + self.issue_first_cids(now); + } else { + // Server-only + self.spaces[SpaceId::Data].pending.handshake_done = true; + self.discard_space(now, SpaceId::Handshake); + } + + self.events.push_back(Event::Connected); + self.state = State::Established; + trace!("established"); + Ok(()) + } + Header::Initial(InitialHeader { + src_cid: rem_cid, .. 
+ }) => { + if !state.rem_cid_set { + trace!("switching remote CID to {}", rem_cid); + let mut state = state.clone(); + self.rem_cids.update_initial_cid(rem_cid); + self.rem_handshake_cid = rem_cid; + self.orig_rem_cid = rem_cid; + state.rem_cid_set = true; + self.state = State::Handshake(state); + } else if rem_cid != self.rem_handshake_cid { + debug!( + "discarding packet with mismatched remote CID: {} != {}", + self.rem_handshake_cid, rem_cid + ); + return Ok(()); + } + + let starting_space = self.highest_space; + self.process_early_payload(now, packet)?; + + if self.side.is_server() + && starting_space == SpaceId::Initial + && self.highest_space != SpaceId::Initial + { + let params = + self.crypto + .transport_parameters()? + .ok_or_else(|| TransportError { + code: TransportErrorCode::crypto(0x6d), + frame: None, + reason: "transport parameters missing".into(), + })?; + self.handle_peer_params(params)?; + self.issue_first_cids(now); + self.init_0rtt(); + } + Ok(()) + } + Header::Long { + ty: LongType::ZeroRtt, + .. + } => { + self.process_payload(now, remote, number.unwrap(), packet)?; + Ok(()) + } + Header::VersionNegotiate { .. } => { + if self.total_authed_packets > 1 { + return Ok(()); + } + let supported = packet + .payload + .chunks(4) + .any(|x| match <[u8; 4]>::try_from(x) { + Ok(version) => self.version == u32::from_be_bytes(version), + Err(_) => false, + }); + if supported { + return Ok(()); + } + debug!("remote doesn't support our version"); + Err(ConnectionError::VersionMismatch) + } + Header::Short { .. 
} => unreachable!( + "short packets received during handshake are discarded in handle_packet" + ), + } + } + + /// Process an Initial or Handshake packet payload + fn process_early_payload( + &mut self, + now: Instant, + packet: Packet, + ) -> Result<(), TransportError> { + debug_assert_ne!(packet.header.space(), SpaceId::Data); + let payload_len = packet.payload.len(); + let mut ack_eliciting = false; + for result in frame::Iter::new(packet.payload.freeze())? { + let frame = result?; + let span = match frame { + Frame::Padding => continue, + _ => Some(trace_span!("frame", ty = %frame.ty())), + }; + + self.stats.frame_rx.record(&frame); + + let _guard = span.as_ref().map(|x| x.enter()); + ack_eliciting |= frame.is_ack_eliciting(); + + // Process frames + match frame { + Frame::Padding | Frame::Ping => {} + Frame::Crypto(frame) => { + self.read_crypto(packet.header.space(), &frame, payload_len)?; + } + Frame::Ack(ack) => { + self.on_ack_received(now, packet.header.space(), ack)?; + } + Frame::Close(reason) => { + self.error = Some(reason.into()); + self.state = State::Draining; + return Ok(()); + } + _ => { + let mut err = + TransportError::PROTOCOL_VIOLATION("illegal frame type in handshake"); + err.frame = Some(frame.ty()); + return Err(err); + } + } + } + + if ack_eliciting { + // In the initial and handshake spaces, ACKs must be sent immediately + self.spaces[packet.header.space()] + .pending_acks + .set_immediate_ack_required(); + } + + self.write_crypto(); + Ok(()) + } + + fn process_payload( + &mut self, + now: Instant, + remote: SocketAddr, + number: u64, + packet: Packet, + ) -> Result<(), TransportError> { + let payload = packet.payload.freeze(); + let mut is_probing_packet = true; + let mut close = None; + let payload_len = payload.len(); + let mut ack_eliciting = false; + for result in frame::Iter::new(payload)? 
{ + let frame = result?; + let span = match frame { + Frame::Padding => continue, + _ => Some(trace_span!("frame", ty = %frame.ty())), + }; + + self.stats.frame_rx.record(&frame); + // Crypto, Stream and Datagram frames are special cased in order no pollute + // the log with payload data + match &frame { + Frame::Crypto(f) => { + trace!(offset = f.offset, len = f.data.len(), "got crypto frame"); + } + Frame::Stream(f) => { + trace!(id = %f.id, offset = f.offset, len = f.data.len(), fin = f.fin, "got stream frame"); + } + Frame::Datagram(f) => { + trace!(len = f.data.len(), "got datagram frame"); + } + f => { + trace!("got frame {:?}", f); + } + } + + let _guard = span.as_ref().map(|x| x.enter()); + if packet.header.is_0rtt() { + match frame { + Frame::Crypto(_) | Frame::Close(Close::Application(_)) => { + return Err(TransportError::PROTOCOL_VIOLATION( + "illegal frame type in 0-RTT", + )); + } + _ => {} + } + } + ack_eliciting |= frame.is_ack_eliciting(); + + // Check whether this could be a probing packet + match frame { + Frame::Padding + | Frame::PathChallenge(_) + | Frame::PathResponse(_) + | Frame::NewConnectionId(_) => {} + _ => { + is_probing_packet = false; + } + } + match frame { + Frame::Crypto(frame) => { + self.read_crypto(SpaceId::Data, &frame, payload_len)?; + } + Frame::Stream(frame) => { + if self.streams.received(frame, payload_len)?.should_transmit() { + self.spaces[SpaceId::Data].pending.max_data = true; + } + } + Frame::Ack(ack) => { + self.on_ack_received(now, SpaceId::Data, ack)?; + } + Frame::Padding | Frame::Ping => {} + Frame::Close(reason) => { + close = Some(reason); + } + Frame::PathChallenge(token) => { + self.path_responses.push(number, token, remote); + if remote == self.path.remote { + // PATH_CHALLENGE on active path, possible off-path packet forwarding + // attack. Send a non-probing packet to recover the active path. 
+ match self.peer_supports_ack_frequency() { + true => self.immediate_ack(), + false => self.ping(), + } + } + } + Frame::PathResponse(token) => { + if self.path.challenge == Some(token) && remote == self.path.remote { + trace!("new path validated"); + self.timers.stop(Timer::PathValidation); + self.path.challenge = None; + self.path.validated = true; + if let Some((_, ref mut prev_path)) = self.prev_path { + prev_path.challenge = None; + prev_path.challenge_pending = false; + } + } else { + debug!(token, "ignoring invalid PATH_RESPONSE"); + } + } + Frame::MaxData(bytes) => { + self.streams.received_max_data(bytes); + } + Frame::MaxStreamData { id, offset } => { + self.streams.received_max_stream_data(id, offset)?; + } + Frame::MaxStreams { dir, count } => { + self.streams.received_max_streams(dir, count)?; + } + Frame::ResetStream(frame) => { + if self.streams.received_reset(frame)?.should_transmit() { + self.spaces[SpaceId::Data].pending.max_data = true; + } + } + Frame::DataBlocked { offset } => { + debug!(offset, "peer claims to be blocked at connection level"); + } + Frame::StreamDataBlocked { id, offset } => { + if id.initiator() == self.side.side() && id.dir() == Dir::Uni { + debug!("got STREAM_DATA_BLOCKED on send-only {}", id); + return Err(TransportError::STREAM_STATE_ERROR( + "STREAM_DATA_BLOCKED on send-only stream", + )); + } + debug!( + stream = %id, + offset, "peer claims to be blocked at stream level" + ); + } + Frame::StreamsBlocked { dir, limit } => { + if limit > MAX_STREAM_COUNT { + return Err(TransportError::FRAME_ENCODING_ERROR( + "unrepresentable stream limit", + )); + } + debug!( + "peer claims to be blocked opening more than {} {} streams", + limit, dir + ); + } + Frame::StopSending(frame::StopSending { id, error_code }) => { + if id.initiator() != self.side.side() { + if id.dir() == Dir::Uni { + debug!("got STOP_SENDING on recv-only {}", id); + return Err(TransportError::STREAM_STATE_ERROR( + "STOP_SENDING on recv-only stream", + )); + } + 
} else if self.streams.is_local_unopened(id) { + return Err(TransportError::STREAM_STATE_ERROR( + "STOP_SENDING on unopened stream", + )); + } + self.streams.received_stop_sending(id, error_code); + } + Frame::RetireConnectionId { sequence } => { + let allow_more_cids = self + .local_cid_state + .on_cid_retirement(sequence, self.peer_params.issue_cids_limit())?; + self.endpoint_events + .push_back(EndpointEventInner::RetireConnectionId( + now, + sequence, + allow_more_cids, + )); + } + Frame::NewConnectionId(frame) => { + trace!( + sequence = frame.sequence, + id = %frame.id, + retire_prior_to = frame.retire_prior_to, + ); + if self.rem_cids.active().is_empty() { + return Err(TransportError::PROTOCOL_VIOLATION( + "NEW_CONNECTION_ID when CIDs aren't in use", + )); + } + if frame.retire_prior_to > frame.sequence { + return Err(TransportError::PROTOCOL_VIOLATION( + "NEW_CONNECTION_ID retiring unissued CIDs", + )); + } + + use crate::cid_queue::InsertError; + match self.rem_cids.insert(frame) { + Ok(None) => {} + Ok(Some((retired, reset_token))) => { + let pending_retired = + &mut self.spaces[SpaceId::Data].pending.retire_cids; + /// Ensure `pending_retired` cannot grow without bound. Limit is + /// somewhat arbitrary but very permissive. + const MAX_PENDING_RETIRED_CIDS: u64 = CidQueue::LEN as u64 * 10; + // We don't bother counting in-flight frames because those are bounded + // by congestion control. + if (pending_retired.len() as u64) + .saturating_add(retired.end.saturating_sub(retired.start)) + > MAX_PENDING_RETIRED_CIDS + { + return Err(TransportError::CONNECTION_ID_LIMIT_ERROR( + "queued too many retired CIDs", + )); + } + pending_retired.extend(retired); + self.set_reset_token(reset_token); + } + Err(InsertError::ExceedsLimit) => { + return Err(TransportError::CONNECTION_ID_LIMIT_ERROR("")); + } + Err(InsertError::Retired) => { + trace!("discarding already-retired"); + // RETIRE_CONNECTION_ID might not have been previously sent if e.g. 
a + // range of connection IDs larger than the active connection ID limit + // was retired all at once via retire_prior_to. + self.spaces[SpaceId::Data] + .pending + .retire_cids + .push(frame.sequence); + continue; + } + }; + + if self.side.is_server() && self.rem_cids.active_seq() == 0 { + // We're a server still using the initial remote CID for the client, so + // let's switch immediately to enable clientside stateless resets. + self.update_rem_cid(); + } + } + Frame::NewToken(NewToken { token }) => { + let ConnectionSide::Client { + token_store, + server_name, + .. + } = &self.side + else { + return Err(TransportError::PROTOCOL_VIOLATION("client sent NEW_TOKEN")); + }; + if token.is_empty() { + return Err(TransportError::FRAME_ENCODING_ERROR("empty token")); + } + trace!("got new token"); + token_store.insert(server_name, token); + } + Frame::Datagram(datagram) => { + if self + .datagrams + .received(datagram, &self.config.datagram_receive_buffer_size)? + { + self.events.push_back(Event::DatagramReceived); + } + } + Frame::AckFrequency(ack_frequency) => { + // This frame can only be sent in the Data space + let space = &mut self.spaces[SpaceId::Data]; + + if !self + .ack_frequency + .ack_frequency_received(&ack_frequency, &mut space.pending_acks)? 
+ { + // The AckFrequency frame is stale (we have already received a more recent one) + continue; + } + + // Our `max_ack_delay` has been updated, so we may need to adjust its associated + // timeout + if let Some(timeout) = space + .pending_acks + .max_ack_delay_timeout(self.ack_frequency.max_ack_delay) + { + self.timers.set(Timer::MaxAckDelay, timeout); + } + } + Frame::ImmediateAck => { + // This frame can only be sent in the Data space + self.spaces[SpaceId::Data] + .pending_acks + .set_immediate_ack_required(); + } + Frame::HandshakeDone => { + if self.side.is_server() { + return Err(TransportError::PROTOCOL_VIOLATION( + "client sent HANDSHAKE_DONE", + )); + } + if self.spaces[SpaceId::Handshake].crypto.is_some() { + self.discard_space(now, SpaceId::Handshake); + } + } + } + } + + let space = &mut self.spaces[SpaceId::Data]; + if space + .pending_acks + .packet_received(now, number, ack_eliciting, &space.dedup) + { + self.timers + .set(Timer::MaxAckDelay, now + self.ack_frequency.max_ack_delay); + } + + // Issue stream ID credit due to ACKs of outgoing finish/resets and incoming finish/resets + // on stopped streams. Incoming finishes/resets on open streams are not handled here as they + // are only freed, and hence only issue credit, once the application has been notified + // during a read on the stream. 
+ let pending = &mut self.spaces[SpaceId::Data].pending; + self.streams.queue_max_stream_id(pending); + + if let Some(reason) = close { + self.error = Some(reason.into()); + self.state = State::Draining; + self.close = true; + } + + if remote != self.path.remote + && !is_probing_packet + && number == self.spaces[SpaceId::Data].rx_packet + { + let ConnectionSide::Server { ref server_config } = self.side else { + panic!("packets from unknown remote should be dropped by clients"); + }; + debug_assert!( + server_config.migration, + "migration-initiating packets should have been dropped immediately" + ); + self.migrate(now, remote); + // Break linkability, if possible + self.update_rem_cid(); + self.spin = false; + } + + Ok(()) + } + + fn migrate(&mut self, now: Instant, remote: SocketAddr) { + trace!(%remote, "migration initiated"); + self.path_counter = self.path_counter.wrapping_add(1); + // Reset rtt/congestion state for new path unless it looks like a NAT rebinding. + // Note that the congestion window will not grow until validation terminates. Helps mitigate + // amplification attacks performed by spoofing source addresses. 
+ let mut new_path = if remote.is_ipv4() && remote.ip() == self.path.remote.ip() { + PathData::from_previous(remote, &self.path, self.path_counter, now) + } else { + let peer_max_udp_payload_size = + u16::try_from(self.peer_params.max_udp_payload_size.into_inner()) + .unwrap_or(u16::MAX); + PathData::new( + remote, + self.allow_mtud, + Some(peer_max_udp_payload_size), + self.path_counter, + now, + &self.config, + ) + }; + new_path.challenge = Some(self.rng.random()); + new_path.challenge_pending = true; + let prev_pto = self.pto(SpaceId::Data); + + let mut prev = mem::replace(&mut self.path, new_path); + // Don't clobber the original path if the previous one hasn't been validated yet + if prev.challenge.is_none() { + prev.challenge = Some(self.rng.random()); + prev.challenge_pending = true; + // We haven't updated the remote CID yet, this captures the remote CID we were using on + // the previous path. + self.prev_path = Some((self.rem_cids.active(), prev)); + } + + self.timers.set( + Timer::PathValidation, + now + 3 * cmp::max(self.pto(SpaceId::Data), prev_pto), + ); + } + + /// Handle a change in the local address, i.e. an active migration + pub fn local_address_changed(&mut self) { + self.update_rem_cid(); + self.ping(); + } + + /// Switch to a previously unused remote connection ID, if possible + fn update_rem_cid(&mut self) { + let (reset_token, retired) = match self.rem_cids.next() { + Some(x) => x, + None => return, + }; + + // Retire the current remote CID and any CIDs we had to skip. 
+ self.spaces[SpaceId::Data] + .pending + .retire_cids + .extend(retired); + self.set_reset_token(reset_token); + } + + fn set_reset_token(&mut self, reset_token: ResetToken) { + self.endpoint_events + .push_back(EndpointEventInner::ResetToken( + self.path.remote, + reset_token, + )); + self.peer_params.stateless_reset_token = Some(reset_token); + } + + /// Issue an initial set of connection IDs to the peer upon connection + fn issue_first_cids(&mut self, now: Instant) { + if self.local_cid_state.cid_len() == 0 { + return; + } + + // Subtract 1 to account for the CID we supplied while handshaking + let mut n = self.peer_params.issue_cids_limit() - 1; + if let ConnectionSide::Server { server_config } = &self.side { + if server_config.has_preferred_address() { + // We also sent a CID in the transport parameters + n -= 1; + } + } + self.endpoint_events + .push_back(EndpointEventInner::NeedIdentifiers(now, n)); + } + + fn populate_packet( + &mut self, + now: Instant, + space_id: SpaceId, + buf: &mut Vec, + max_size: usize, + pn: u64, + ) -> SentFrames { + let mut sent = SentFrames::default(); + let space = &mut self.spaces[space_id]; + let is_0rtt = space_id == SpaceId::Data && space.crypto.is_none(); + space.pending_acks.maybe_ack_non_eliciting(); + + // HANDSHAKE_DONE + if !is_0rtt && mem::replace(&mut space.pending.handshake_done, false) { + buf.write(frame::FrameType::HANDSHAKE_DONE); + sent.retransmits.get_or_create().handshake_done = true; + // This is just a u8 counter and the frame is typically just sent once + self.stats.frame_tx.handshake_done = + self.stats.frame_tx.handshake_done.saturating_add(1); + } + + // PING + if mem::replace(&mut space.ping_pending, false) { + trace!("PING"); + buf.write(frame::FrameType::PING); + sent.non_retransmits = true; + self.stats.frame_tx.ping += 1; + } + + // IMMEDIATE_ACK + if mem::replace(&mut space.immediate_ack_pending, false) { + trace!("IMMEDIATE_ACK"); + buf.write(frame::FrameType::IMMEDIATE_ACK); + 
sent.non_retransmits = true; + self.stats.frame_tx.immediate_ack += 1; + } + + // ACK + if space.pending_acks.can_send() { + Self::populate_acks( + now, + self.receiving_ecn, + &mut sent, + space, + buf, + &mut self.stats, + ); + } + + // ACK_FREQUENCY + if mem::replace(&mut space.pending.ack_frequency, false) { + let sequence_number = self.ack_frequency.next_sequence_number(); + + // Safe to unwrap because this is always provided when ACK frequency is enabled + let config = self.config.ack_frequency_config.as_ref().unwrap(); + + // Ensure the delay is within bounds to avoid a PROTOCOL_VIOLATION error + let max_ack_delay = self.ack_frequency.candidate_max_ack_delay( + self.path.rtt.get(), + config, + &self.peer_params, + ); + + trace!(?max_ack_delay, "ACK_FREQUENCY"); + + frame::AckFrequency { + sequence: sequence_number, + ack_eliciting_threshold: config.ack_eliciting_threshold, + request_max_ack_delay: max_ack_delay.as_micros().try_into().unwrap_or(VarInt::MAX), + reordering_threshold: config.reordering_threshold, + } + .encode(buf); + + sent.retransmits.get_or_create().ack_frequency = true; + + self.ack_frequency.ack_frequency_sent(pn, max_ack_delay); + self.stats.frame_tx.ack_frequency += 1; + } + + // PATH_CHALLENGE + if buf.len() + 9 < max_size && space_id == SpaceId::Data { + // Transmit challenges with every outgoing frame on an unvalidated path + if let Some(token) = self.path.challenge { + // But only send a packet solely for that purpose at most once + self.path.challenge_pending = false; + sent.non_retransmits = true; + sent.requires_padding = true; + trace!("PATH_CHALLENGE {:08x}", token); + buf.write(frame::FrameType::PATH_CHALLENGE); + buf.write(token); + self.stats.frame_tx.path_challenge += 1; + } + } + + // PATH_RESPONSE + if buf.len() + 9 < max_size && space_id == SpaceId::Data { + if let Some(token) = self.path_responses.pop_on_path(self.path.remote) { + sent.non_retransmits = true; + sent.requires_padding = true; + trace!("PATH_RESPONSE 
{:08x}", token); + buf.write(frame::FrameType::PATH_RESPONSE); + buf.write(token); + self.stats.frame_tx.path_response += 1; + } + } + + // CRYPTO + while buf.len() + frame::Crypto::SIZE_BOUND < max_size && !is_0rtt { + let mut frame = match space.pending.crypto.pop_front() { + Some(x) => x, + None => break, + }; + + // Calculate the maximum amount of crypto data we can store in the buffer. + // Since the offset is known, we can reserve the exact size required to encode it. + // For length we reserve 2bytes which allows to encode up to 2^14, + // which is more than what fits into normally sized QUIC frames. + let max_crypto_data_size = max_size + - buf.len() + - 1 // Frame Type + - VarInt::size(unsafe { VarInt::from_u64_unchecked(frame.offset) }) + - 2; // Maximum encoded length for frame size, given we send less than 2^14 bytes + + let len = frame + .data + .len() + .min(2usize.pow(14) - 1) + .min(max_crypto_data_size); + + let data = frame.data.split_to(len); + let truncated = frame::Crypto { + offset: frame.offset, + data, + }; + trace!( + "CRYPTO: off {} len {}", + truncated.offset, + truncated.data.len() + ); + truncated.encode(buf); + self.stats.frame_tx.crypto += 1; + sent.retransmits.get_or_create().crypto.push_back(truncated); + if !frame.data.is_empty() { + frame.offset += len as u64; + space.pending.crypto.push_front(frame); + } + } + + if space_id == SpaceId::Data { + self.streams.write_control_frames( + buf, + &mut space.pending, + &mut sent.retransmits, + &mut self.stats.frame_tx, + max_size, + ); + } + + // NEW_CONNECTION_ID + while buf.len() + NewConnectionId::SIZE_BOUND < max_size { + let issued = match space.pending.new_cids.pop() { + Some(x) => x, + None => break, + }; + trace!( + sequence = issued.sequence, + id = %issued.id, + "NEW_CONNECTION_ID" + ); + frame::NewConnectionId { + sequence: issued.sequence, + retire_prior_to: self.local_cid_state.retire_prior_to(), + id: issued.id, + reset_token: issued.reset_token, + } + .encode(buf); + 
sent.retransmits.get_or_create().new_cids.push(issued); + self.stats.frame_tx.new_connection_id += 1; + } + + // RETIRE_CONNECTION_ID + while buf.len() + frame::RETIRE_CONNECTION_ID_SIZE_BOUND < max_size { + let seq = match space.pending.retire_cids.pop() { + Some(x) => x, + None => break, + }; + trace!(sequence = seq, "RETIRE_CONNECTION_ID"); + buf.write(frame::FrameType::RETIRE_CONNECTION_ID); + buf.write_var(seq); + sent.retransmits.get_or_create().retire_cids.push(seq); + self.stats.frame_tx.retire_connection_id += 1; + } + + // DATAGRAM + let mut sent_datagrams = false; + while buf.len() + Datagram::SIZE_BOUND < max_size && space_id == SpaceId::Data { + match self.datagrams.write(buf, max_size) { + true => { + sent_datagrams = true; + sent.non_retransmits = true; + self.stats.frame_tx.datagram += 1; + } + false => break, + } + } + if self.datagrams.send_blocked && sent_datagrams { + self.events.push_back(Event::DatagramsUnblocked); + self.datagrams.send_blocked = false; + } + + // NEW_TOKEN + while let Some(remote_addr) = space.pending.new_tokens.pop() { + debug_assert_eq!(space_id, SpaceId::Data); + let ConnectionSide::Server { server_config } = &self.side else { + panic!("NEW_TOKEN frames should not be enqueued by clients"); + }; + + if remote_addr != self.path.remote { + // NEW_TOKEN frames contain tokens bound to a client's IP address, and are only + // useful if used from the same IP address. Thus, we abandon enqueued NEW_TOKEN + // frames upon an path change. Instead, when the new path becomes validated, + // NEW_TOKEN frames may be enqueued for the new path instead. 
+ continue; + } + + let token = Token::new( + TokenPayload::Validation { + ip: remote_addr.ip(), + issued: server_config.time_source.now(), + }, + &mut self.rng, + ); + let new_token = NewToken { + token: token.encode(&*server_config.token_key).into(), + }; + + if buf.len() + new_token.size() >= max_size { + space.pending.new_tokens.push(remote_addr); + break; + } + + new_token.encode(buf); + sent.retransmits + .get_or_create() + .new_tokens + .push(remote_addr); + self.stats.frame_tx.new_token += 1; + } + + // STREAM + if space_id == SpaceId::Data { + sent.stream_frames = + self.streams + .write_stream_frames(buf, max_size, self.config.send_fairness); + self.stats.frame_tx.stream += sent.stream_frames.len() as u64; + } + + sent + } + + /// Write pending ACKs into a buffer + /// + /// This method assumes ACKs are pending, and should only be called if + /// `!PendingAcks::ranges().is_empty()` returns `true`. + fn populate_acks( + now: Instant, + receiving_ecn: bool, + sent: &mut SentFrames, + space: &mut PacketSpace, + buf: &mut Vec, + stats: &mut ConnectionStats, + ) { + debug_assert!(!space.pending_acks.ranges().is_empty()); + + // 0-RTT packets must never carry acks (which would have to be of handshake packets) + debug_assert!(space.crypto.is_some(), "tried to send ACK in 0-RTT"); + let ecn = if receiving_ecn { + Some(&space.ecn_counters) + } else { + None + }; + sent.largest_acked = space.pending_acks.ranges().max(); + + let delay_micros = space.pending_acks.ack_delay(now).as_micros() as u64; + + // TODO: This should come from `TransportConfig` if that gets configurable. 
+ let ack_delay_exp = TransportParameters::default().ack_delay_exponent; + let delay = delay_micros >> ack_delay_exp.into_inner(); + + trace!( + "ACK {:?}, Delay = {}us", + space.pending_acks.ranges(), + delay_micros + ); + + frame::Ack::encode(delay as _, space.pending_acks.ranges(), ecn, buf); + stats.frame_tx.acks += 1; + } + + fn close_common(&mut self) { + trace!("connection closed"); + for &timer in &Timer::VALUES { + self.timers.stop(timer); + } + } + + fn set_close_timer(&mut self, now: Instant) { + self.timers + .set(Timer::Close, now + 3 * self.pto(self.highest_space)); + } + + /// Handle transport parameters received from the peer + fn handle_peer_params(&mut self, params: TransportParameters) -> Result<(), TransportError> { + if Some(self.orig_rem_cid) != params.initial_src_cid + || (self.side.is_client() + && (Some(self.initial_dst_cid) != params.original_dst_cid + || self.retry_src_cid != params.retry_src_cid)) + { + return Err(TransportError::TRANSPORT_PARAMETER_ERROR( + "CID authentication failure", + )); + } + + self.set_peer_params(params); + + Ok(()) + } + + fn set_peer_params(&mut self, params: TransportParameters) { + self.streams.set_params(¶ms); + self.idle_timeout = + negotiate_max_idle_timeout(self.config.max_idle_timeout, Some(params.max_idle_timeout)); + trace!("negotiated max idle timeout {:?}", self.idle_timeout); + if let Some(ref info) = params.preferred_address { + self.rem_cids.insert(frame::NewConnectionId { + sequence: 1, + id: info.connection_id, + reset_token: info.stateless_reset_token, + retire_prior_to: 0, + }).expect("preferred address CID is the first received, and hence is guaranteed to be legal"); + } + self.ack_frequency.peer_max_ack_delay = get_max_ack_delay(¶ms); + self.peer_params = params; + self.path.mtud.on_peer_max_udp_payload_size_received( + u16::try_from(self.peer_params.max_udp_payload_size.into_inner()).unwrap_or(u16::MAX), + ); + } + + fn decrypt_packet( + &mut self, + now: Instant, + packet: &mut Packet, + 
) -> Result, Option> { + let result = packet_crypto::decrypt_packet_body( + packet, + &self.spaces, + self.zero_rtt_crypto.as_ref(), + self.key_phase, + self.prev_crypto.as_ref(), + self.next_crypto.as_ref(), + )?; + + let result = match result { + Some(r) => r, + None => return Ok(None), + }; + + if result.outgoing_key_update_acked { + if let Some(prev) = self.prev_crypto.as_mut() { + prev.end_packet = Some((result.number, now)); + self.set_key_discard_timer(now, packet.header.space()); + } + } + + if result.incoming_key_update { + trace!("key update authenticated"); + self.update_keys(Some((result.number, now)), true); + self.set_key_discard_timer(now, packet.header.space()); + } + + Ok(Some(result.number)) + } + + fn update_keys(&mut self, end_packet: Option<(u64, Instant)>, remote: bool) { + trace!("executing key update"); + // Generate keys for the key phase after the one we're switching to, store them in + // `next_crypto`, make the contents of `next_crypto` current, and move the current keys into + // `prev_crypto`. 
+ let new = self + .crypto + .next_1rtt_keys() + .expect("only called for `Data` packets"); + self.key_phase_size = new + .local + .confidentiality_limit() + .saturating_sub(KEY_UPDATE_MARGIN); + let old = mem::replace( + &mut self.spaces[SpaceId::Data] + .crypto + .as_mut() + .unwrap() // safe because update_keys() can only be triggered by short packets + .packet, + mem::replace(self.next_crypto.as_mut().unwrap(), new), + ); + self.spaces[SpaceId::Data].sent_with_keys = 0; + self.prev_crypto = Some(PrevCrypto { + crypto: old, + end_packet, + update_unacked: remote, + }); + self.key_phase = !self.key_phase; + } + + fn peer_supports_ack_frequency(&self) -> bool { + self.peer_params.min_ack_delay.is_some() + } + + /// Send an IMMEDIATE_ACK frame to the remote endpoint + /// + /// According to the spec, this will result in an error if the remote endpoint does not support + /// the Acknowledgement Frequency extension + pub(crate) fn immediate_ack(&mut self) { + self.spaces[self.highest_space].immediate_ack_pending = true; + } + + /// Decodes a packet, returning its decrypted payload, so it can be inspected in tests + #[cfg(test)] + pub(crate) fn decode_packet(&self, event: &ConnectionEvent) -> Option> { + let (first_decode, remaining) = match &event.0 { + ConnectionEventInner::Datagram(DatagramConnectionEvent { + first_decode, + remaining, + .. 
+ }) => (first_decode, remaining), + _ => return None, + }; + + if remaining.is_some() { + panic!("Packets should never be coalesced in tests"); + } + + let decrypted_header = packet_crypto::unprotect_header( + first_decode.clone(), + &self.spaces, + self.zero_rtt_crypto.as_ref(), + self.peer_params.stateless_reset_token, + )?; + + let mut packet = decrypted_header.packet?; + packet_crypto::decrypt_packet_body( + &mut packet, + &self.spaces, + self.zero_rtt_crypto.as_ref(), + self.key_phase, + self.prev_crypto.as_ref(), + self.next_crypto.as_ref(), + ) + .ok()?; + + Some(packet.payload.to_vec()) + } + + /// The number of bytes of packets containing retransmittable frames that have not been + /// acknowledged or declared lost. + #[cfg(test)] + pub(crate) fn bytes_in_flight(&self) -> u64 { + self.path.in_flight.bytes + } + + /// Number of bytes worth of non-ack-only packets that may be sent + #[cfg(test)] + pub(crate) fn congestion_window(&self) -> u64 { + self.path + .congestion + .window() + .saturating_sub(self.path.in_flight.bytes) + } + + /// Whether no timers but keepalive, idle, rtt, pushnewcid, and key discard are running + #[cfg(test)] + pub(crate) fn is_idle(&self) -> bool { + Timer::VALUES + .iter() + .filter(|&&t| !matches!(t, Timer::KeepAlive | Timer::PushNewCid | Timer::KeyDiscard)) + .filter_map(|&t| Some((t, self.timers.get(t)?))) + .min_by_key(|&(_, time)| time) + .map_or(true, |(timer, _)| timer == Timer::Idle) + } + + /// Whether explicit congestion notification is in use on outgoing packets. 
+ #[cfg(test)] + pub(crate) fn using_ecn(&self) -> bool { + self.path.sending_ecn + } + + /// The number of received bytes in the current path + #[cfg(test)] + pub(crate) fn total_recvd(&self) -> u64 { + self.path.total_recvd + } + + #[cfg(test)] + pub(crate) fn active_local_cid_seq(&self) -> (u64, u64) { + self.local_cid_state.active_seq() + } + + /// Instruct the peer to replace previously issued CIDs by sending a NEW_CONNECTION_ID frame + /// with updated `retire_prior_to` field set to `v` + #[cfg(test)] + pub(crate) fn rotate_local_cid(&mut self, v: u64, now: Instant) { + let n = self.local_cid_state.assign_retire_seq(v); + self.endpoint_events + .push_back(EndpointEventInner::NeedIdentifiers(now, n)); + } + + /// Check the current active remote CID sequence + #[cfg(test)] + pub(crate) fn active_rem_cid_seq(&self) -> u64 { + self.rem_cids.active_seq() + } + + /// Returns the detected maximum udp payload size for the current path + #[cfg(test)] + pub(crate) fn path_mtu(&self) -> u16 { + self.path.current_mtu() + } + + /// Whether we have 1-RTT data to send + /// + /// See also `self.space(SpaceId::Data).can_send()` + fn can_send_1rtt(&self, max_size: usize) -> bool { + self.streams.can_send_stream_data() + || self.path.challenge_pending + || self + .prev_path + .as_ref() + .is_some_and(|(_, x)| x.challenge_pending) + || !self.path_responses.is_empty() + || self + .datagrams + .outgoing + .front() + .is_some_and(|x| x.size(true) <= max_size) + } + + /// Update counters to account for a packet becoming acknowledged, lost, or abandoned + fn remove_in_flight(&mut self, packet: &SentPacket) { + // Visit known paths from newest to oldest to find the one `packet` was sent on + for path in [&mut self.path] + .into_iter() + .chain(self.prev_path.as_mut().map(|(_, data)| data)) + { + if path.remove_in_flight(packet) { + return; + } + } + } + + /// Terminate the connection instantly, without sending a close packet + fn kill(&mut self, reason: ConnectionError) { + 
self.close_common(); + self.error = Some(reason); + self.state = State::Drained; + self.endpoint_events.push_back(EndpointEventInner::Drained); + } + + /// Storage size required for the largest packet known to be supported by the current path + /// + /// Buffers passed to [`Connection::poll_transmit`] should be at least this large. + pub fn current_mtu(&self) -> u16 { + self.path.current_mtu() + } + + /// Size of non-frame data for a 1-RTT packet + /// + /// Quantifies space consumed by the QUIC header and AEAD tag. All other bytes in a packet are + /// frames. Changes if the length of the remote connection ID changes, which is expected to be + /// rare. If `pn` is specified, may additionally change unpredictably due to variations in + /// latency and packet loss. + fn predict_1rtt_overhead(&self, pn: Option) -> usize { + let pn_len = match pn { + Some(pn) => PacketNumber::new( + pn, + self.spaces[SpaceId::Data].largest_acked_packet.unwrap_or(0), + ) + .len(), + // Upper bound + None => 4, + }; + + // 1 byte for flags + 1 + self.rem_cids.active().len() + pn_len + self.tag_len_1rtt() + } + + fn tag_len_1rtt(&self) -> usize { + let key = match self.spaces[SpaceId::Data].crypto.as_ref() { + Some(crypto) => Some(&*crypto.packet.local), + None => self.zero_rtt_crypto.as_ref().map(|x| &*x.packet), + }; + // If neither Data nor 0-RTT keys are available, make a reasonable tag length guess. As of + // this writing, all QUIC cipher suites use 16-byte tags. We could return `None` instead, + // but that would needlessly prevent sending datagrams during 0-RTT. 
+ key.map_or(16, |x| x.tag_len()) + } + + /// Mark the path as validated, and enqueue NEW_TOKEN frames to be sent as appropriate + fn on_path_validated(&mut self) { + self.path.validated = true; + let ConnectionSide::Server { server_config } = &self.side else { + return; + }; + let new_tokens = &mut self.spaces[SpaceId::Data as usize].pending.new_tokens; + new_tokens.clear(); + for _ in 0..server_config.validation_token.sent { + new_tokens.push(self.path.remote); + } + } +} + +impl fmt::Debug for Connection { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Connection") + .field("handshake_cid", &self.handshake_cid) + .finish() + } +} + +/// Fields of `Connection` specific to it being client-side or server-side +enum ConnectionSide { + Client { + /// Sent in every outgoing Initial packet. Always empty after Initial keys are discarded + token: Bytes, + token_store: Arc, + server_name: String, + }, + Server { + server_config: Arc, + }, +} + +impl ConnectionSide { + fn remote_may_migrate(&self) -> bool { + match self { + Self::Server { server_config } => server_config.migration, + Self::Client { .. } => false, + } + } + + fn is_client(&self) -> bool { + self.side().is_client() + } + + fn is_server(&self) -> bool { + self.side().is_server() + } + + fn side(&self) -> Side { + match *self { + Self::Client { .. } => Side::Client, + Self::Server { .. 
} => Side::Server, + } + } +} + +impl From for ConnectionSide { + fn from(side: SideArgs) -> Self { + match side { + SideArgs::Client { + token_store, + server_name, + } => Self::Client { + token: token_store.take(&server_name).unwrap_or_default(), + token_store, + server_name, + }, + SideArgs::Server { + server_config, + pref_addr_cid: _, + path_validated: _, + } => Self::Server { server_config }, + } + } +} + +/// Parameters to `Connection::new` specific to it being client-side or server-side +pub(crate) enum SideArgs { + Client { + token_store: Arc, + server_name: String, + }, + Server { + server_config: Arc, + pref_addr_cid: Option, + path_validated: bool, + }, +} + +impl SideArgs { + pub(crate) fn pref_addr_cid(&self) -> Option { + match *self { + Self::Client { .. } => None, + Self::Server { pref_addr_cid, .. } => pref_addr_cid, + } + } + + pub(crate) fn path_validated(&self) -> bool { + match *self { + Self::Client { .. } => true, + Self::Server { path_validated, .. } => path_validated, + } + } + + pub(crate) fn side(&self) -> Side { + match *self { + Self::Client { .. } => Side::Client, + Self::Server { .. 
} => Side::Server, + } + } +} + +/// Reasons why a connection might be lost +#[derive(Debug, Error, Clone, PartialEq, Eq)] +pub enum ConnectionError { + /// The peer doesn't implement any supported version + #[error("peer doesn't implement any supported version")] + VersionMismatch, + /// The peer violated the QUIC specification as understood by this implementation + #[error(transparent)] + TransportError(#[from] TransportError), + /// The peer's QUIC stack aborted the connection automatically + #[error("aborted by peer: {0}")] + ConnectionClosed(frame::ConnectionClose), + /// The peer closed the connection + #[error("closed by peer: {0}")] + ApplicationClosed(frame::ApplicationClose), + /// The peer is unable to continue processing this connection, usually due to having restarted + #[error("reset by peer")] + Reset, + /// Communication with the peer has lapsed for longer than the negotiated idle timeout + /// + /// If neither side is sending keep-alives, a connection will time out after a long enough idle + /// period even if the peer is still reachable. See also [`TransportConfig::max_idle_timeout()`] + /// and [`TransportConfig::keep_alive_interval()`]. + #[error("timed out")] + TimedOut, + /// The local application closed the connection + #[error("closed")] + LocallyClosed, + /// The connection could not be created because not enough of the CID space is available + /// + /// Try using longer connection IDs. 
+ #[error("CIDs exhausted")] + CidsExhausted, +} + +impl From for ConnectionError { + fn from(x: Close) -> Self { + match x { + Close::Connection(reason) => Self::ConnectionClosed(reason), + Close::Application(reason) => Self::ApplicationClosed(reason), + } + } +} + +// For compatibility with API consumers +impl From for io::Error { + fn from(x: ConnectionError) -> Self { + use ConnectionError::*; + let kind = match x { + TimedOut => io::ErrorKind::TimedOut, + Reset => io::ErrorKind::ConnectionReset, + ApplicationClosed(_) | ConnectionClosed(_) => io::ErrorKind::ConnectionAborted, + TransportError(_) | VersionMismatch | LocallyClosed | CidsExhausted => { + io::ErrorKind::Other + } + }; + Self::new(kind, x) + } +} + +#[allow(unreachable_pub)] // fuzzing only +#[derive(Clone)] +pub enum State { + Handshake(state::Handshake), + Established, + Closed(state::Closed), + Draining, + /// Waiting for application to call close so we can dispose of the resources + Drained, +} + +impl State { + fn closed>(reason: R) -> Self { + Self::Closed(state::Closed { + reason: reason.into(), + }) + } + + fn is_handshake(&self) -> bool { + matches!(*self, Self::Handshake(_)) + } + + fn is_established(&self) -> bool { + matches!(*self, Self::Established) + } + + fn is_closed(&self) -> bool { + matches!(*self, Self::Closed(_) | Self::Draining | Self::Drained) + } + + fn is_drained(&self) -> bool { + matches!(*self, Self::Drained) + } +} + +mod state { + use super::*; + + #[allow(unreachable_pub)] // fuzzing only + #[derive(Clone)] + pub struct Handshake { + /// Whether the remote CID has been set by the peer yet + /// + /// Always set for servers + pub(super) rem_cid_set: bool, + /// Stateless retry token received in the first Initial by a server. + /// + /// Must be present in every Initial. Always empty for clients. 
+ pub(super) expected_token: Bytes, + /// First cryptographic message + /// + /// Only set for clients + pub(super) client_hello: Option, + } + + #[allow(unreachable_pub)] // fuzzing only + #[derive(Clone)] + pub struct Closed { + pub(super) reason: Close, + } +} + +/// Events of interest to the application +#[derive(Debug)] +pub enum Event { + /// The connection's handshake data is ready + HandshakeDataReady, + /// The connection was successfully established + Connected, + /// The connection was lost + /// + /// Emitted if the peer closes the connection or an error is encountered. + ConnectionLost { + /// Reason that the connection was closed + reason: ConnectionError, + }, + /// Stream events + Stream(StreamEvent), + /// One or more application datagrams have been received + DatagramReceived, + /// One or more application datagrams have been sent after blocking + DatagramsUnblocked, +} + +fn instant_saturating_sub(x: Instant, y: Instant) -> Duration { + if x > y { x - y } else { Duration::ZERO } +} + +fn get_max_ack_delay(params: &TransportParameters) -> Duration { + Duration::from_micros(params.max_ack_delay.0 * 1000) +} + +// Prevents overflow and improves behavior in extreme circumstances +const MAX_BACKOFF_EXPONENT: u32 = 16; + +/// Minimal remaining size to allow packet coalescing, excluding cryptographic tag +/// +/// This must be at least as large as the header for a well-formed empty packet to be coalesced, +/// plus some space for frames. We only care about handshake headers because short header packets +/// necessarily have smaller headers, and initial packets are only ever the first packet in a +/// datagram (because we coalesce in ascending packet space order and the only reason to split a +/// packet is when packet space changes). 
+const MIN_PACKET_SPACE: usize = MAX_HANDSHAKE_OR_0RTT_HEADER_SIZE + 32; + +/// Largest amount of space that could be occupied by a Handshake or 0-RTT packet's header +/// +/// Excludes packet-type-specific fields such as packet number or Initial token +// https://www.rfc-editor.org/rfc/rfc9000.html#name-0-rtt: flags + version + dcid len + dcid + +// scid len + scid + length + pn +const MAX_HANDSHAKE_OR_0RTT_HEADER_SIZE: usize = + 1 + 4 + 1 + MAX_CID_SIZE + 1 + MAX_CID_SIZE + VarInt::from_u32(u16::MAX as u32).size() + 4; + +/// Perform key updates this many packets before the AEAD confidentiality limit. +/// +/// Chosen arbitrarily, intended to be large enough to prevent spurious connection loss. +const KEY_UPDATE_MARGIN: u64 = 10_000; + +#[derive(Default)] +struct SentFrames { + retransmits: ThinRetransmits, + largest_acked: Option, + stream_frames: StreamMetaVec, + /// Whether the packet contains non-retransmittable frames (like datagrams) + non_retransmits: bool, + requires_padding: bool, +} + +impl SentFrames { + /// Returns whether the packet contains only ACKs + fn is_ack_only(&self, streams: &StreamsState) -> bool { + self.largest_acked.is_some() + && !self.non_retransmits + && self.stream_frames.is_empty() + && self.retransmits.is_empty(streams) + } +} + +/// Compute the negotiated idle timeout based on local and remote max_idle_timeout transport parameters. +/// +/// According to the definition of max_idle_timeout, a value of `0` means the timeout is disabled; see +/// +/// According to the negotiation procedure, either the minimum of the timeouts or one specified is used as the negotiated value; see +/// +/// Returns the negotiated idle timeout as a `Duration`, or `None` when both endpoints have opted out of idle timeout. 
+fn negotiate_max_idle_timeout(x: Option, y: Option) -> Option { + match (x, y) { + (Some(VarInt(0)) | None, Some(VarInt(0)) | None) => None, + (Some(VarInt(0)) | None, Some(y)) => Some(Duration::from_millis(y.0)), + (Some(x), Some(VarInt(0)) | None) => Some(Duration::from_millis(x.0)), + (Some(x), Some(y)) => Some(Duration::from_millis(cmp::min(x, y).0)), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn negotiate_max_idle_timeout_commutative() { + let test_params = [ + (None, None, None), + (None, Some(VarInt(0)), None), + (None, Some(VarInt(2)), Some(Duration::from_millis(2))), + (Some(VarInt(0)), Some(VarInt(0)), None), + ( + Some(VarInt(2)), + Some(VarInt(0)), + Some(Duration::from_millis(2)), + ), + ( + Some(VarInt(1)), + Some(VarInt(4)), + Some(Duration::from_millis(1)), + ), + ]; + + for (left, right, result) in test_params { + assert_eq!(negotiate_max_idle_timeout(left, right), result); + assert_eq!(negotiate_max_idle_timeout(right, left), result); + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/mtud.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/mtud.rs new file mode 100644 index 0000000000000000000000000000000000000000..b690731b410e5ed5022a9b7775f085c55d488555 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/mtud.rs @@ -0,0 +1,970 @@ +use crate::{Instant, MAX_UDP_PAYLOAD, MtuDiscoveryConfig, packet::SpaceId}; +use std::cmp; +use tracing::trace; + +/// Implements Datagram Packetization Layer Path Maximum Transmission Unit Discovery +/// +/// See [`MtuDiscoveryConfig`] for details +#[derive(Clone)] +pub(crate) struct MtuDiscovery { + /// Detected MTU for the path + current_mtu: u16, + /// The state of the MTU discovery, if enabled + state: Option, + /// The state of the black hole detector + black_hole_detector: BlackHoleDetector, +} + +impl MtuDiscovery { + pub(crate) fn 
new( + initial_plpmtu: u16, + min_mtu: u16, + peer_max_udp_payload_size: Option, + config: MtuDiscoveryConfig, + ) -> Self { + debug_assert!( + initial_plpmtu >= min_mtu, + "initial_max_udp_payload_size must be at least {min_mtu}" + ); + + let mut mtud = Self::with_state( + initial_plpmtu, + min_mtu, + Some(EnabledMtuDiscovery::new(config)), + ); + + // We might be migrating an existing connection to a new path, in which case the transport + // parameters have already been transmitted, and we already know the value of + // `peer_max_udp_payload_size` + if let Some(peer_max_udp_payload_size) = peer_max_udp_payload_size { + mtud.on_peer_max_udp_payload_size_received(peer_max_udp_payload_size); + } + + mtud + } + + /// MTU discovery will be disabled and the current MTU will be fixed to the provided value + pub(crate) fn disabled(plpmtu: u16, min_mtu: u16) -> Self { + Self::with_state(plpmtu, min_mtu, None) + } + + fn with_state(current_mtu: u16, min_mtu: u16, state: Option) -> Self { + Self { + current_mtu, + state, + black_hole_detector: BlackHoleDetector::new(min_mtu), + } + } + + pub(super) fn reset(&mut self, current_mtu: u16, min_mtu: u16) { + self.current_mtu = current_mtu; + if let Some(state) = self.state.take() { + self.state = Some(EnabledMtuDiscovery::new(state.config)); + self.on_peer_max_udp_payload_size_received(state.peer_max_udp_payload_size); + } + self.black_hole_detector = BlackHoleDetector::new(min_mtu); + } + + /// Returns the current MTU + pub(crate) fn current_mtu(&self) -> u16 { + self.current_mtu + } + + /// Returns the amount of bytes that should be sent as an MTU probe, if any + pub(crate) fn poll_transmit(&mut self, now: Instant, next_pn: u64) -> Option { + self.state + .as_mut() + .and_then(|state| state.poll_transmit(now, self.current_mtu, next_pn)) + } + + /// Notifies the [`MtuDiscovery`] that the peer's `max_udp_payload_size` transport parameter has + /// been received + pub(crate) fn on_peer_max_udp_payload_size_received(&mut self, 
peer_max_udp_payload_size: u16) { + self.current_mtu = self.current_mtu.min(peer_max_udp_payload_size); + + if let Some(state) = self.state.as_mut() { + // MTUD is only active after the connection has been fully established, so it is + // guaranteed we will receive the peer's transport parameters before we start probing + debug_assert!(matches!(state.phase, Phase::Initial)); + state.peer_max_udp_payload_size = peer_max_udp_payload_size; + } + } + + /// Notifies the [`MtuDiscovery`] that a packet has been ACKed + /// + /// Returns true if the packet was an MTU probe + pub(crate) fn on_acked(&mut self, space: SpaceId, pn: u64, len: u16) -> bool { + // MTU probes are only sent in application data space + if space != SpaceId::Data { + return false; + } + + // Update the state of the MTU search + if let Some(new_mtu) = self + .state + .as_mut() + .and_then(|state| state.on_probe_acked(pn)) + { + self.current_mtu = new_mtu; + trace!(current_mtu = self.current_mtu, "new MTU detected"); + + self.black_hole_detector.on_probe_acked(pn, len); + true + } else { + self.black_hole_detector.on_non_probe_acked(pn, len); + false + } + } + + /// Returns the packet number of the in-flight MTU probe, if any + pub(crate) fn in_flight_mtu_probe(&self) -> Option { + match &self.state { + Some(EnabledMtuDiscovery { + phase: Phase::Searching(search_state), + .. + }) => search_state.in_flight_probe, + _ => None, + } + } + + /// Notifies the [`MtuDiscovery`] that the in-flight MTU probe was lost + pub(crate) fn on_probe_lost(&mut self) { + if let Some(state) = &mut self.state { + state.on_probe_lost(); + } + } + + /// Notifies the [`MtuDiscovery`] that a non-probe packet was lost + /// + /// When done notifying of lost packets, [`MtuDiscovery::black_hole_detected`] must be called, to + /// ensure the last loss burst is properly processed and to trigger black hole recovery logic if + /// necessary. 
+ pub(crate) fn on_non_probe_lost(&mut self, pn: u64, len: u16) { + self.black_hole_detector.on_non_probe_lost(pn, len); + } + + /// Returns true if a black hole was detected + /// + /// Calling this function will close the previous loss burst. If a black hole is detected, the + /// current MTU will be reset to `min_mtu`. + pub(crate) fn black_hole_detected(&mut self, now: Instant) -> bool { + if !self.black_hole_detector.black_hole_detected() { + return false; + } + + self.current_mtu = self.black_hole_detector.min_mtu; + + if let Some(state) = &mut self.state { + state.on_black_hole_detected(now); + } + + true + } +} + +/// Additional state for enabled MTU discovery +#[derive(Debug, Clone)] +struct EnabledMtuDiscovery { + phase: Phase, + peer_max_udp_payload_size: u16, + config: MtuDiscoveryConfig, +} + +impl EnabledMtuDiscovery { + fn new(config: MtuDiscoveryConfig) -> Self { + Self { + phase: Phase::Initial, + peer_max_udp_payload_size: MAX_UDP_PAYLOAD, + config, + } + } + + /// Returns the amount of bytes that should be sent as an MTU probe, if any + fn poll_transmit(&mut self, now: Instant, current_mtu: u16, next_pn: u64) -> Option { + if let Phase::Initial = &self.phase { + // Start the first search + self.phase = Phase::Searching(SearchState::new( + current_mtu, + self.peer_max_udp_payload_size, + &self.config, + )); + } else if let Phase::Complete(next_mtud_activation) = &self.phase { + if now < *next_mtud_activation { + return None; + } + + // Start a new search (we have reached the next activation time) + self.phase = Phase::Searching(SearchState::new( + current_mtu, + self.peer_max_udp_payload_size, + &self.config, + )); + } + + if let Phase::Searching(state) = &mut self.phase { + // Nothing to do while there is a probe in flight + if state.in_flight_probe.is_some() { + return None; + } + + // Retransmit lost probes, if any + if 0 < state.lost_probe_count && state.lost_probe_count < MAX_PROBE_RETRANSMITS { + state.in_flight_probe = Some(next_pn); + 
return Some(state.last_probed_mtu); + } + + let last_probe_succeeded = state.lost_probe_count == 0; + + // The probe is definitely lost (we reached the MAX_PROBE_RETRANSMITS threshold) + if !last_probe_succeeded { + state.lost_probe_count = 0; + state.in_flight_probe = None; + } + + if let Some(probe_udp_payload_size) = state.next_mtu_to_probe(last_probe_succeeded) { + state.in_flight_probe = Some(next_pn); + state.last_probed_mtu = probe_udp_payload_size; + return Some(probe_udp_payload_size); + } else { + let next_mtud_activation = now + self.config.interval; + self.phase = Phase::Complete(next_mtud_activation); + return None; + } + } + + None + } + + /// Called when a packet is acknowledged in [`SpaceId::Data`] + /// + /// Returns the new `current_mtu` if the packet number corresponds to the in-flight MTU probe + fn on_probe_acked(&mut self, pn: u64) -> Option { + match &mut self.phase { + Phase::Searching(state) if state.in_flight_probe == Some(pn) => { + state.in_flight_probe = None; + state.lost_probe_count = 0; + Some(state.last_probed_mtu) + } + _ => None, + } + } + + /// Called when the in-flight MTU probe was lost + fn on_probe_lost(&mut self) { + // We might no longer be searching, e.g. 
if a black hole was detected + if let Phase::Searching(state) = &mut self.phase { + state.in_flight_probe = None; + state.lost_probe_count += 1; + } + } + + /// Called when a black hole is detected + fn on_black_hole_detected(&mut self, now: Instant) { + // Stop searching, if applicable, and reset the timer + let next_mtud_activation = now + self.config.black_hole_cooldown; + self.phase = Phase::Complete(next_mtud_activation); + } +} + +#[derive(Debug, Clone, Copy)] +enum Phase { + /// We haven't started polling yet + Initial, + /// We are currently searching for a higher PMTU + Searching(SearchState), + /// Searching has completed and will be triggered again at the provided instant + Complete(Instant), +} + +#[derive(Debug, Clone, Copy)] +struct SearchState { + /// The lower bound for the current binary search + lower_bound: u16, + /// The upper bound for the current binary search + upper_bound: u16, + /// The minimum change to stop the current binary search + minimum_change: u16, + /// The UDP payload size we last sent a probe for + last_probed_mtu: u16, + /// Packet number of an in-flight probe (if any) + in_flight_probe: Option, + /// Lost probes at the current probe size + lost_probe_count: usize, +} + +impl SearchState { + /// Creates a new search state, with the specified lower bound (the upper bound is derived from + /// the config and the peer's `max_udp_payload_size` transport parameter) + fn new( + mut lower_bound: u16, + peer_max_udp_payload_size: u16, + config: &MtuDiscoveryConfig, + ) -> Self { + lower_bound = lower_bound.min(peer_max_udp_payload_size); + let upper_bound = config + .upper_bound + .clamp(lower_bound, peer_max_udp_payload_size); + + Self { + in_flight_probe: None, + lost_probe_count: 0, + lower_bound, + upper_bound, + minimum_change: config.minimum_change, + // During initialization, we consider the lower bound to have already been + // successfully probed + last_probed_mtu: lower_bound, + } + } + + /// Determines the next MTU to probe 
using binary search + fn next_mtu_to_probe(&mut self, last_probe_succeeded: bool) -> Option { + debug_assert_eq!(self.in_flight_probe, None); + + if last_probe_succeeded { + self.lower_bound = self.last_probed_mtu; + } else { + self.upper_bound = self.last_probed_mtu - 1; + } + + let next_mtu = (self.lower_bound as i32 + self.upper_bound as i32) / 2; + + // Binary search stopping condition + if ((next_mtu - self.last_probed_mtu as i32).unsigned_abs() as u16) < self.minimum_change { + // Special case: if the upper bound is far enough, we want to probe it as a last + // step (otherwise we will never achieve the upper bound) + if self.upper_bound.saturating_sub(self.last_probed_mtu) >= self.minimum_change { + return Some(self.upper_bound); + } + + return None; + } + + Some(next_mtu as u16) + } +} + +/// Judges whether packet loss might indicate a drop in MTU +/// +/// Our MTU black hole detection scheme is a heuristic based on the order in which packets were sent +/// (the packet number order), their sizes, and which are deemed lost. +/// +/// First, contiguous groups of lost packets ("loss bursts") are aggregated, because a group of +/// packets all lost together were probably lost for the same reason. +/// +/// A loss burst is deemed "suspicious" if it contains no packets that are (a) smaller than the +/// minimum MTU or (b) smaller than a more recent acknowledged packet, because such a burst could be +/// fully explained by a reduction in MTU. +/// +/// When the number of suspicious loss bursts exceeds [`BLACK_HOLE_THRESHOLD`], we judge the +/// evidence for an MTU black hole to be sufficient. 
+#[derive(Clone)] +struct BlackHoleDetector { + /// Packet loss bursts currently considered suspicious + suspicious_loss_bursts: Vec, + /// Loss burst currently being aggregated, if any + current_loss_burst: Option, + /// Packet number of the biggest packet larger than `min_mtu` which we've received + /// acknowledgment of more recently than any suspicious loss burst, if any + largest_post_loss_packet: u64, + /// The maximum of `min_mtu` and the size of `largest_post_loss_packet`, or exactly `min_mtu` if + /// no larger packets have been received since the most recent loss burst. + acked_mtu: u16, + /// The UDP payload size guaranteed to be supported by the network + min_mtu: u16, +} + +impl BlackHoleDetector { + fn new(min_mtu: u16) -> Self { + Self { + suspicious_loss_bursts: Vec::with_capacity(BLACK_HOLE_THRESHOLD + 1), + current_loss_burst: None, + largest_post_loss_packet: 0, + acked_mtu: min_mtu, + min_mtu, + } + } + + fn on_probe_acked(&mut self, pn: u64, len: u16) { + // MTU probes are always larger than the previous MTU, so no previous loss bursts are + // suspicious. At most one MTU probe is in flight at a time, so we don't need to worry about + // reordering between them. + self.suspicious_loss_bursts.clear(); + self.acked_mtu = len; + // This might go backwards, but that's okay: a successful ACK means we haven't yet judged a + // more recently sent packet lost, and we just want to track the largest packet that's been + // successfully delivered more recently than a loss. + self.largest_post_loss_packet = pn; + } + + fn on_non_probe_acked(&mut self, pn: u64, len: u16) { + if len <= self.acked_mtu { + // We've already seen a larger packet since the most recent suspicious loss burst; + // nothing to do. + return; + } + self.acked_mtu = len; + // This might go backwards, but that's okay as described in `on_probe_acked`. + self.largest_post_loss_packet = pn; + // Loss bursts packets smaller than this are retroactively deemed non-suspicious. 
+ self.suspicious_loss_bursts + .retain(|burst| burst.smallest_packet_size > len); + } + + fn on_non_probe_lost(&mut self, pn: u64, len: u16) { + // A loss burst is a group of consecutive packets that are declared lost, so a distance + // greater than 1 indicates a new burst + let end_last_burst = self + .current_loss_burst + .as_ref() + .is_some_and(|current| pn - current.latest_non_probe != 1); + + if end_last_burst { + self.finish_loss_burst(); + } + + self.current_loss_burst = Some(CurrentLossBurst { + latest_non_probe: pn, + smallest_packet_size: self + .current_loss_burst + .map_or(len, |prev| cmp::min(prev.smallest_packet_size, len)), + }); + } + + fn black_hole_detected(&mut self) -> bool { + self.finish_loss_burst(); + + if self.suspicious_loss_bursts.len() <= BLACK_HOLE_THRESHOLD { + return false; + } + + self.suspicious_loss_bursts.clear(); + + true + } + + /// Marks the end of the current loss burst, checking whether it was suspicious + fn finish_loss_burst(&mut self) { + let Some(burst) = self.current_loss_burst.take() else { + return; + }; + // If a loss burst contains a packet smaller than the minimum MTU or a more recently + // transmitted packet, it is not suspicious. + if burst.smallest_packet_size < self.min_mtu + || (burst.latest_non_probe < self.largest_post_loss_packet + && burst.smallest_packet_size < self.acked_mtu) + { + return; + } + // The loss burst is now deemed suspicious. + + // A suspicious loss burst more recent than `largest_post_loss_packet` invalidates it. This + // makes `acked_mtu` a conservative approximation. Ideally we'd update `safe_mtu` and + // `largest_post_loss_packet` to describe the largest acknowledged packet sent later than + // this burst, but that would require tracking the size of an unpredictable number of + // recently acknowledged packets, and erring on the side of false positives is safe. 
+ if burst.latest_non_probe > self.largest_post_loss_packet { + self.acked_mtu = self.min_mtu; + } + + let burst = LossBurst { + smallest_packet_size: burst.smallest_packet_size, + }; + + if self.suspicious_loss_bursts.len() <= BLACK_HOLE_THRESHOLD { + self.suspicious_loss_bursts.push(burst); + return; + } + + // To limit memory use, only track the most suspicious loss bursts. + let smallest = self + .suspicious_loss_bursts + .iter_mut() + .min_by_key(|prev| prev.smallest_packet_size) + .filter(|prev| prev.smallest_packet_size < burst.smallest_packet_size); + if let Some(smallest) = smallest { + *smallest = burst; + } + } + + #[cfg(test)] + fn suspicious_loss_burst_count(&self) -> usize { + self.suspicious_loss_bursts.len() + } + + #[cfg(test)] + fn largest_non_probe_lost(&self) -> Option { + self.current_loss_burst.as_ref().map(|x| x.latest_non_probe) + } +} + +#[derive(Copy, Clone)] +struct LossBurst { + smallest_packet_size: u16, +} + +#[derive(Copy, Clone)] +struct CurrentLossBurst { + smallest_packet_size: u16, + latest_non_probe: u64, +} + +// Corresponds to the RFC's `MAX_PROBES` constant (see +// https://www.rfc-editor.org/rfc/rfc8899#section-5.1.2) +const MAX_PROBE_RETRANSMITS: usize = 3; +/// Maximum number of suspicious loss bursts that will not trigger black hole detection +const BLACK_HOLE_THRESHOLD: usize = 3; + +#[cfg(test)] +mod tests { + use super::*; + use crate::Duration; + use crate::MAX_UDP_PAYLOAD; + use crate::packet::SpaceId; + use assert_matches::assert_matches; + + fn default_mtud() -> MtuDiscovery { + let config = MtuDiscoveryConfig::default(); + MtuDiscovery::new(1_200, 1_200, None, config) + } + + fn completed(mtud: &MtuDiscovery) -> bool { + matches!(mtud.state.as_ref().unwrap().phase, Phase::Complete(_)) + } + + /// Drives mtud until it reaches `Phase::Completed` + fn drive_to_completion( + mtud: &mut MtuDiscovery, + now: Instant, + link_payload_size_limit: u16, + ) -> Vec { + let mut probed_sizes = Vec::new(); + for probe_pn in 
1..100 { + let result = mtud.poll_transmit(now, probe_pn); + + if completed(mtud) { + break; + } + + // "Send" next probe + assert!(result.is_some()); + let probe_size = result.unwrap(); + probed_sizes.push(probe_size); + + if probe_size <= link_payload_size_limit { + mtud.on_acked(SpaceId::Data, probe_pn, probe_size); + } else { + mtud.on_probe_lost(); + } + } + probed_sizes + } + + #[test] + fn black_hole_detector_ignores_burst_containing_non_suspicious_packet() { + let mut mtud = default_mtud(); + mtud.on_non_probe_lost(2, 1300); + mtud.on_non_probe_lost(3, 1300); + assert_eq!(mtud.black_hole_detector.largest_non_probe_lost(), Some(3)); + assert_eq!(mtud.black_hole_detector.suspicious_loss_burst_count(), 0); + + mtud.on_non_probe_lost(4, 800); + assert!(!mtud.black_hole_detected(Instant::now())); + assert_eq!(mtud.black_hole_detector.largest_non_probe_lost(), None); + assert_eq!(mtud.black_hole_detector.suspicious_loss_burst_count(), 0); + } + + #[test] + fn black_hole_detector_counts_burst_containing_only_suspicious_packets() { + let mut mtud = default_mtud(); + mtud.on_non_probe_lost(2, 1300); + mtud.on_non_probe_lost(3, 1300); + assert_eq!(mtud.black_hole_detector.largest_non_probe_lost(), Some(3)); + assert_eq!(mtud.black_hole_detector.suspicious_loss_burst_count(), 0); + + assert!(!mtud.black_hole_detected(Instant::now())); + assert_eq!(mtud.black_hole_detector.largest_non_probe_lost(), None); + assert_eq!(mtud.black_hole_detector.suspicious_loss_burst_count(), 1); + } + + #[test] + fn black_hole_detector_ignores_empty_burst() { + let mut mtud = default_mtud(); + assert!(!mtud.black_hole_detected(Instant::now())); + assert_eq!(mtud.black_hole_detector.suspicious_loss_burst_count(), 0); + } + + #[test] + fn mtu_discovery_disabled_does_nothing() { + let mut mtud = MtuDiscovery::disabled(1_200, 1_200); + let probe_size = mtud.poll_transmit(Instant::now(), 0); + assert_eq!(probe_size, None); + } + + #[test] + fn 
mtu_discovery_disabled_lost_four_packet_bursts_triggers_black_hole_detection() { + let mut mtud = MtuDiscovery::disabled(1_400, 1_250); + let now = Instant::now(); + + for i in 0..4 { + // The packets are never contiguous, so each one has its own burst + mtud.on_non_probe_lost(i * 2, 1300); + } + + assert!(mtud.black_hole_detected(now)); + assert_eq!(mtud.current_mtu, 1250); + assert_matches!(mtud.state, None); + } + + #[test] + fn mtu_discovery_lost_two_packet_bursts_does_not_trigger_black_hole_detection() { + let mut mtud = default_mtud(); + let now = Instant::now(); + + for i in 0..2 { + mtud.on_non_probe_lost(i, 1300); + assert!(!mtud.black_hole_detected(now)); + } + } + + #[test] + fn mtu_discovery_lost_four_packet_bursts_triggers_black_hole_detection_and_resets_timer() { + let mut mtud = default_mtud(); + let now = Instant::now(); + + for i in 0..4 { + // The packets are never contiguous, so each one has its own burst + mtud.on_non_probe_lost(i * 2, 1300); + } + + assert!(mtud.black_hole_detected(now)); + assert_eq!(mtud.current_mtu, 1200); + if let Phase::Complete(next_mtud_activation) = mtud.state.unwrap().phase { + assert_eq!(next_mtud_activation, now + Duration::from_secs(60)); + } else { + panic!("Unexpected MTUD phase!"); + } + } + + #[test] + fn mtu_discovery_after_complete_reactivates_when_interval_elapsed() { + let mut config = MtuDiscoveryConfig::default(); + config.upper_bound(9_000); + let mut mtud = MtuDiscovery::new(1_200, 1_200, None, config); + let now = Instant::now(); + drive_to_completion(&mut mtud, now, 1_500); + + // Polling right after completion does not cause new packets to be sent + assert_eq!(mtud.poll_transmit(now, 42), None); + assert!(completed(&mtud)); + assert_eq!(mtud.current_mtu, 1_471); + + // Polling after the interval has passed does (taking the current mtu as lower bound) + assert_eq!( + mtud.poll_transmit(now + Duration::from_secs(600), 43), + Some(5235) + ); + + match mtud.state.unwrap().phase { + Phase::Searching(state) 
=> { + assert_eq!(state.lower_bound, 1_471); + assert_eq!(state.upper_bound, 9_000); + } + _ => { + panic!("Unexpected MTUD phase!") + } + } + } + + #[test] + fn mtu_discovery_lost_three_probes_lowers_probe_size() { + let mut mtud = default_mtud(); + + let mut probe_sizes = (0..4).map(|i| { + let probe_size = mtud.poll_transmit(Instant::now(), i); + assert!(probe_size.is_some(), "no probe returned for packet {i}"); + + mtud.on_probe_lost(); + probe_size.unwrap() + }); + + // After the first probe is lost, it gets retransmitted twice + let first_probe_size = probe_sizes.next().unwrap(); + for _ in 0..2 { + assert_eq!(probe_sizes.next().unwrap(), first_probe_size) + } + + // After the third probe is lost, we decrement our probe size + let fourth_probe_size = probe_sizes.next().unwrap(); + assert!(fourth_probe_size < first_probe_size); + assert_eq!( + fourth_probe_size, + first_probe_size - (first_probe_size - 1_200) / 2 - 1 + ); + } + + #[test] + fn mtu_discovery_with_peer_max_udp_payload_size_clamps_upper_bound() { + let mut mtud = default_mtud(); + + mtud.on_peer_max_udp_payload_size_received(1300); + let probed_sizes = drive_to_completion(&mut mtud, Instant::now(), 1500); + + assert_eq!(mtud.state.as_ref().unwrap().peer_max_udp_payload_size, 1300); + assert_eq!(mtud.current_mtu, 1300); + let expected_probed_sizes = &[1250, 1275, 1300]; + assert_eq!(probed_sizes, expected_probed_sizes); + assert!(completed(&mtud)); + } + + #[test] + fn mtu_discovery_with_previous_peer_max_udp_payload_size_clamps_upper_bound() { + let mut mtud = MtuDiscovery::new(1500, 1_200, Some(1400), MtuDiscoveryConfig::default()); + + assert_eq!(mtud.current_mtu, 1400); + assert_eq!(mtud.state.as_ref().unwrap().peer_max_udp_payload_size, 1400); + + let probed_sizes = drive_to_completion(&mut mtud, Instant::now(), 1500); + + assert_eq!(mtud.current_mtu, 1400); + assert!(probed_sizes.is_empty()); + assert!(completed(&mtud)); + } + + #[cfg(debug_assertions)] + #[test] + #[should_panic] + fn 
mtu_discovery_with_peer_max_udp_payload_size_after_search_panics() { + let mut mtud = default_mtud(); + drive_to_completion(&mut mtud, Instant::now(), 1500); + mtud.on_peer_max_udp_payload_size_received(1300); + } + + #[test] + fn mtu_discovery_with_1500_limit() { + let mut mtud = default_mtud(); + + let probed_sizes = drive_to_completion(&mut mtud, Instant::now(), 1500); + + let expected_probed_sizes = &[1326, 1389, 1420, 1452]; + assert_eq!(probed_sizes, expected_probed_sizes); + assert_eq!(mtud.current_mtu, 1452); + assert!(completed(&mtud)); + } + + #[test] + fn mtu_discovery_with_1500_limit_and_10000_upper_bound() { + let mut config = MtuDiscoveryConfig::default(); + config.upper_bound(10_000); + let mut mtud = MtuDiscovery::new(1_200, 1_200, None, config); + + let probed_sizes = drive_to_completion(&mut mtud, Instant::now(), 1500); + + let expected_probed_sizes = &[ + 5600, 5600, 5600, 3399, 3399, 3399, 2299, 2299, 2299, 1749, 1749, 1749, 1474, 1611, + 1611, 1611, 1542, 1542, 1542, 1507, 1507, 1507, + ]; + assert_eq!(probed_sizes, expected_probed_sizes); + assert_eq!(mtud.current_mtu, 1474); + assert!(completed(&mtud)); + } + + #[test] + fn mtu_discovery_no_lost_probes_finds_maximum_udp_payload() { + let mut config = MtuDiscoveryConfig::default(); + config.upper_bound(MAX_UDP_PAYLOAD); + let mut mtud = MtuDiscovery::new(1200, 1200, None, config); + + drive_to_completion(&mut mtud, Instant::now(), u16::MAX); + + assert_eq!(mtud.current_mtu, 65527); + assert!(completed(&mtud)); + } + + #[test] + fn mtu_discovery_lost_half_of_probes_finds_maximum_udp_payload() { + let mut config = MtuDiscoveryConfig::default(); + config.upper_bound(MAX_UDP_PAYLOAD); + let mut mtud = MtuDiscovery::new(1200, 1200, None, config); + + let now = Instant::now(); + let mut iterations = 0; + for i in 1..100 { + iterations += 1; + + let probe_pn = i * 2 - 1; + let other_pn = i * 2; + + let result = mtud.poll_transmit(Instant::now(), probe_pn); + + if completed(&mtud) { + break; + } + + 
// "Send" next probe + assert!(result.is_some()); + assert!(mtud.in_flight_mtu_probe().is_some()); + + // Nothing else to send while the probe is in-flight + assert_matches!(mtud.poll_transmit(now, other_pn), None); + + if i % 2 == 0 { + // ACK probe and ensure it results in an increase of current_mtu + let previous_max_size = mtud.current_mtu; + mtud.on_acked(SpaceId::Data, probe_pn, result.unwrap()); + println!( + "ACK packet {}. Previous MTU = {previous_max_size}. New MTU = {}", + result.unwrap(), + mtud.current_mtu + ); + // assert!(mtud.current_mtu > previous_max_size); + } else { + mtud.on_probe_lost(); + } + } + + assert_eq!(iterations, 25); + assert_eq!(mtud.current_mtu, 65527); + assert!(completed(&mtud)); + } + + #[test] + fn search_state_lower_bound_higher_than_upper_bound_clamps_upper_bound() { + let mut config = MtuDiscoveryConfig::default(); + config.upper_bound(1400); + + let state = SearchState::new(1500, u16::MAX, &config); + assert_eq!(state.lower_bound, 1500); + assert_eq!(state.upper_bound, 1500); + } + + #[test] + fn search_state_lower_bound_higher_than_peer_max_udp_payload_size_clamps_lower_bound() { + let mut config = MtuDiscoveryConfig::default(); + config.upper_bound(9000); + + let state = SearchState::new(1500, 1300, &config); + assert_eq!(state.lower_bound, 1300); + assert_eq!(state.upper_bound, 1300); + } + + #[test] + fn search_state_upper_bound_higher_than_peer_max_udp_payload_size_clamps_upper_bound() { + let mut config = MtuDiscoveryConfig::default(); + config.upper_bound(9000); + + let state = SearchState::new(1200, 1450, &config); + assert_eq!(state.lower_bound, 1200); + assert_eq!(state.upper_bound, 1450); + } + + // Loss of packets larger than have been acknowledged should indicate a black hole + #[test] + fn simple_black_hole_detection() { + let mut bhd = BlackHoleDetector::new(1200); + bhd.on_non_probe_acked((BLACK_HOLE_THRESHOLD + 1) as u64 * 2, 1300); + for i in 0..BLACK_HOLE_THRESHOLD { + bhd.on_non_probe_lost(i as u64 * 2, 
1400); + } + // But not before `BLACK_HOLE_THRESHOLD + 1` bursts + assert!(!bhd.black_hole_detected()); + bhd.on_non_probe_lost(BLACK_HOLE_THRESHOLD as u64 * 2, 1400); + assert!(bhd.black_hole_detected()); + } + + // Loss of packets followed in transmission order by confirmation of a larger packet should not + // indicate a black hole + #[test] + fn non_suspicious_bursts() { + let mut bhd = BlackHoleDetector::new(1200); + bhd.on_non_probe_acked((BLACK_HOLE_THRESHOLD + 1) as u64 * 2, 1500); + for i in 0..(BLACK_HOLE_THRESHOLD + 1) { + bhd.on_non_probe_lost(i as u64 * 2, 1400); + } + assert!(!bhd.black_hole_detected()); + } + + // Loss of packets smaller than have been acknowledged previously should still indicate a black + // hole + #[test] + fn dynamic_mtu_reduction() { + let mut bhd = BlackHoleDetector::new(1200); + bhd.on_non_probe_acked(0, 1500); + for i in 0..(BLACK_HOLE_THRESHOLD + 1) { + bhd.on_non_probe_lost(i as u64 * 2, 1400); + } + assert!(bhd.black_hole_detected()); + } + + // Bursts containing heterogeneous packets are judged based on the smallest + #[test] + fn mixed_non_suspicious_bursts() { + let mut bhd = BlackHoleDetector::new(1200); + bhd.on_non_probe_acked((BLACK_HOLE_THRESHOLD + 1) as u64 * 3, 1400); + for i in 0..(BLACK_HOLE_THRESHOLD + 1) { + bhd.on_non_probe_lost(i as u64 * 3, 1500); + bhd.on_non_probe_lost(i as u64 * 3 + 1, 1300); + } + assert!(!bhd.black_hole_detected()); + } + + // Multi-packet bursts are only counted once + #[test] + fn bursts_count_once() { + let mut bhd = BlackHoleDetector::new(1200); + bhd.on_non_probe_acked((BLACK_HOLE_THRESHOLD + 1) as u64 * 3, 1400); + for i in 0..(BLACK_HOLE_THRESHOLD) { + bhd.on_non_probe_lost(i as u64 * 3, 1500); + bhd.on_non_probe_lost(i as u64 * 3 + 1, 1500); + } + assert!(!bhd.black_hole_detected()); + bhd.on_non_probe_lost(BLACK_HOLE_THRESHOLD as u64 * 3, 1500); + assert!(bhd.black_hole_detected()); + } + + // Non-suspicious bursts don't interfere with detection of suspicious bursts + #[test] 
+ fn interleaved_bursts() { + let mut bhd = BlackHoleDetector::new(1200); + bhd.on_non_probe_acked((BLACK_HOLE_THRESHOLD + 1) as u64 * 4, 1400); + for i in 0..(BLACK_HOLE_THRESHOLD + 1) { + bhd.on_non_probe_lost(i as u64 * 4, 1500); + bhd.on_non_probe_lost(i as u64 * 4 + 2, 1300); + } + assert!(bhd.black_hole_detected()); + } + + // Bursts that are non-suspicious before a delivered packet become suspicious past it + #[test] + fn suspicious_after_acked() { + let mut bhd = BlackHoleDetector::new(1200); + bhd.on_non_probe_acked((BLACK_HOLE_THRESHOLD + 1) as u64 * 2, 1400); + for i in 0..(BLACK_HOLE_THRESHOLD + 1) { + bhd.on_non_probe_lost(i as u64 * 2, 1300); + } + assert!( + !bhd.black_hole_detected(), + "1300 byte losses preceding a 1400 byte delivery are not suspicious" + ); + for i in 0..(BLACK_HOLE_THRESHOLD + 1) { + bhd.on_non_probe_lost((BLACK_HOLE_THRESHOLD as u64 + 1 + i as u64) * 2, 1300); + } + assert!( + bhd.black_hole_detected(), + "1300 byte losses following a 1400 byte delivery are suspicious" + ); + } + + // Acknowledgment of a packet marks prior loss bursts with the same packet size as + // non-suspicious + #[test] + fn retroactively_non_suspicious() { + let mut bhd = BlackHoleDetector::new(1200); + for i in 0..BLACK_HOLE_THRESHOLD { + bhd.on_non_probe_lost(i as u64 * 2, 1400); + } + bhd.on_non_probe_acked(BLACK_HOLE_THRESHOLD as u64 * 2, 1400); + bhd.on_non_probe_lost(BLACK_HOLE_THRESHOLD as u64 * 2 + 1, 1400); + assert!(!bhd.black_hole_detected()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/pacing.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/pacing.rs new file mode 100644 index 0000000000000000000000000000000000000000..2e469948cb2a30c5ba45642309ed34bb6accab95 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/pacing.rs @@ -0,0 +1,308 @@ +//! Pacing of packet transmissions. 
+ +use crate::{Duration, Instant}; + +use tracing::warn; + +/// A simple token-bucket pacer +/// +/// The pacer's capacity is derived on a fraction of the congestion window +/// which can be sent in regular intervals +/// Once the bucket is empty, further transmission is blocked. +/// The bucket refills at a rate slightly faster +/// than one congestion window per RTT, as recommended in +/// +pub(super) struct Pacer { + capacity: u64, + last_window: u64, + last_mtu: u16, + tokens: u64, + prev: Instant, +} + +impl Pacer { + /// Obtains a new [`Pacer`]. + pub(super) fn new(smoothed_rtt: Duration, window: u64, mtu: u16, now: Instant) -> Self { + let capacity = optimal_capacity(smoothed_rtt, window, mtu); + Self { + capacity, + last_window: window, + last_mtu: mtu, + tokens: capacity, + prev: now, + } + } + + /// Record that a packet has been transmitted. + pub(super) fn on_transmit(&mut self, packet_length: u16) { + self.tokens = self.tokens.saturating_sub(packet_length.into()) + } + + /// Return how long we need to wait before sending `bytes_to_send` + /// + /// If we can send a packet right away, this returns `None`. Otherwise, returns `Some(d)`, + /// where `d` is the time before this function should be called again. + /// + /// The 5/4 ratio used here comes from the suggestion that N = 1.25 in the draft IETF RFC for + /// QUIC. 
+ pub(super) fn delay( + &mut self, + smoothed_rtt: Duration, + bytes_to_send: u64, + mtu: u16, + window: u64, + now: Instant, + ) -> Option { + debug_assert_ne!( + window, 0, + "zero-sized congestion control window is nonsense" + ); + + if window != self.last_window || mtu != self.last_mtu { + self.capacity = optimal_capacity(smoothed_rtt, window, mtu); + + // Clamp the tokens + self.tokens = self.capacity.min(self.tokens); + self.last_window = window; + self.last_mtu = mtu; + } + + // if we can already send a packet, there is no need for delay + if self.tokens >= bytes_to_send { + return None; + } + + // we disable pacing for extremely large windows + if window > u64::from(u32::MAX) { + return None; + } + + let window = window as u32; + + let time_elapsed = now.checked_duration_since(self.prev).unwrap_or_else(|| { + warn!("received a timestamp early than a previous recorded time, ignoring"); + Default::default() + }); + + if smoothed_rtt.as_nanos() == 0 { + return None; + } + + let elapsed_rtts = time_elapsed.as_secs_f64() / smoothed_rtt.as_secs_f64(); + let new_tokens = window as f64 * 1.25 * elapsed_rtts; + self.tokens = self + .tokens + .saturating_add(new_tokens as _) + .min(self.capacity); + + self.prev = now; + + // if we can already send a packet, there is no need for delay + if self.tokens >= bytes_to_send { + return None; + } + + let unscaled_delay = smoothed_rtt + .checked_mul((bytes_to_send.max(self.capacity) - self.tokens) as _) + .unwrap_or(Duration::MAX) + / window; + + // divisions come before multiplications to prevent overflow + // this is the time at which the pacing window becomes empty + Some(self.prev + (unscaled_delay / 5) * 4) + } +} + +/// Calculates a pacer capacity for a certain window and RTT +/// +/// The goal is to emit a burst (of size `capacity`) in timer intervals +/// which compromise between +/// - ideally distributing datagrams over time +/// - constantly waking up the connection to produce additional datagrams +/// +/// Too 
short burst intervals means we will never meet them since the timer +/// accuracy in user-space is not high enough. If we miss the interval by more +/// than 25%, we will lose that part of the congestion window since no additional +/// tokens for the extra-elapsed time can be stored. +/// +/// Too long burst intervals make pacing less effective. +fn optimal_capacity(smoothed_rtt: Duration, window: u64, mtu: u16) -> u64 { + let rtt = smoothed_rtt.as_nanos().max(1); + + let capacity = ((window as u128 * BURST_INTERVAL_NANOS) / rtt) as u64; + + // Small bursts are less efficient (no GSO), could increase latency and don't effectively + // use the channel's buffer capacity. Large bursts might block the connection on sending. + capacity.clamp(MIN_BURST_SIZE * mtu as u64, MAX_BURST_SIZE * mtu as u64) +} + +/// The burst interval +/// +/// The capacity will we refilled in 4/5 of that time. +/// 2ms is chosen here since framework timers might have 1ms precision. +/// If kernel-level pacing is supported later a higher time here might be +/// more applicable. +const BURST_INTERVAL_NANOS: u128 = 2_000_000; // 2ms + +/// Allows some usage of GSO, and doesn't slow down the handshake. +const MIN_BURST_SIZE: u64 = 10; + +/// Creating 256 packets took 1ms in a benchmark, so larger bursts don't make sense. 
+const MAX_BURST_SIZE: u64 = 256; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn does_not_panic_on_bad_instant() { + let old_instant = Instant::now(); + let new_instant = old_instant + Duration::from_micros(15); + let rtt = Duration::from_micros(400); + + assert!( + Pacer::new(rtt, 30000, 1500, new_instant) + .delay(Duration::from_micros(0), 0, 1500, 1, old_instant) + .is_none() + ); + assert!( + Pacer::new(rtt, 30000, 1500, new_instant) + .delay(Duration::from_micros(0), 1600, 1500, 1, old_instant) + .is_none() + ); + assert!( + Pacer::new(rtt, 30000, 1500, new_instant) + .delay(Duration::from_micros(0), 1500, 1500, 3000, old_instant) + .is_none() + ); + } + + #[test] + fn derives_initial_capacity() { + let window = 2_000_000; + let mtu = 1500; + let rtt = Duration::from_millis(50); + let now = Instant::now(); + + let pacer = Pacer::new(rtt, window, mtu, now); + assert_eq!( + pacer.capacity, + (window as u128 * BURST_INTERVAL_NANOS / rtt.as_nanos()) as u64 + ); + assert_eq!(pacer.tokens, pacer.capacity); + + let pacer = Pacer::new(Duration::from_millis(0), window, mtu, now); + assert_eq!(pacer.capacity, MAX_BURST_SIZE * mtu as u64); + assert_eq!(pacer.tokens, pacer.capacity); + + let pacer = Pacer::new(rtt, 1, mtu, now); + assert_eq!(pacer.capacity, MIN_BURST_SIZE * mtu as u64); + assert_eq!(pacer.tokens, pacer.capacity); + } + + #[test] + fn adjusts_capacity() { + let window = 2_000_000; + let mtu = 1500; + let rtt = Duration::from_millis(50); + let now = Instant::now(); + + let mut pacer = Pacer::new(rtt, window, mtu, now); + assert_eq!( + pacer.capacity, + (window as u128 * BURST_INTERVAL_NANOS / rtt.as_nanos()) as u64 + ); + assert_eq!(pacer.tokens, pacer.capacity); + let initial_tokens = pacer.tokens; + + pacer.delay(rtt, mtu as u64, mtu, window * 2, now); + assert_eq!( + pacer.capacity, + (2 * window as u128 * BURST_INTERVAL_NANOS / rtt.as_nanos()) as u64 + ); + assert_eq!(pacer.tokens, initial_tokens); + + pacer.delay(rtt, mtu as u64, mtu, 
window / 2, now); + assert_eq!( + pacer.capacity, + (window as u128 / 2 * BURST_INTERVAL_NANOS / rtt.as_nanos()) as u64 + ); + assert_eq!(pacer.tokens, initial_tokens / 2); + + pacer.delay(rtt, mtu as u64, mtu * 2, window, now); + assert_eq!( + pacer.capacity, + (window as u128 * BURST_INTERVAL_NANOS / rtt.as_nanos()) as u64 + ); + + pacer.delay(rtt, mtu as u64, 20_000, window, now); + assert_eq!(pacer.capacity, 20_000_u64 * MIN_BURST_SIZE); + } + + #[test] + fn computes_pause_correctly() { + let window = 2_000_000u64; + let mtu = 1000; + let rtt = Duration::from_millis(50); + let old_instant = Instant::now(); + + let mut pacer = Pacer::new(rtt, window, mtu, old_instant); + let packet_capacity = pacer.capacity / mtu as u64; + + for _ in 0..packet_capacity { + assert_eq!( + pacer.delay(rtt, mtu as u64, mtu, window, old_instant), + None, + "When capacity is available packets should be sent immediately" + ); + + pacer.on_transmit(mtu); + } + + let pace_duration = Duration::from_nanos((BURST_INTERVAL_NANOS * 4 / 5) as u64); + + assert_eq!( + pacer + .delay(rtt, mtu as u64, mtu, window, old_instant) + .expect("Send must be delayed") + .duration_since(old_instant), + pace_duration + ); + + // Refill half of the tokens + assert_eq!( + pacer.delay( + rtt, + mtu as u64, + mtu, + window, + old_instant + pace_duration / 2 + ), + None + ); + assert_eq!(pacer.tokens, pacer.capacity / 2); + + for _ in 0..packet_capacity / 2 { + assert_eq!( + pacer.delay(rtt, mtu as u64, mtu, window, old_instant), + None, + "When capacity is available packets should be sent immediately" + ); + + pacer.on_transmit(mtu); + } + + // Refill all capacity by waiting more than the expected duration + assert_eq!( + pacer.delay( + rtt, + mtu as u64, + mtu, + window, + old_instant + pace_duration * 3 / 2 + ), + None + ); + assert_eq!(pacer.tokens, pacer.capacity); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/packet_builder.rs 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/packet_builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc60177a994f0b33997393a4ced8208f517aa89b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/packet_builder.rs @@ -0,0 +1,282 @@ +use bytes::Bytes; +use rand::Rng; +use tracing::{debug, trace, trace_span}; + +use super::{Connection, SentFrames, spaces::SentPacket}; +use crate::{ + ConnectionId, Instant, TransportError, TransportErrorCode, + connection::ConnectionSide, + frame::{self, Close}, + packet::{FIXED_BIT, Header, InitialHeader, LongType, PacketNumber, PartialEncode, SpaceId}, +}; + +pub(super) struct PacketBuilder { + pub(super) datagram_start: usize, + pub(super) space: SpaceId, + pub(super) partial_encode: PartialEncode, + pub(super) ack_eliciting: bool, + pub(super) exact_number: u64, + pub(super) short_header: bool, + /// Smallest absolute position in the associated buffer that must be occupied by this packet's + /// frames + pub(super) min_size: usize, + /// Largest absolute position in the associated buffer that may be occupied by this packet's + /// frames + pub(super) max_size: usize, + pub(super) tag_len: usize, + pub(super) _span: tracing::span::EnteredSpan, +} + +impl PacketBuilder { + /// Write a new packet header to `buffer` and determine the packet's properties + /// + /// Marks the connection drained and returns `None` if the confidentiality limit would be + /// violated. 
+ pub(super) fn new( + now: Instant, + space_id: SpaceId, + dst_cid: ConnectionId, + buffer: &mut Vec, + buffer_capacity: usize, + datagram_start: usize, + ack_eliciting: bool, + conn: &mut Connection, + ) -> Option { + let version = conn.version; + // Initiate key update if we're approaching the confidentiality limit + let sent_with_keys = conn.spaces[space_id].sent_with_keys; + if space_id == SpaceId::Data { + if sent_with_keys >= conn.key_phase_size { + debug!("routine key update due to phase exhaustion"); + conn.force_key_update(); + } + } else { + let confidentiality_limit = conn.spaces[space_id] + .crypto + .as_ref() + .map_or_else( + || &conn.zero_rtt_crypto.as_ref().unwrap().packet, + |keys| &keys.packet.local, + ) + .confidentiality_limit(); + if sent_with_keys.saturating_add(1) == confidentiality_limit { + // We still have time to attempt a graceful close + conn.close_inner( + now, + Close::Connection(frame::ConnectionClose { + error_code: TransportErrorCode::AEAD_LIMIT_REACHED, + frame_type: None, + reason: Bytes::from_static(b"confidentiality limit reached"), + }), + ) + } else if sent_with_keys > confidentiality_limit { + // Confidentiality limited violated and there's nothing we can do + conn.kill( + TransportError::AEAD_LIMIT_REACHED("confidentiality limit reached").into(), + ); + return None; + } + } + + let space = &mut conn.spaces[space_id]; + let exact_number = match space_id { + SpaceId::Data => conn.packet_number_filter.allocate(&mut conn.rng, space), + _ => space.get_tx_number(), + }; + + let span = trace_span!("send", space = ?space_id, pn = exact_number).entered(); + + let number = PacketNumber::new(exact_number, space.largest_acked_packet.unwrap_or(0)); + let header = match space_id { + SpaceId::Data if space.crypto.is_some() => Header::Short { + dst_cid, + number, + spin: if conn.spin_enabled { + conn.spin + } else { + conn.rng.random() + }, + key_phase: conn.key_phase, + }, + SpaceId::Data => Header::Long { + ty: LongType::ZeroRtt, + 
src_cid: conn.handshake_cid, + dst_cid, + number, + version, + }, + SpaceId::Handshake => Header::Long { + ty: LongType::Handshake, + src_cid: conn.handshake_cid, + dst_cid, + number, + version, + }, + SpaceId::Initial => Header::Initial(InitialHeader { + src_cid: conn.handshake_cid, + dst_cid, + token: match &conn.side { + ConnectionSide::Client { token, .. } => token.clone(), + ConnectionSide::Server { .. } => Bytes::new(), + }, + number, + version, + }), + }; + let partial_encode = header.encode(buffer); + if conn.peer_params.grease_quic_bit && conn.rng.random() { + buffer[partial_encode.start] ^= FIXED_BIT; + } + + let (sample_size, tag_len) = if let Some(ref crypto) = space.crypto { + ( + crypto.header.local.sample_size(), + crypto.packet.local.tag_len(), + ) + } else if space_id == SpaceId::Data { + let zero_rtt = conn.zero_rtt_crypto.as_ref().unwrap(); + (zero_rtt.header.sample_size(), zero_rtt.packet.tag_len()) + } else { + unreachable!(); + }; + + // Each packet must be large enough for header protection sampling, i.e. the combined + // lengths of the encoded packet number and protected payload must be at least 4 bytes + // longer than the sample required for header protection. Further, each packet should be at + // least tag_len + 6 bytes larger than the destination CID on incoming packets so that the + // peer may send stateless resets that are indistinguishable from regular traffic. 
+ + // pn_len + payload_len + tag_len >= sample_size + 4 + // payload_len >= sample_size + 4 - pn_len - tag_len + let min_size = Ord::max( + buffer.len() + (sample_size + 4).saturating_sub(number.len() + tag_len), + partial_encode.start + dst_cid.len() + 6, + ); + let max_size = buffer_capacity - tag_len; + debug_assert!(max_size >= min_size); + + Some(Self { + datagram_start, + space: space_id, + partial_encode, + exact_number, + short_header: header.is_short(), + min_size, + max_size, + tag_len, + ack_eliciting, + _span: span, + }) + } + + /// Append the minimum amount of padding to the packet such that, after encryption, the + /// enclosing datagram will occupy at least `min_size` bytes + pub(super) fn pad_to(&mut self, min_size: u16) { + // The datagram might already have a larger minimum size than the caller is requesting, if + // e.g. we're coalescing packets and have populated more than `min_size` bytes with packets + // already. + self.min_size = Ord::max( + self.min_size, + self.datagram_start + (min_size as usize) - self.tag_len, + ); + } + + pub(super) fn finish_and_track( + self, + now: Instant, + conn: &mut Connection, + sent: Option, + buffer: &mut Vec, + ) { + let ack_eliciting = self.ack_eliciting; + let exact_number = self.exact_number; + let space_id = self.space; + let (size, padded) = self.finish(conn, now, buffer); + let sent = match sent { + Some(sent) => sent, + None => return, + }; + + let size = match padded || ack_eliciting { + true => size as u16, + false => 0, + }; + + let packet = SentPacket { + path_generation: conn.path.generation(), + largest_acked: sent.largest_acked, + time_sent: now, + size, + ack_eliciting, + retransmits: sent.retransmits, + stream_frames: sent.stream_frames, + }; + + conn.path + .sent(exact_number, packet, &mut conn.spaces[space_id]); + conn.stats.path.sent_packets += 1; + conn.reset_keep_alive(now); + if size != 0 { + if ack_eliciting { + conn.spaces[space_id].time_of_last_ack_eliciting_packet = Some(now); + if 
conn.permit_idle_reset { + conn.reset_idle_timeout(now, space_id); + } + conn.permit_idle_reset = false; + } + conn.set_loss_detection_timer(now); + conn.path.pacing.on_transmit(size); + } + } + + /// Encrypt packet, returning the length of the packet and whether padding was added + pub(super) fn finish( + self, + conn: &mut Connection, + now: Instant, + buffer: &mut Vec, + ) -> (usize, bool) { + let pad = buffer.len() < self.min_size; + if pad { + trace!("PADDING * {}", self.min_size - buffer.len()); + buffer.resize(self.min_size, 0); + } + + let space = &conn.spaces[self.space]; + let (header_crypto, packet_crypto) = if let Some(ref crypto) = space.crypto { + (&*crypto.header.local, &*crypto.packet.local) + } else if self.space == SpaceId::Data { + let zero_rtt = conn.zero_rtt_crypto.as_ref().unwrap(); + (&*zero_rtt.header, &*zero_rtt.packet) + } else { + unreachable!("tried to send {:?} packet without keys", self.space); + }; + + debug_assert_eq!( + packet_crypto.tag_len(), + self.tag_len, + "Mismatching crypto tag len" + ); + + buffer.resize(buffer.len() + packet_crypto.tag_len(), 0); + let encode_start = self.partial_encode.start; + let packet_buf = &mut buffer[encode_start..]; + self.partial_encode.finish( + packet_buf, + header_crypto, + Some((self.exact_number, packet_crypto)), + ); + + let len = buffer.len() - encode_start; + conn.config.qlog_sink.emit_packet_sent( + self.exact_number, + len, + self.space, + self.space == SpaceId::Data && conn.spaces[SpaceId::Data].crypto.is_none(), + now, + conn.orig_rem_cid, + ); + + (len, pad) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/packet_crypto.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/packet_crypto.rs new file mode 100644 index 0000000000000000000000000000000000000000..0aff59c96d97eb8ef07b786a0afa5e0d822e2907 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/packet_crypto.rs @@ -0,0 +1,173 @@ +use tracing::{debug, trace}; + +use crate::Instant; +use crate::connection::spaces::PacketSpace; +use crate::crypto::{HeaderKey, KeyPair, PacketKey}; +use crate::packet::{Packet, PartialDecode, SpaceId}; +use crate::token::ResetToken; +use crate::{RESET_TOKEN_SIZE, TransportError}; + +/// Removes header protection of a packet, or returns `None` if the packet was dropped +pub(super) fn unprotect_header( + partial_decode: PartialDecode, + spaces: &[PacketSpace; 3], + zero_rtt_crypto: Option<&ZeroRttCrypto>, + stateless_reset_token: Option, +) -> Option { + let header_crypto = if partial_decode.is_0rtt() { + if let Some(crypto) = zero_rtt_crypto { + Some(&*crypto.header) + } else { + debug!("dropping unexpected 0-RTT packet"); + return None; + } + } else if let Some(space) = partial_decode.space() { + if let Some(ref crypto) = spaces[space].crypto { + Some(&*crypto.header.remote) + } else { + debug!( + "discarding unexpected {:?} packet ({} bytes)", + space, + partial_decode.len(), + ); + return None; + } + } else { + // Unprotected packet + None + }; + + let packet = partial_decode.data(); + let stateless_reset = packet.len() >= RESET_TOKEN_SIZE + 5 + && stateless_reset_token.as_deref() == Some(&packet[packet.len() - RESET_TOKEN_SIZE..]); + + match partial_decode.finish(header_crypto) { + Ok(packet) => Some(UnprotectHeaderResult { + packet: Some(packet), + stateless_reset, + }), + Err(_) if stateless_reset => Some(UnprotectHeaderResult { + packet: None, + stateless_reset: true, + }), + Err(e) => { + trace!("unable to complete packet decoding: {}", e); + None + } + } +} + +pub(super) struct UnprotectHeaderResult { + /// The packet with the now unprotected header (`None` in the case of stateless reset packets + /// that fail to be decoded) + pub(super) packet: Option, + /// Whether the packet was a stateless reset packet + pub(super) 
stateless_reset: bool, +} + +/// Decrypts a packet's body in-place +pub(super) fn decrypt_packet_body( + packet: &mut Packet, + spaces: &[PacketSpace; 3], + zero_rtt_crypto: Option<&ZeroRttCrypto>, + conn_key_phase: bool, + prev_crypto: Option<&PrevCrypto>, + next_crypto: Option<&KeyPair>>, +) -> Result, Option> { + if !packet.header.is_protected() { + // Unprotected packets also don't have packet numbers + return Ok(None); + } + let space = packet.header.space(); + let rx_packet = spaces[space].rx_packet; + let number = packet.header.number().ok_or(None)?.expand(rx_packet + 1); + let packet_key_phase = packet.header.key_phase(); + + let mut crypto_update = false; + let crypto = if packet.header.is_0rtt() { + &zero_rtt_crypto.unwrap().packet + } else if packet_key_phase == conn_key_phase || space != SpaceId::Data { + &spaces[space].crypto.as_ref().unwrap().packet.remote + } else if let Some(prev) = prev_crypto.and_then(|crypto| { + // If this packet comes prior to acknowledgment of the key update by the peer, + if crypto.end_packet.map_or(true, |(pn, _)| number < pn) { + // use the previous keys. + Some(crypto) + } else { + // Otherwise, this must be a remotely-initiated key update, so fall through to the + // final case. + None + } + }) { + &prev.crypto.remote + } else { + // We're in the Data space with a key phase mismatch and either there is no locally + // initiated key update or the locally initiated key update was acknowledged by a + // lower-numbered packet. The key phase mismatch must therefore represent a new + // remotely-initiated key update. 
+ crypto_update = true; + &next_crypto.unwrap().remote + }; + + crypto + .decrypt(number, &packet.header_data, &mut packet.payload) + .map_err(|_| { + trace!("decryption failed with packet number {}", number); + None + })?; + + if !packet.reserved_bits_valid() { + return Err(Some(TransportError::PROTOCOL_VIOLATION( + "reserved bits set", + ))); + } + + let mut outgoing_key_update_acked = false; + if let Some(prev) = prev_crypto { + if prev.end_packet.is_none() && packet_key_phase == conn_key_phase { + outgoing_key_update_acked = true; + } + } + + if crypto_update { + // Validate incoming key update + if number <= rx_packet || prev_crypto.is_some_and(|x| x.update_unacked) { + return Err(Some(TransportError::KEY_UPDATE_ERROR(""))); + } + } + + Ok(Some(DecryptPacketResult { + number, + outgoing_key_update_acked, + incoming_key_update: crypto_update, + })) +} + +pub(super) struct DecryptPacketResult { + /// The packet number + pub(super) number: u64, + /// Whether a locally initiated key update has been acknowledged by the peer + pub(super) outgoing_key_update_acked: bool, + /// Whether the peer has initiated a key update + pub(super) incoming_key_update: bool, +} + +pub(super) struct PrevCrypto { + /// The keys used for the previous key phase, temporarily retained to decrypt packets sent by + /// the peer prior to its own key update. + pub(super) crypto: KeyPair>, + /// The incoming packet that ends the interval for which these keys are applicable, and the time + /// of its receipt. + /// + /// Incoming packets should be decrypted using these keys iff this is `None` or their packet + /// number is lower. `None` indicates that we have not yet received a packet using newer keys, + /// which implies that the update was locally initiated. 
+ pub(super) end_packet: Option<(u64, Instant)>, + /// Whether the following key phase is from a remotely initiated update that we haven't acked + pub(super) update_unacked: bool, +} + +pub(super) struct ZeroRttCrypto { + pub(super) header: Box, + pub(super) packet: Box, +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/paths.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/paths.rs new file mode 100644 index 0000000000000000000000000000000000000000..70582dfa2a02f98fd643efd42854ebae15694001 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/paths.rs @@ -0,0 +1,456 @@ +use std::{cmp, net::SocketAddr}; + +use tracing::trace; + +use super::{ + mtud::MtuDiscovery, + pacing::Pacer, + spaces::{PacketSpace, SentPacket}, +}; +use crate::{Duration, Instant, TIMER_GRANULARITY, TransportConfig, congestion, packet::SpaceId}; + +#[cfg(feature = "qlog")] +use qlog::events::quic::MetricsUpdated; + +/// Description of a particular network path +pub(super) struct PathData { + pub(super) remote: SocketAddr, + pub(super) rtt: RttEstimator, + /// Whether we're enabling ECN on outgoing packets + pub(super) sending_ecn: bool, + /// Congestion controller state + pub(super) congestion: Box, + /// Pacing state + pub(super) pacing: Pacer, + pub(super) challenge: Option, + pub(super) challenge_pending: bool, + /// Whether we're certain the peer can both send and receive on this address + /// + /// Initially equal to `use_stateless_retry` for servers, and becomes false again on every + /// migration. Always true for clients. 
+ pub(super) validated: bool, + /// Total size of all UDP datagrams sent on this path + pub(super) total_sent: u64, + /// Total size of all UDP datagrams received on this path + pub(super) total_recvd: u64, + /// The state of the MTU discovery process + pub(super) mtud: MtuDiscovery, + /// Packet number of the first packet sent after an RTT sample was collected on this path + /// + /// Used in persistent congestion determination. + pub(super) first_packet_after_rtt_sample: Option<(SpaceId, u64)>, + pub(super) in_flight: InFlight, + /// Number of the first packet sent on this path + /// + /// Used to determine whether a packet was sent on an earlier path. Insufficient to determine if + /// a packet was sent on a later path. + first_packet: Option, + + /// Snapshot of the qlog recovery metrics + #[cfg(feature = "qlog")] + recovery_metrics: RecoveryMetrics, + + /// Tag uniquely identifying a path in a connection + generation: u64, +} + +impl PathData { + pub(super) fn new( + remote: SocketAddr, + allow_mtud: bool, + peer_max_udp_payload_size: Option, + generation: u64, + now: Instant, + config: &TransportConfig, + ) -> Self { + let congestion = config + .congestion_controller_factory + .clone() + .build(now, config.get_initial_mtu()); + Self { + remote, + rtt: RttEstimator::new(config.initial_rtt), + sending_ecn: true, + pacing: Pacer::new( + config.initial_rtt, + congestion.initial_window(), + config.get_initial_mtu(), + now, + ), + congestion, + challenge: None, + challenge_pending: false, + validated: false, + total_sent: 0, + total_recvd: 0, + mtud: config + .mtu_discovery_config + .as_ref() + .filter(|_| allow_mtud) + .map_or( + MtuDiscovery::disabled(config.get_initial_mtu(), config.min_mtu), + |mtud_config| { + MtuDiscovery::new( + config.get_initial_mtu(), + config.min_mtu, + peer_max_udp_payload_size, + mtud_config.clone(), + ) + }, + ), + first_packet_after_rtt_sample: None, + in_flight: InFlight::new(), + first_packet: None, + #[cfg(feature = "qlog")] + 
recovery_metrics: RecoveryMetrics::default(), + generation, + } + } + + pub(super) fn from_previous( + remote: SocketAddr, + prev: &Self, + generation: u64, + now: Instant, + ) -> Self { + let congestion = prev.congestion.clone_box(); + let smoothed_rtt = prev.rtt.get(); + Self { + remote, + rtt: prev.rtt, + pacing: Pacer::new(smoothed_rtt, congestion.window(), prev.current_mtu(), now), + sending_ecn: true, + congestion, + challenge: None, + challenge_pending: false, + validated: false, + total_sent: 0, + total_recvd: 0, + mtud: prev.mtud.clone(), + first_packet_after_rtt_sample: prev.first_packet_after_rtt_sample, + in_flight: InFlight::new(), + first_packet: None, + #[cfg(feature = "qlog")] + recovery_metrics: prev.recovery_metrics.clone(), + generation, + } + } + + /// Resets RTT, congestion control and MTU states. + /// + /// This is useful when it is known the underlying path has changed. + pub(super) fn reset(&mut self, now: Instant, config: &TransportConfig) { + self.rtt = RttEstimator::new(config.initial_rtt); + self.congestion = config + .congestion_controller_factory + .clone() + .build(now, config.get_initial_mtu()); + self.mtud.reset(config.get_initial_mtu(), config.min_mtu); + } + + /// Indicates whether we're a server that hasn't validated the peer's address and hasn't + /// received enough data from the peer to permit sending `bytes_to_send` additional bytes + pub(super) fn anti_amplification_blocked(&self, bytes_to_send: u64) -> bool { + !self.validated && self.total_recvd * 3 < self.total_sent + bytes_to_send + } + + /// Returns the path's current MTU + pub(super) fn current_mtu(&self) -> u16 { + self.mtud.current_mtu() + } + + /// Account for transmission of `packet` with number `pn` in `space` + pub(super) fn sent(&mut self, pn: u64, packet: SentPacket, space: &mut PacketSpace) { + self.in_flight.insert(&packet); + if self.first_packet.is_none() { + self.first_packet = Some(pn); + } + if let Some(forgotten) = space.sent(pn, packet) { + 
self.remove_in_flight(&forgotten); + } + } + + /// Remove `packet` with number `pn` from this path's congestion control counters, or return + /// `false` if `pn` was sent before this path was established. + pub(super) fn remove_in_flight(&mut self, packet: &SentPacket) -> bool { + if packet.path_generation != self.generation { + return false; + } + self.in_flight.remove(packet); + true + } + + #[cfg(feature = "qlog")] + pub(super) fn qlog_recovery_metrics(&mut self, pto_count: u32) -> Option { + let controller_metrics = self.congestion.metrics(); + + let metrics = RecoveryMetrics { + min_rtt: Some(self.rtt.min), + smoothed_rtt: Some(self.rtt.get()), + latest_rtt: Some(self.rtt.latest), + rtt_variance: Some(self.rtt.var), + pto_count: Some(pto_count), + bytes_in_flight: Some(self.in_flight.bytes), + packets_in_flight: Some(self.in_flight.ack_eliciting), + + congestion_window: Some(controller_metrics.congestion_window), + ssthresh: controller_metrics.ssthresh, + pacing_rate: controller_metrics.pacing_rate, + }; + + let event = metrics.to_qlog_event(&self.recovery_metrics); + self.recovery_metrics = metrics; + event + } + + pub(super) fn generation(&self) -> u64 { + self.generation + } +} + +/// Congestion metrics as described in [`recovery_metrics_updated`]. +/// +/// [`recovery_metrics_updated`]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-quic-events.html#name-recovery_metrics_updated +#[cfg(feature = "qlog")] +#[derive(Default, Clone, PartialEq)] +#[non_exhaustive] +struct RecoveryMetrics { + pub min_rtt: Option, + pub smoothed_rtt: Option, + pub latest_rtt: Option, + pub rtt_variance: Option, + pub pto_count: Option, + pub bytes_in_flight: Option, + pub packets_in_flight: Option, + pub congestion_window: Option, + pub ssthresh: Option, + pub pacing_rate: Option, +} + +#[cfg(feature = "qlog")] +impl RecoveryMetrics { + /// Retain only values that have been updated since the last snapshot. 
+ fn retain_updated(&self, previous: &Self) -> Self { + macro_rules! keep_if_changed { + ($name:ident) => { + if previous.$name == self.$name { + None + } else { + self.$name + } + }; + } + + Self { + min_rtt: keep_if_changed!(min_rtt), + smoothed_rtt: keep_if_changed!(smoothed_rtt), + latest_rtt: keep_if_changed!(latest_rtt), + rtt_variance: keep_if_changed!(rtt_variance), + pto_count: keep_if_changed!(pto_count), + bytes_in_flight: keep_if_changed!(bytes_in_flight), + packets_in_flight: keep_if_changed!(packets_in_flight), + congestion_window: keep_if_changed!(congestion_window), + ssthresh: keep_if_changed!(ssthresh), + pacing_rate: keep_if_changed!(pacing_rate), + } + } + + /// Emit a `MetricsUpdated` event containing only updated values + fn to_qlog_event(&self, previous: &Self) -> Option { + let updated = self.retain_updated(previous); + + if updated == Self::default() { + return None; + } + + Some(MetricsUpdated { + min_rtt: updated.min_rtt.map(|rtt| rtt.as_secs_f32()), + smoothed_rtt: updated.smoothed_rtt.map(|rtt| rtt.as_secs_f32()), + latest_rtt: updated.latest_rtt.map(|rtt| rtt.as_secs_f32()), + rtt_variance: updated.rtt_variance.map(|rtt| rtt.as_secs_f32()), + pto_count: updated + .pto_count + .map(|count| count.try_into().unwrap_or(u16::MAX)), + bytes_in_flight: updated.bytes_in_flight, + packets_in_flight: updated.packets_in_flight, + congestion_window: updated.congestion_window, + ssthresh: updated.ssthresh, + pacing_rate: updated.pacing_rate, + }) + } +} + +/// RTT estimation for a particular network path +#[derive(Copy, Clone)] +pub struct RttEstimator { + /// The most recent RTT measurement made when receiving an ack for a previously unacked packet + latest: Duration, + /// The smoothed RTT of the connection, computed as described in RFC6298 + smoothed: Option, + /// The RTT variance, computed as described in RFC6298 + var: Duration, + /// The minimum RTT seen in the connection, ignoring ack delay. 
+ min: Duration, +} + +impl RttEstimator { + fn new(initial_rtt: Duration) -> Self { + Self { + latest: initial_rtt, + smoothed: None, + var: initial_rtt / 2, + min: initial_rtt, + } + } + + /// The current best RTT estimation. + pub fn get(&self) -> Duration { + self.smoothed.unwrap_or(self.latest) + } + + /// Conservative estimate of RTT + /// + /// Takes the maximum of smoothed and latest RTT, as recommended + /// in 6.1.2 of the recovery spec (draft 29). + pub fn conservative(&self) -> Duration { + self.get().max(self.latest) + } + + /// Minimum RTT registered so far for this estimator. + pub fn min(&self) -> Duration { + self.min + } + + // PTO computed as described in RFC9002#6.2.1 + pub(crate) fn pto_base(&self) -> Duration { + self.get() + cmp::max(4 * self.var, TIMER_GRANULARITY) + } + + pub(crate) fn update(&mut self, ack_delay: Duration, rtt: Duration) { + self.latest = rtt; + // min_rtt ignores ack delay. + self.min = cmp::min(self.min, self.latest); + // Based on RFC6298. + if let Some(smoothed) = self.smoothed { + let adjusted_rtt = if self.min + ack_delay <= self.latest { + self.latest - ack_delay + } else { + self.latest + }; + let var_sample = if smoothed > adjusted_rtt { + smoothed - adjusted_rtt + } else { + adjusted_rtt - smoothed + }; + self.var = (3 * self.var + var_sample) / 4; + self.smoothed = Some((7 * smoothed + adjusted_rtt) / 8); + } else { + self.smoothed = Some(self.latest); + self.var = self.latest / 2; + self.min = self.latest; + } + } +} + +#[derive(Default)] +pub(crate) struct PathResponses { + pending: Vec, +} + +impl PathResponses { + pub(crate) fn push(&mut self, packet: u64, token: u64, remote: SocketAddr) { + /// Arbitrary permissive limit to prevent abuse + const MAX_PATH_RESPONSES: usize = 16; + let response = PathResponse { + packet, + token, + remote, + }; + let existing = self.pending.iter_mut().find(|x| x.remote == remote); + if let Some(existing) = existing { + // Update a queued response + if existing.packet <= packet 
{ + *existing = response; + } + return; + } + if self.pending.len() < MAX_PATH_RESPONSES { + self.pending.push(response); + } else { + // We don't expect to ever hit this with well-behaved peers, so we don't bother dropping + // older challenges. + trace!("ignoring excessive PATH_CHALLENGE"); + } + } + + pub(crate) fn pop_off_path(&mut self, remote: SocketAddr) -> Option<(u64, SocketAddr)> { + let response = *self.pending.last()?; + if response.remote == remote { + // We don't bother searching further because we expect that the on-path response will + // get drained in the immediate future by a call to `pop_on_path` + return None; + } + self.pending.pop(); + Some((response.token, response.remote)) + } + + pub(crate) fn pop_on_path(&mut self, remote: SocketAddr) -> Option { + let response = *self.pending.last()?; + if response.remote != remote { + // We don't bother searching further because we expect that the off-path response will + // get drained in the immediate future by a call to `pop_off_path` + return None; + } + self.pending.pop(); + Some(response.token) + } + + pub(crate) fn is_empty(&self) -> bool { + self.pending.is_empty() + } +} + +#[derive(Copy, Clone)] +struct PathResponse { + /// The packet number the corresponding PATH_CHALLENGE was received in + packet: u64, + token: u64, + /// The address the corresponding PATH_CHALLENGE was received from + remote: SocketAddr, +} + +/// Summary statistics of packets that have been sent on a particular path, but which have not yet +/// been acked or deemed lost +pub(super) struct InFlight { + /// Sum of the sizes of all sent packets considered "in flight" by congestion control + /// + /// The size does not include IP or UDP overhead. Packets only containing ACK frames do not + /// count towards this to ensure congestion control does not impede congestion feedback. 
+ pub(super) bytes: u64, + /// Number of packets in flight containing frames other than ACK and PADDING + /// + /// This can be 0 even when bytes is not 0 because PADDING frames cause a packet to be + /// considered "in flight" by congestion control. However, if this is nonzero, bytes will always + /// also be nonzero. + pub(super) ack_eliciting: u64, +} + +impl InFlight { + fn new() -> Self { + Self { + bytes: 0, + ack_eliciting: 0, + } + } + + fn insert(&mut self, packet: &SentPacket) { + self.bytes += u64::from(packet.size); + self.ack_eliciting += u64::from(packet.ack_eliciting); + } + + /// Update counters to account for a packet becoming acknowledged, lost, or abandoned + fn remove(&mut self, packet: &SentPacket) { + self.bytes -= u64::from(packet.size); + self.ack_eliciting -= u64::from(packet.ack_eliciting); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/qlog.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/qlog.rs new file mode 100644 index 0000000000000000000000000000000000000000..a324746f13a9aaf9edcb352fe94c00e647ee9e0a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/qlog.rs @@ -0,0 +1,190 @@ +// Function bodies in this module are regularly cfg'd out +#![allow(unused_variables)] + +#[cfg(feature = "qlog")] +use std::sync::{Arc, Mutex}; + +#[cfg(feature = "qlog")] +use qlog::{ + events::{ + Event, EventData, + quic::{ + PacketHeader, PacketLost, PacketLostTrigger, PacketReceived, PacketSent, PacketType, + }, + }, + streamer::QlogStreamer, +}; +#[cfg(feature = "qlog")] +use tracing::warn; + +use crate::{ + ConnectionId, Instant, + connection::{PathData, SentPacket}, + packet::SpaceId, +}; + +/// Shareable handle to a single qlog output stream +#[cfg(feature = "qlog")] +#[derive(Clone)] +pub struct QlogStream(pub(crate) Arc>); + +#[cfg(feature = "qlog")] +impl QlogStream { + fn emit_event(&self, 
orig_rem_cid: ConnectionId, event: EventData, now: Instant) { + // Time will be overwritten by `add_event_with_instant` + let mut event = Event::with_time(0.0, event); + event.group_id = Some(orig_rem_cid.to_string()); + + let mut qlog_streamer = self.0.lock().unwrap(); + if let Err(e) = qlog_streamer.add_event_with_instant(event, now) { + warn!("could not emit qlog event: {e}"); + } + } +} + +/// A [`QlogStream`] that may be either dynamically disabled or compiled out entirely +#[derive(Clone, Default)] +pub(crate) struct QlogSink { + #[cfg(feature = "qlog")] + stream: Option, +} + +impl QlogSink { + pub(crate) fn is_enabled(&self) -> bool { + #[cfg(feature = "qlog")] + { + self.stream.is_some() + } + #[cfg(not(feature = "qlog"))] + { + false + } + } + + pub(super) fn emit_recovery_metrics( + &self, + pto_count: u32, + path: &mut PathData, + now: Instant, + orig_rem_cid: ConnectionId, + ) { + #[cfg(feature = "qlog")] + { + let Some(stream) = self.stream.as_ref() else { + return; + }; + + let Some(metrics) = path.qlog_recovery_metrics(pto_count) else { + return; + }; + + stream.emit_event(orig_rem_cid, EventData::MetricsUpdated(metrics), now); + } + } + + pub(super) fn emit_packet_lost( + &self, + pn: u64, + info: &SentPacket, + lost_send_time: Instant, + space: SpaceId, + now: Instant, + orig_rem_cid: ConnectionId, + ) { + #[cfg(feature = "qlog")] + { + let Some(stream) = self.stream.as_ref() else { + return; + }; + + let event = PacketLost { + header: Some(PacketHeader { + packet_number: Some(pn), + packet_type: packet_type(space, false), + length: Some(info.size), + ..Default::default() + }), + frames: None, + trigger: Some(match info.time_sent <= lost_send_time { + true => PacketLostTrigger::TimeThreshold, + false => PacketLostTrigger::ReorderingThreshold, + }), + }; + + stream.emit_event(orig_rem_cid, EventData::PacketLost(event), now); + } + } + + pub(super) fn emit_packet_sent( + &self, + pn: u64, + len: usize, + space: SpaceId, + is_0rtt: bool, + now: 
Instant, + orig_rem_cid: ConnectionId, + ) { + #[cfg(feature = "qlog")] + { + let Some(stream) = self.stream.as_ref() else { + return; + }; + + let event = PacketSent { + header: PacketHeader { + packet_number: Some(pn), + packet_type: packet_type(space, is_0rtt), + length: Some(len as u16), + ..Default::default() + }, + ..Default::default() + }; + + stream.emit_event(orig_rem_cid, EventData::PacketSent(event), now); + } + } + + pub(super) fn emit_packet_received( + &self, + pn: u64, + space: SpaceId, + is_0rtt: bool, + now: Instant, + orig_rem_cid: ConnectionId, + ) { + #[cfg(feature = "qlog")] + { + let Some(stream) = self.stream.as_ref() else { + return; + }; + + let event = PacketReceived { + header: PacketHeader { + packet_number: Some(pn), + packet_type: packet_type(space, is_0rtt), + ..Default::default() + }, + ..Default::default() + }; + + stream.emit_event(orig_rem_cid, EventData::PacketReceived(event), now); + } + } +} + +#[cfg(feature = "qlog")] +impl From> for QlogSink { + fn from(stream: Option) -> Self { + Self { stream } + } +} + +#[cfg(feature = "qlog")] +fn packet_type(space: SpaceId, is_0rtt: bool) -> PacketType { + match space { + SpaceId::Initial => PacketType::Initial, + SpaceId::Handshake => PacketType::Handshake, + SpaceId::Data if is_0rtt => PacketType::ZeroRtt, + SpaceId::Data => PacketType::OneRtt, + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/send_buffer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/send_buffer.rs new file mode 100644 index 0000000000000000000000000000000000000000..53a7416efcdd5ed3060acdf15bfcabf5e85d11f1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/send_buffer.rs @@ -0,0 +1,394 @@ +use std::{collections::VecDeque, ops::Range}; + +use bytes::{Buf, Bytes}; + +use crate::{VarInt, range_set::RangeSet}; + +/// Buffer of outgoing retransmittable stream data 
+#[derive(Default, Debug)] +pub(super) struct SendBuffer { + /// Data queued by the application but not yet acknowledged. May or may not have been sent. + unacked_segments: VecDeque, + /// Total size of `unacked_segments` + unacked_len: usize, + /// The first offset that hasn't been written by the application, i.e. the offset past the end of `unacked` + offset: u64, + /// The first offset that hasn't been sent + /// + /// Always lies in (offset - unacked.len())..offset + unsent: u64, + /// Acknowledged ranges which couldn't be discarded yet as they don't include the earliest + /// offset in `unacked` + // TODO: Recover storage from these by compacting (#700) + acks: RangeSet, + /// Previously transmitted ranges deemed lost + retransmits: RangeSet, +} + +impl SendBuffer { + /// Construct an empty buffer at the initial offset + pub(super) fn new() -> Self { + Self::default() + } + + /// Append application data to the end of the stream + pub(super) fn write(&mut self, data: Bytes) { + self.unacked_len += data.len(); + self.offset += data.len() as u64; + self.unacked_segments.push_back(data); + } + + /// Discard a range of acknowledged stream data + pub(super) fn ack(&mut self, mut range: Range) { + // Clamp the range to data which is still tracked + let base_offset = self.offset - self.unacked_len as u64; + range.start = base_offset.max(range.start); + range.end = base_offset.max(range.end); + + self.acks.insert(range); + + while self.acks.min() == Some(self.offset - self.unacked_len as u64) { + let prefix = self.acks.pop_min().unwrap(); + let mut to_advance = (prefix.end - prefix.start) as usize; + + self.unacked_len -= to_advance; + while to_advance > 0 { + let front = self + .unacked_segments + .front_mut() + .expect("Expected buffered data"); + + if front.len() <= to_advance { + to_advance -= front.len(); + self.unacked_segments.pop_front(); + + if self.unacked_segments.len() * 4 < self.unacked_segments.capacity() { + self.unacked_segments.shrink_to_fit(); + } + } 
else { + front.advance(to_advance); + to_advance = 0; + } + } + } + } + + /// Compute the next range to transmit on this stream and update state to account for that + /// transmission. + /// + /// `max_len` here includes the space which is available to transmit the + /// offset and length of the data to send. The caller has to guarantee that + /// there is at least enough space available to write maximum-sized metadata + /// (8 byte offset + 8 byte length). + /// + /// The method returns a tuple: + /// - The first return value indicates the range of data to send + /// - The second return value indicates whether the length needs to be encoded + /// in the STREAM frames metadata (`true`), or whether it can be omitted + /// since the selected range will fill the whole packet. + pub(super) fn poll_transmit(&mut self, mut max_len: usize) -> (Range, bool) { + debug_assert!(max_len >= 8 + 8); + let mut encode_length = false; + + if let Some(range) = self.retransmits.pop_min() { + // Retransmit sent data + + // When the offset is known, we know how many bytes are required to encode it. + // Offset 0 requires no space + if range.start != 0 { + max_len -= VarInt::size(unsafe { VarInt::from_u64_unchecked(range.start) }); + } + if range.end - range.start < max_len as u64 { + encode_length = true; + max_len -= 8; + } + + let end = range.end.min((max_len as u64).saturating_add(range.start)); + if end != range.end { + self.retransmits.insert(end..range.end); + } + return (range.start..end, encode_length); + } + + // Transmit new data + + // When the offset is known, we know how many bytes are required to encode it. 
+ // Offset 0 requires no space + if self.unsent != 0 { + max_len -= VarInt::size(unsafe { VarInt::from_u64_unchecked(self.unsent) }); + } + if self.offset - self.unsent < max_len as u64 { + encode_length = true; + max_len -= 8; + } + + let end = self + .offset + .min((max_len as u64).saturating_add(self.unsent)); + let result = self.unsent..end; + self.unsent = end; + (result, encode_length) + } + + /// Returns data which is associated with a range + /// + /// This function can return a subset of the range, if the data is stored + /// in noncontiguous fashion in the send buffer. In this case callers + /// should call the function again with an incremented start offset to + /// retrieve more data. + pub(super) fn get(&self, offsets: Range) -> &[u8] { + let base_offset = self.offset - self.unacked_len as u64; + + let mut segment_offset = base_offset; + for segment in self.unacked_segments.iter() { + if offsets.start >= segment_offset + && offsets.start < segment_offset + segment.len() as u64 + { + let start = (offsets.start - segment_offset) as usize; + let end = (offsets.end - segment_offset) as usize; + + return &segment[start..end.min(segment.len())]; + } + segment_offset += segment.len() as u64; + } + + &[] + } + + /// Queue a range of sent but unacknowledged data to be retransmitted + pub(super) fn retransmit(&mut self, range: Range) { + debug_assert!(range.end <= self.unsent, "unsent data can't be lost"); + self.retransmits.insert(range); + } + + pub(super) fn retransmit_all_for_0rtt(&mut self) { + debug_assert_eq!(self.offset, self.unacked_len as u64); + self.unsent = 0; + } + + /// First stream offset unwritten by the application, i.e. 
the offset that the next write will + /// begin at + pub(super) fn offset(&self) -> u64 { + self.offset + } + + /// Whether all sent data has been acknowledged + pub(super) fn is_fully_acked(&self) -> bool { + self.unacked_len == 0 + } + + /// Whether there's data to send + /// + /// There may be sent unacknowledged data even when this is false. + pub(super) fn has_unsent_data(&self) -> bool { + self.unsent != self.offset || !self.retransmits.is_empty() + } + + /// Compute the amount of data that hasn't been acknowledged + pub(super) fn unacked(&self) -> u64 { + self.unacked_len as u64 - self.acks.iter().map(|x| x.end - x.start).sum::() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn fragment_with_length() { + let mut buf = SendBuffer::new(); + const MSG: &[u8] = b"Hello, world!"; + buf.write(MSG.into()); + // 0 byte offset => 19 bytes left => 13 byte data isn't enough + // with 8 bytes reserved for length 11 payload bytes will fit + assert_eq!(buf.poll_transmit(19), (0..11, true)); + assert_eq!( + buf.poll_transmit(MSG.len() + 16 - 11), + (11..MSG.len() as u64, true) + ); + assert_eq!( + buf.poll_transmit(58), + (MSG.len() as u64..MSG.len() as u64, true) + ); + } + + #[test] + fn fragment_without_length() { + let mut buf = SendBuffer::new(); + const MSG: &[u8] = b"Hello, world with some extra data!"; + buf.write(MSG.into()); + // 0 byte offset => 19 bytes left => can be filled by 34 bytes payload + assert_eq!(buf.poll_transmit(19), (0..19, false)); + assert_eq!( + buf.poll_transmit(MSG.len() - 19 + 1), + (19..MSG.len() as u64, false) + ); + assert_eq!( + buf.poll_transmit(58), + (MSG.len() as u64..MSG.len() as u64, true) + ); + } + + #[test] + fn reserves_encoded_offset() { + let mut buf = SendBuffer::new(); + + // Pretend we have more than 1 GB of data in the buffer + let chunk: Bytes = Bytes::from_static(&[0; 1024 * 1024]); + for _ in 0..1025 { + buf.write(chunk.clone()); + } + + const SIZE1: u64 = 64; + const SIZE2: u64 = 16 * 1024; + const 
SIZE3: u64 = 1024 * 1024 * 1024; + + // Offset 0 requires no space + assert_eq!(buf.poll_transmit(16), (0..16, false)); + buf.retransmit(0..16); + assert_eq!(buf.poll_transmit(16), (0..16, false)); + let mut transmitted = 16u64; + + // Offset 16 requires 1 byte + assert_eq!( + buf.poll_transmit((SIZE1 - transmitted + 1) as usize), + (transmitted..SIZE1, false) + ); + buf.retransmit(transmitted..SIZE1); + assert_eq!( + buf.poll_transmit((SIZE1 - transmitted + 1) as usize), + (transmitted..SIZE1, false) + ); + transmitted = SIZE1; + + // Offset 64 requires 2 bytes + assert_eq!( + buf.poll_transmit((SIZE2 - transmitted + 2) as usize), + (transmitted..SIZE2, false) + ); + buf.retransmit(transmitted..SIZE2); + assert_eq!( + buf.poll_transmit((SIZE2 - transmitted + 2) as usize), + (transmitted..SIZE2, false) + ); + transmitted = SIZE2; + + // Offset 16384 requires requires 4 bytes + assert_eq!( + buf.poll_transmit((SIZE3 - transmitted + 4) as usize), + (transmitted..SIZE3, false) + ); + buf.retransmit(transmitted..SIZE3); + assert_eq!( + buf.poll_transmit((SIZE3 - transmitted + 4) as usize), + (transmitted..SIZE3, false) + ); + transmitted = SIZE3; + + // Offset 1GB requires 8 bytes + assert_eq!( + buf.poll_transmit(chunk.len() + 8), + (transmitted..transmitted + chunk.len() as u64, false) + ); + buf.retransmit(transmitted..transmitted + chunk.len() as u64); + assert_eq!( + buf.poll_transmit(chunk.len() + 8), + (transmitted..transmitted + chunk.len() as u64, false) + ); + } + + #[test] + fn multiple_segments() { + let mut buf = SendBuffer::new(); + const MSG: &[u8] = b"Hello, world!"; + const MSG_LEN: u64 = MSG.len() as u64; + + const SEG1: &[u8] = b"He"; + buf.write(SEG1.into()); + const SEG2: &[u8] = b"llo,"; + buf.write(SEG2.into()); + const SEG3: &[u8] = b" w"; + buf.write(SEG3.into()); + const SEG4: &[u8] = b"o"; + buf.write(SEG4.into()); + const SEG5: &[u8] = b"rld!"; + buf.write(SEG5.into()); + + assert_eq!(aggregate_unacked(&buf), MSG); + + 
assert_eq!(buf.poll_transmit(16), (0..8, true)); + assert_eq!(buf.get(0..5), SEG1); + assert_eq!(buf.get(2..8), SEG2); + assert_eq!(buf.get(6..8), SEG3); + + assert_eq!(buf.poll_transmit(16), (8..MSG_LEN, true)); + assert_eq!(buf.get(8..MSG_LEN), SEG4); + assert_eq!(buf.get(9..MSG_LEN), SEG5); + + assert_eq!(buf.poll_transmit(42), (MSG_LEN..MSG_LEN, true)); + + // Now drain the segments + buf.ack(0..1); + assert_eq!(aggregate_unacked(&buf), &MSG[1..]); + buf.ack(0..3); + assert_eq!(aggregate_unacked(&buf), &MSG[3..]); + buf.ack(3..5); + assert_eq!(aggregate_unacked(&buf), &MSG[5..]); + buf.ack(7..9); + assert_eq!(aggregate_unacked(&buf), &MSG[5..]); + buf.ack(4..7); + assert_eq!(aggregate_unacked(&buf), &MSG[9..]); + buf.ack(0..MSG_LEN); + assert_eq!(aggregate_unacked(&buf), &[] as &[u8]); + } + + #[test] + fn retransmit() { + let mut buf = SendBuffer::new(); + const MSG: &[u8] = b"Hello, world with extra data!"; + buf.write(MSG.into()); + // Transmit two frames + assert_eq!(buf.poll_transmit(16), (0..16, false)); + assert_eq!(buf.poll_transmit(16), (16..23, true)); + // Lose the first, but not the second + buf.retransmit(0..16); + // Ensure we only retransmit the lost frame, then continue sending fresh data + assert_eq!(buf.poll_transmit(16), (0..16, false)); + assert_eq!(buf.poll_transmit(16), (23..MSG.len() as u64, true)); + // Lose the second frame + buf.retransmit(16..23); + assert_eq!(buf.poll_transmit(16), (16..23, true)); + } + + #[test] + fn ack() { + let mut buf = SendBuffer::new(); + const MSG: &[u8] = b"Hello, world!"; + buf.write(MSG.into()); + assert_eq!(buf.poll_transmit(16), (0..8, true)); + buf.ack(0..8); + assert_eq!(aggregate_unacked(&buf), &MSG[8..]); + } + + #[test] + fn reordered_ack() { + let mut buf = SendBuffer::new(); + const MSG: &[u8] = b"Hello, world with extra data!"; + buf.write(MSG.into()); + assert_eq!(buf.poll_transmit(16), (0..16, false)); + assert_eq!(buf.poll_transmit(16), (16..23, true)); + buf.ack(16..23); + 
assert_eq!(aggregate_unacked(&buf), MSG); + buf.ack(0..16); + assert_eq!(aggregate_unacked(&buf), &MSG[23..]); + assert!(buf.acks.is_empty()); + } + + fn aggregate_unacked(buf: &SendBuffer) -> Vec { + let mut result = Vec::new(); + for segment in buf.unacked_segments.iter() { + result.extend_from_slice(&segment[..]); + } + result + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/spaces.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/spaces.rs new file mode 100644 index 0000000000000000000000000000000000000000..8282f70b98e011f5a18957faa00abada131d6cfe --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/spaces.rs @@ -0,0 +1,1088 @@ +use std::{ + cmp, + collections::{BTreeMap, VecDeque}, + mem, + ops::{Bound, Index, IndexMut}, +}; + +use rand::Rng; +use rustc_hash::FxHashSet; +use tracing::trace; + +use super::assembler::Assembler; +use crate::{ + Dir, Duration, Instant, SocketAddr, StreamId, TransportError, VarInt, connection::StreamsState, + crypto::Keys, frame, packet::SpaceId, range_set::ArrayRangeSet, shared::IssuedCid, +}; + +pub(super) struct PacketSpace { + pub(super) crypto: Option, + pub(super) dedup: Dedup, + /// Highest received packet number + pub(super) rx_packet: u64, + + /// Data to send + pub(super) pending: Retransmits, + /// Packet numbers to acknowledge + pub(super) pending_acks: PendingAcks, + + /// The packet number of the next packet that will be sent, if any. In the Data space, the + /// packet number stored here is sometimes skipped by [`PacketNumberFilter`] logic. + pub(super) next_packet_number: u64, + /// The largest packet number the remote peer acknowledged in an ACK frame. 
+ pub(super) largest_acked_packet: Option, + pub(super) largest_acked_packet_sent: Instant, + /// The highest-numbered ACK-eliciting packet we've sent + pub(super) largest_ack_eliciting_sent: u64, + /// Number of packets in `sent_packets` with numbers above `largest_ack_eliciting_sent` + pub(super) unacked_non_ack_eliciting_tail: u64, + /// Transmitted but not acked + // We use a BTreeMap here so we can efficiently query by range on ACK and for loss detection + pub(super) sent_packets: BTreeMap, + /// Number of explicit congestion notification codepoints seen on incoming packets + pub(super) ecn_counters: frame::EcnCounts, + /// Recent ECN counters sent by the peer in ACK frames + /// + /// Updated (and inspected) whenever we receive an ACK with a new highest acked packet + /// number. Stored per-space to simplify verification, which would otherwise have difficulty + /// distinguishing between ECN bleaching and counts having been updated by a near-simultaneous + /// ACK already processed in another space. + pub(super) ecn_feedback: frame::EcnCounts, + + /// Incoming cryptographic handshake stream + pub(super) crypto_stream: Assembler, + /// Current offset of outgoing cryptographic handshake stream + pub(super) crypto_offset: u64, + + /// The time the most recently sent retransmittable packet was sent. + pub(super) time_of_last_ack_eliciting_packet: Option, + /// The time at which the earliest sent packet in this space will be considered lost based on + /// exceeding the reordering window in time. Only set for packets numbered prior to a packet + /// that has been acknowledged. 
+ pub(super) loss_time: Option, + /// Number of tail loss probes to send + pub(super) loss_probes: u32, + pub(super) ping_pending: bool, + pub(super) immediate_ack_pending: bool, + /// Number of packets sent in the current key phase + pub(super) sent_with_keys: u64, +} + +impl PacketSpace { + pub(super) fn new(now: Instant) -> Self { + Self { + crypto: None, + dedup: Dedup::new(), + rx_packet: 0, + + pending: Retransmits::default(), + pending_acks: PendingAcks::new(), + + next_packet_number: 0, + largest_acked_packet: None, + largest_acked_packet_sent: now, + largest_ack_eliciting_sent: 0, + unacked_non_ack_eliciting_tail: 0, + sent_packets: BTreeMap::new(), + ecn_counters: frame::EcnCounts::ZERO, + ecn_feedback: frame::EcnCounts::ZERO, + + crypto_stream: Assembler::new(), + crypto_offset: 0, + + time_of_last_ack_eliciting_packet: None, + loss_time: None, + loss_probes: 0, + ping_pending: false, + immediate_ack_pending: false, + sent_with_keys: 0, + } + } + + /// Queue data for a tail loss probe (or anti-amplification deadlock prevention) packet + /// + /// Probes are sent similarly to normal packets when an expected ACK has not arrived. We never + /// deem a packet lost until we receive an ACK that should have included it, but if a trailing + /// run of packets (or their ACKs) are lost, this might not happen in a timely fashion. We send + /// probe packets to force an ACK, and exempt them from congestion control to prevent a deadlock + /// when the congestion window is filled with lost tail packets. + /// + /// We prefer to send new data, to make the most efficient use of bandwidth. If there's no data + /// waiting to be sent, then we retransmit in-flight data to reduce odds of loss. If there's no + /// in-flight data either, we're probably a client guarding against a handshake + /// anti-amplification deadlock and we just make something up. 
+ pub(super) fn maybe_queue_probe( + &mut self, + request_immediate_ack: bool, + streams: &StreamsState, + ) { + if self.loss_probes == 0 { + return; + } + + if request_immediate_ack { + // The probe should be ACKed without delay (should only be used in the Data space and + // when the peer supports the acknowledgement frequency extension) + self.immediate_ack_pending = true; + } + + if !self.pending.is_empty(streams) { + // There's real data to send here, no need to make something up + return; + } + + // Retransmit the data of the oldest in-flight packet + for packet in self.sent_packets.values_mut() { + if !packet.retransmits.is_empty(streams) { + // Remove retransmitted data from the old packet so we don't end up retransmitting + // it *again* even if the copy we're sending now gets acknowledged. + self.pending |= mem::take(&mut packet.retransmits); + return; + } + } + + // Nothing new to send and nothing to retransmit, so fall back on a ping. This should only + // happen in rare cases during the handshake when the server becomes blocked by + // anti-amplification. + if !self.immediate_ack_pending { + self.ping_pending = true; + } + } + + /// Get the next outgoing packet number in this space + /// + /// In the Data space, the connection's [`PacketNumberFilter`] must be used rather than calling + /// this directly. + pub(super) fn get_tx_number(&mut self) -> u64 { + // TODO: Handle packet number overflow gracefully + assert!(self.next_packet_number < 2u64.pow(62)); + let x = self.next_packet_number; + self.next_packet_number += 1; + self.sent_with_keys += 1; + x + } + + pub(super) fn can_send(&self, streams: &StreamsState) -> SendableFrames { + let acks = self.pending_acks.can_send(); + let other = + !self.pending.is_empty(streams) || self.ping_pending || self.immediate_ack_pending; + + SendableFrames { acks, other } + } + + /// Verifies sanity of an ECN block and returns whether congestion was encountered. 
+ pub(super) fn detect_ecn( + &mut self, + newly_acked: u64, + ecn: frame::EcnCounts, + ) -> Result { + let ect0_increase = ecn + .ect0 + .checked_sub(self.ecn_feedback.ect0) + .ok_or("peer ECT(0) count regression")?; + let ect1_increase = ecn + .ect1 + .checked_sub(self.ecn_feedback.ect1) + .ok_or("peer ECT(1) count regression")?; + let ce_increase = ecn + .ce + .checked_sub(self.ecn_feedback.ce) + .ok_or("peer CE count regression")?; + let total_increase = ect0_increase + ect1_increase + ce_increase; + if total_increase < newly_acked { + return Err("ECN bleaching"); + } + if (ect0_increase + ce_increase) < newly_acked || ect1_increase != 0 { + return Err("ECN corruption"); + } + // If total_increase > newly_acked (which happens when ACKs are lost), this is required by + // the draft so that long-term drift does not occur. If =, then the only question is whether + // to count CE packets as CE or ECT0. Recording them as CE is more consistent and keeps the + // congestion check obvious. + self.ecn_feedback = ecn; + Ok(ce_increase != 0) + } + + /// Stop tracking sent packet `number`, and return what we knew about it + pub(super) fn take(&mut self, number: u64) -> Option { + let packet = self.sent_packets.remove(&number)?; + if !packet.ack_eliciting && number > self.largest_ack_eliciting_sent { + self.unacked_non_ack_eliciting_tail = + self.unacked_non_ack_eliciting_tail.checked_sub(1).unwrap(); + } + Some(packet) + } + + /// May return a packet that should be forgotten + pub(super) fn sent(&mut self, number: u64, packet: SentPacket) -> Option { + // Retain state for at most this many non-ACK-eliciting packets sent after the most recently + // sent ACK-eliciting packet. 
We're never guaranteed to receive an ACK for those, and we + // can't judge them as lost without an ACK, so to limit memory in applications which receive + // packets but don't send ACK-eliciting data for long periods use we must eventually start + // forgetting about them, although it might also be reasonable to just kill the connection + // due to weird peer behavior. + const MAX_UNACKED_NON_ACK_ELICTING_TAIL: u64 = 1_000; + + let mut forgotten = None; + if packet.ack_eliciting { + self.unacked_non_ack_eliciting_tail = 0; + self.largest_ack_eliciting_sent = number; + } else if self.unacked_non_ack_eliciting_tail > MAX_UNACKED_NON_ACK_ELICTING_TAIL { + let oldest_after_ack_eliciting = *self + .sent_packets + .range(( + Bound::Excluded(self.largest_ack_eliciting_sent), + Bound::Unbounded, + )) + .next() + .unwrap() + .0; + // Per https://www.rfc-editor.org/rfc/rfc9000.html#name-frames-and-frame-types, + // non-ACK-eliciting packets must only contain PADDING, ACK, and CONNECTION_CLOSE + // frames, which require no special handling on ACK or loss beyond removal from + // in-flight counters if padded. + let packet = self + .sent_packets + .remove(&oldest_after_ack_eliciting) + .unwrap(); + debug_assert!(!packet.ack_eliciting); + forgotten = Some(packet); + } else { + self.unacked_non_ack_eliciting_tail += 1; + } + + self.sent_packets.insert(number, packet); + forgotten + } + + /// Whether any congestion-controlled packets in this space are not yet acknowledged or lost + pub(super) fn has_in_flight(&self) -> bool { + // The number of non-congestion-controlled (i.e. size == 0) packets in flight at a time + // should be small, since otherwise congestion control wouldn't be effective. Therefore, + // this shouldn't need to visit many packets before finishing one way or another. 
+ self.sent_packets.values().any(|x| x.size != 0) + } +} + +impl Index for [PacketSpace; 3] { + type Output = PacketSpace; + fn index(&self, space: SpaceId) -> &PacketSpace { + &self.as_ref()[space as usize] + } +} + +impl IndexMut for [PacketSpace; 3] { + fn index_mut(&mut self, space: SpaceId) -> &mut PacketSpace { + &mut self.as_mut()[space as usize] + } +} + +/// Represents one or more packets subject to retransmission +#[derive(Debug, Clone)] +pub(super) struct SentPacket { + /// [`PathData::generation`](super::PathData::generation) of the path on which this packet was sent + pub(super) path_generation: u64, + /// The time the packet was sent. + pub(super) time_sent: Instant, + /// The number of bytes sent in the packet, not including UDP or IP overhead, but including QUIC + /// framing overhead. Zero if this packet is not counted towards congestion control, i.e. not an + /// "in flight" packet. + pub(super) size: u16, + /// Whether an acknowledgement is expected directly in response to this packet. + pub(super) ack_eliciting: bool, + /// The largest packet number acknowledged by this packet + pub(super) largest_acked: Option, + /// Data which needs to be retransmitted in case the packet is lost. + /// The data is boxed to minimize `SentPacket` size for the typical case of + /// packets only containing ACKs and STREAM frames. + pub(super) retransmits: ThinRetransmits, + /// Metadata for stream frames in a packet + /// + /// The actual application data is stored with the stream state. 
+ pub(super) stream_frames: frame::StreamMetaVec, +} + +/// Retransmittable data queue +#[allow(unreachable_pub)] // fuzzing only +#[derive(Debug, Default, Clone)] +pub struct Retransmits { + pub(super) max_data: bool, + pub(super) max_stream_id: [bool; 2], + pub(super) reset_stream: Vec<(StreamId, VarInt)>, + pub(super) stop_sending: Vec, + pub(super) max_stream_data: FxHashSet, + pub(super) crypto: VecDeque, + pub(super) new_cids: Vec, + pub(super) retire_cids: Vec, + pub(super) ack_frequency: bool, + pub(super) handshake_done: bool, + /// For each enqueued NEW_TOKEN frame, a copy of the path's remote address + /// + /// There are 2 reasons this is unusual: + /// + /// - If the path changes, NEW_TOKEN frames bound for the old path are not retransmitted on the + /// new path. That is why this field stores the remote address: so that ones for old paths + /// can be filtered out. + /// - If a token is lost, a new randomly generated token is re-transmitted, rather than the + /// original. This is so that if both transmissions are received, the client won't risk + /// sending the same token twice. That is why this field does _not_ store any actual token. + /// + /// It is true that a QUIC endpoint will only want to effectively have NEW_TOKEN frames + /// enqueued for its current path at a given point in time. Based on that, we could conceivably + /// change this from a vector to an `Option<(SocketAddr, usize)>` or just a `usize` or + /// something. However, due to the architecture of Quinn, it is considerably simpler to not do + /// that; consider what such a change would mean for implementing `BitOrAssign` on Self. 
+ pub(super) new_tokens: Vec, +} + +impl Retransmits { + pub(super) fn is_empty(&self, streams: &StreamsState) -> bool { + !self.max_data + && !self.max_stream_id.into_iter().any(|x| x) + && self.reset_stream.is_empty() + && self.stop_sending.is_empty() + && self + .max_stream_data + .iter() + .all(|&id| !streams.can_send_flow_control(id)) + && self.crypto.is_empty() + && self.new_cids.is_empty() + && self.retire_cids.is_empty() + && !self.ack_frequency + && !self.handshake_done + && self.new_tokens.is_empty() + } +} + +impl ::std::ops::BitOrAssign for Retransmits { + fn bitor_assign(&mut self, rhs: Self) { + // We reduce in-stream head-of-line blocking by queueing retransmits before other data for + // STREAM and CRYPTO frames. + self.max_data |= rhs.max_data; + for dir in Dir::iter() { + self.max_stream_id[dir as usize] |= rhs.max_stream_id[dir as usize]; + } + self.reset_stream.extend_from_slice(&rhs.reset_stream); + self.stop_sending.extend_from_slice(&rhs.stop_sending); + self.max_stream_data.extend(&rhs.max_stream_data); + for crypto in rhs.crypto.into_iter().rev() { + self.crypto.push_front(crypto); + } + self.new_cids.extend(&rhs.new_cids); + self.retire_cids.extend(rhs.retire_cids); + self.ack_frequency |= rhs.ack_frequency; + self.handshake_done |= rhs.handshake_done; + self.new_tokens.extend_from_slice(&rhs.new_tokens); + } +} + +impl ::std::ops::BitOrAssign for Retransmits { + fn bitor_assign(&mut self, rhs: ThinRetransmits) { + if let Some(retransmits) = rhs.retransmits { + self.bitor_assign(*retransmits) + } + } +} + +impl ::std::iter::FromIterator for Retransmits { + fn from_iter(iter: T) -> Self + where + T: IntoIterator, + { + let mut result = Self::default(); + for packet in iter { + result |= packet; + } + result + } +} + +/// A variant of `Retransmits` which only allocates storage when required +#[derive(Debug, Default, Clone)] +pub(super) struct ThinRetransmits { + retransmits: Option>, +} + +impl ThinRetransmits { + /// Returns `true` if no 
retransmits are necessary + pub(super) fn is_empty(&self, streams: &StreamsState) -> bool { + match &self.retransmits { + Some(retransmits) => retransmits.is_empty(streams), + None => true, + } + } + + /// Returns a reference to the retransmits stored in this box + pub(super) fn get(&self) -> Option<&Retransmits> { + self.retransmits.as_deref() + } + + /// Returns a mutable reference to the stored retransmits + /// + /// This function will allocate a backing storage if required. + pub(super) fn get_or_create(&mut self) -> &mut Retransmits { + if self.retransmits.is_none() { + self.retransmits = Some(Box::default()); + } + self.retransmits.as_deref_mut().unwrap() + } +} + +/// RFC4303-style sliding window packet number deduplicator. +/// +/// A contiguous bitfield, where each bit corresponds to a packet number and the rightmost bit is +/// always set. A set bit represents a packet that has been successfully authenticated. Bits left of +/// the window are assumed to be set. +/// +/// ```text +/// ...xxxxxxxxx 1 0 +/// ^ ^ ^ +/// window highest next +/// ``` +pub(super) struct Dedup { + window: Window, + /// Lowest packet number higher than all yet authenticated. + next: u64, +} + +/// Inner bitfield type. +/// +/// Because QUIC never reuses packet numbers, this only needs to be large enough to deal with +/// packets that are reordered but still delivered in a timely manner. +type Window = u128; + +/// Number of packets tracked by `Dedup`. +const WINDOW_SIZE: u64 = 1 + mem::size_of::() as u64 * 8; + +impl Dedup { + /// Construct an empty window positioned at the start. + pub(super) fn new() -> Self { + Self { window: 0, next: 0 } + } + + /// Highest packet number authenticated. + fn highest(&self) -> u64 { + self.next - 1 + } + + /// Record a newly authenticated packet number. + /// + /// Returns whether the packet might be a duplicate. 
+ pub(super) fn insert(&mut self, packet: u64) -> bool { + if let Some(diff) = packet.checked_sub(self.next) { + // Right of window + self.window = ((self.window << 1) | 1) + .checked_shl(cmp::min(diff, u64::from(u32::MAX)) as u32) + .unwrap_or(0); + self.next = packet + 1; + false + } else if self.highest() - packet < WINDOW_SIZE { + // Within window + if let Some(bit) = (self.highest() - packet).checked_sub(1) { + // < highest + let mask = 1 << bit; + let duplicate = self.window & mask != 0; + self.window |= mask; + duplicate + } else { + // == highest + true + } + } else { + // Left of window + true + } + } + + /// Returns the packet number of the smallest packet missing between the provided interval + /// + /// If there are no missing packets, returns `None` + fn smallest_missing_in_interval(&self, lower_bound: u64, upper_bound: u64) -> Option { + debug_assert!(lower_bound <= upper_bound); + debug_assert!(upper_bound <= self.highest()); + const BITFIELD_SIZE: u64 = (mem::size_of::() * 8) as u64; + + // Since we already know the packets at the boundaries have been received, we only need to + // check those in between them (this removes the necessity of extra logic to deal with the + // highest packet, which is stored outside the bitfield) + let lower_bound = lower_bound + 1; + let upper_bound = upper_bound.saturating_sub(1); + + // Note: the offsets are counted from the right + // The highest packet is not included in the bitfield, so we subtract 1 to account for that + let start_offset = (self.highest() - upper_bound).max(1) - 1; + if start_offset >= BITFIELD_SIZE { + // The start offset is outside of the window. All packets outside of the window are + // considered to be received. 
+ return None; + } + + let end_offset_exclusive = self.highest().saturating_sub(lower_bound); + + // The range is clamped at the edge of the window, because any earlier packets are + // considered to be received + let range_len = end_offset_exclusive + .saturating_sub(start_offset) + .min(BITFIELD_SIZE); + if range_len == 0 { + return None; + } + + // Ensure the shift is within bounds (we already know start_offset < BITFIELD_SIZE, + // because of the early return) + let mask = if range_len == BITFIELD_SIZE { + u128::MAX + } else { + ((1u128 << range_len) - 1) << start_offset + }; + let gaps = !self.window & mask; + + let smallest_missing_offset = 128 - gaps.leading_zeros() as u64; + let smallest_missing_packet = self.highest() - smallest_missing_offset; + + if smallest_missing_packet <= upper_bound { + Some(smallest_missing_packet) + } else { + None + } + } + + /// Returns true if there are any missing packets between the provided interval + /// + /// The provided packet numbers must have been received before calling this function + fn missing_in_interval(&self, lower_bound: u64, upper_bound: u64) -> bool { + self.smallest_missing_in_interval(lower_bound, upper_bound) + .is_some() + } +} + +/// Indicates which data is available for sending +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub(super) struct SendableFrames { + pub(super) acks: bool, + pub(super) other: bool, +} + +impl SendableFrames { + /// Returns that no data is available for sending + pub(super) fn empty() -> Self { + Self { + acks: false, + other: false, + } + } + + /// Whether no data is sendable + pub(super) fn is_empty(&self) -> bool { + !self.acks && !self.other + } +} + +#[derive(Debug)] +pub(super) struct PendingAcks { + /// Whether we should send an ACK immediately, even if that means sending an ACK-only packet + /// + /// When `immediate_ack_required` is false, the normal behavior is to send ACK frames only when + /// there is other data to send, or when the `MaxAckDelay` timer expires. 
+ immediate_ack_required: bool, + /// The number of ack-eliciting packets received since the last ACK frame was sent + /// + /// Once the count _exceeds_ `ack_eliciting_threshold`, an immediate ACK is required + ack_eliciting_since_last_ack_sent: u64, + non_ack_eliciting_since_last_ack_sent: u64, + ack_eliciting_threshold: u64, + /// The reordering threshold, controlling how we respond to out-of-order ack-eliciting packets + /// + /// Different values enable different behavior: + /// + /// * `0`: no special action is taken + /// * `1`: an ACK is immediately sent if it is out-of-order according to RFC 9000 + /// * `>1`: an ACK is immediately sent if it is out-of-order according to the ACK frequency draft + reordering_threshold: u64, + /// The earliest ack-eliciting packet since the last ACK was sent, used to calculate the moment + /// upon which `max_ack_delay` elapses + earliest_ack_eliciting_since_last_ack_sent: Option, + /// The packet number ranges of ack-eliciting packets the peer hasn't confirmed receipt of ACKs + /// for + ranges: ArrayRangeSet, + /// The packet with the largest packet number, and the time upon which it was received (used to + /// calculate ACK delay in [`PendingAcks::ack_delay`]) + largest_packet: Option<(u64, Instant)>, + /// The ack-eliciting packet we have received with the largest packet number + largest_ack_eliciting_packet: Option, + /// The largest acknowledged packet number sent in an ACK frame + largest_acked: Option, +} + +impl PendingAcks { + fn new() -> Self { + Self { + immediate_ack_required: false, + ack_eliciting_since_last_ack_sent: 0, + non_ack_eliciting_since_last_ack_sent: 0, + ack_eliciting_threshold: 1, + reordering_threshold: 1, + earliest_ack_eliciting_since_last_ack_sent: None, + ranges: ArrayRangeSet::default(), + largest_packet: None, + largest_ack_eliciting_packet: None, + largest_acked: None, + } + } + + pub(super) fn set_ack_frequency_params(&mut self, frame: &frame::AckFrequency) { + 
self.ack_eliciting_threshold = frame.ack_eliciting_threshold.into_inner(); + self.reordering_threshold = frame.reordering_threshold.into_inner(); + } + + pub(super) fn set_immediate_ack_required(&mut self) { + self.immediate_ack_required = true; + } + + pub(super) fn on_max_ack_delay_timeout(&mut self) { + self.immediate_ack_required = self.ack_eliciting_since_last_ack_sent > 0; + } + + pub(super) fn max_ack_delay_timeout(&self, max_ack_delay: Duration) -> Option { + self.earliest_ack_eliciting_since_last_ack_sent + .map(|earliest_unacked| earliest_unacked + max_ack_delay) + } + + /// Whether any ACK frames can be sent + pub(super) fn can_send(&self) -> bool { + self.immediate_ack_required && !self.ranges.is_empty() + } + + /// Returns the delay since the packet with the largest packet number was received + pub(super) fn ack_delay(&self, now: Instant) -> Duration { + self.largest_packet + .map_or(Duration::default(), |(_, received)| now - received) + } + + /// Handle receipt of a new packet + /// + /// Returns true if the max ack delay timer should be armed + pub(super) fn packet_received( + &mut self, + now: Instant, + packet_number: u64, + ack_eliciting: bool, + dedup: &Dedup, + ) -> bool { + if !ack_eliciting { + self.non_ack_eliciting_since_last_ack_sent += 1; + return false; + } + + let prev_largest_ack_eliciting = self.largest_ack_eliciting_packet.unwrap_or(0); + + // Track largest ack-eliciting packet + self.largest_ack_eliciting_packet = self + .largest_ack_eliciting_packet + .map(|pn| pn.max(packet_number)) + .or(Some(packet_number)); + + // Handle ack_eliciting_threshold + self.ack_eliciting_since_last_ack_sent += 1; + self.immediate_ack_required |= + self.ack_eliciting_since_last_ack_sent > self.ack_eliciting_threshold; + + // Handle out-of-order packets + self.immediate_ack_required |= + self.is_out_of_order(packet_number, prev_largest_ack_eliciting, dedup); + + // Arm max_ack_delay timer if necessary + if 
self.earliest_ack_eliciting_since_last_ack_sent.is_none() && !self.can_send() { + self.earliest_ack_eliciting_since_last_ack_sent = Some(now); + return true; + } + + false + } + + fn is_out_of_order( + &self, + packet_number: u64, + prev_largest_ack_eliciting: u64, + dedup: &Dedup, + ) -> bool { + match self.reordering_threshold { + 0 => false, + 1 => { + // From https://www.rfc-editor.org/rfc/rfc9000#section-13.2.1-7 + packet_number < prev_largest_ack_eliciting + || dedup.missing_in_interval(prev_largest_ack_eliciting, packet_number) + } + _ => { + // From acknowledgement frequency draft, section 6.1: send an ACK immediately if + // doing so would cause the sender to detect a new packet loss + let Some((largest_acked, largest_unacked)) = + self.largest_acked.zip(self.largest_ack_eliciting_packet) + else { + return false; + }; + if self.reordering_threshold > largest_acked { + return false; + } + // The largest packet number that could be declared lost without a new ACK being + // sent + let largest_reported = largest_acked - self.reordering_threshold + 1; + let Some(smallest_missing_unreported) = + dedup.smallest_missing_in_interval(largest_reported, largest_unacked) + else { + return false; + }; + largest_unacked - smallest_missing_unreported >= self.reordering_threshold + } + } + } + + /// Should be called whenever ACKs have been sent + /// + /// This will suppress sending further ACKs until additional ACK eliciting frames arrive + pub(super) fn acks_sent(&mut self) { + // It is possible (though unlikely) that the ACKs we just sent do not cover all the + // ACK-eliciting packets we have received (e.g. if there is not enough room in the packet to + // fit all the ranges). To keep things simple, however, we assume they do. If there are + // indeed some ACKs that weren't covered, the packets might be ACKed later anyway, because + // they are still contained in `self.ranges`. 
If we somehow fail to send the ACKs at a later + // moment, the peer will assume the packets got lost and will retransmit their frames in a + // new packet, which is suboptimal, because we already received them. Our assumption here is + // that simplicity results in code that is more performant, even in the presence of + // occasional redundant retransmits. + self.immediate_ack_required = false; + self.ack_eliciting_since_last_ack_sent = 0; + self.non_ack_eliciting_since_last_ack_sent = 0; + self.earliest_ack_eliciting_since_last_ack_sent = None; + self.largest_acked = self.largest_ack_eliciting_packet; + } + + /// Insert one packet that needs to be acknowledged + pub(super) fn insert_one(&mut self, packet: u64, now: Instant) { + self.ranges.insert_one(packet); + + if self.largest_packet.map_or(true, |(pn, _)| packet > pn) { + self.largest_packet = Some((packet, now)); + } + + if self.ranges.len() > MAX_ACK_BLOCKS { + self.ranges.pop_min(); + } + } + + /// Remove ACKs of packets numbered at or below `max` from the set of pending ACKs + pub(super) fn subtract_below(&mut self, max: u64) { + self.ranges.remove(0..(max + 1)); + } + + /// Returns the set of currently pending ACK ranges + pub(super) fn ranges(&self) -> &ArrayRangeSet { + &self.ranges + } + + /// Queue an ACK if a significant number of non-ACK-eliciting packets have not yet been + /// acknowledged + /// + /// Should be called immediately before a non-probing packet is composed, when we've already + /// committed to sending a packet regardless. + pub(super) fn maybe_ack_non_eliciting(&mut self) { + // If we're going to send a packet anyway, and we've received a significant number of + // non-ACK-eliciting packets, then include an ACK to help the peer perform timely loss + // detection even if they're not sending any ACK-eliciting packets themselves. Exact + // threshold chosen somewhat arbitrarily. 
+ const LAZY_ACK_THRESHOLD: u64 = 10; + if self.non_ack_eliciting_since_last_ack_sent > LAZY_ACK_THRESHOLD { + self.immediate_ack_required = true; + } + } +} + +/// Helper for mitigating [optimistic ACK attacks] +/// +/// A malicious peer could prompt the local application to begin a large data transfer, and then +/// send ACKs without first waiting for data to be received. This could defeat congestion control, +/// allowing the connection to consume disproportionate resources. We therefore occasionally skip +/// packet numbers, and classify any ACK referencing a skipped packet number as a transport error. +/// +/// Skipped packet numbers occur only in the application data space (where costly transfers might +/// take place) and are distributed exponentially to reflect the reduced likelihood and impact of +/// bad behavior from a peer that has been well-behaved for an extended period. +/// +/// ACKs for packet numbers that have not yet been allocated are also a transport error, but an +/// attacker with knowledge of the congestion control algorithm in use could time falsified ACKs to +/// arrive after the packets they reference are sent. 
+/// +/// [optimistic ACK attacks]: https://www.rfc-editor.org/rfc/rfc9000.html#name-optimistic-ack-attack +pub(super) struct PacketNumberFilter { + /// Next outgoing packet number to skip + next_skipped_packet_number: u64, + /// Most recently skipped packet number + prev_skipped_packet_number: Option, + /// Next packet number to skip is randomly selected from 2^n..2^n+1 + exponent: u32, +} + +impl PacketNumberFilter { + pub(super) fn new(rng: &mut (impl Rng + ?Sized)) -> Self { + // First skipped PN is in 0..64 + let exponent = 6; + Self { + next_skipped_packet_number: rng.random_range(0..2u64.saturating_pow(exponent)), + prev_skipped_packet_number: None, + exponent, + } + } + + #[cfg(test)] + pub(super) fn disabled() -> Self { + Self { + next_skipped_packet_number: u64::MAX, + prev_skipped_packet_number: None, + exponent: u32::MAX, + } + } + + pub(super) fn peek(&self, space: &PacketSpace) -> u64 { + let n = space.next_packet_number; + if n != self.next_skipped_packet_number { + return n; + } + n + 1 + } + + pub(super) fn allocate( + &mut self, + rng: &mut (impl Rng + ?Sized), + space: &mut PacketSpace, + ) -> u64 { + let n = space.get_tx_number(); + if n != self.next_skipped_packet_number { + return n; + } + + trace!("skipping pn {n}"); + // Skip this packet number, and choose the next one to skip + self.prev_skipped_packet_number = Some(self.next_skipped_packet_number); + let next_exponent = self.exponent.saturating_add(1); + self.next_skipped_packet_number = rng + .random_range(2u64.saturating_pow(self.exponent)..2u64.saturating_pow(next_exponent)); + self.exponent = next_exponent; + + space.get_tx_number() + } + + pub(super) fn check_ack( + &self, + space_id: SpaceId, + range: std::ops::RangeInclusive, + ) -> Result<(), TransportError> { + if space_id == SpaceId::Data + && self + .prev_skipped_packet_number + .is_some_and(|x| range.contains(&x)) + { + return Err(TransportError::PROTOCOL_VIOLATION("unsent packet acked")); + } + Ok(()) + } +} + +/// Ensures we 
can always fit all our ACKs in a single minimum-MTU packet with room to spare +const MAX_ACK_BLOCKS: usize = 64; + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn sanity() { + let mut dedup = Dedup::new(); + assert!(!dedup.insert(0)); + assert_eq!(dedup.next, 1); + assert_eq!(dedup.window, 0b1); + assert!(dedup.insert(0)); + assert_eq!(dedup.next, 1); + assert_eq!(dedup.window, 0b1); + assert!(!dedup.insert(1)); + assert_eq!(dedup.next, 2); + assert_eq!(dedup.window, 0b11); + assert!(!dedup.insert(2)); + assert_eq!(dedup.next, 3); + assert_eq!(dedup.window, 0b111); + assert!(!dedup.insert(4)); + assert_eq!(dedup.next, 5); + assert_eq!(dedup.window, 0b11110); + assert!(!dedup.insert(7)); + assert_eq!(dedup.next, 8); + assert_eq!(dedup.window, 0b1111_0100); + assert!(dedup.insert(4)); + assert!(!dedup.insert(3)); + assert_eq!(dedup.next, 8); + assert_eq!(dedup.window, 0b1111_1100); + assert!(!dedup.insert(6)); + assert_eq!(dedup.next, 8); + assert_eq!(dedup.window, 0b1111_1101); + assert!(!dedup.insert(5)); + assert_eq!(dedup.next, 8); + assert_eq!(dedup.window, 0b1111_1111); + } + + #[test] + fn happypath() { + let mut dedup = Dedup::new(); + for i in 0..(2 * WINDOW_SIZE) { + assert!(!dedup.insert(i)); + for j in 0..=i { + assert!(dedup.insert(j)); + } + } + } + + #[test] + fn jump() { + let mut dedup = Dedup::new(); + dedup.insert(2 * WINDOW_SIZE); + assert!(dedup.insert(WINDOW_SIZE)); + assert_eq!(dedup.next, 2 * WINDOW_SIZE + 1); + assert_eq!(dedup.window, 0); + assert!(!dedup.insert(WINDOW_SIZE + 1)); + assert_eq!(dedup.next, 2 * WINDOW_SIZE + 1); + assert_eq!(dedup.window, 1 << (WINDOW_SIZE - 2)); + } + + #[test] + fn dedup_has_missing() { + let mut dedup = Dedup::new(); + + dedup.insert(0); + assert!(!dedup.missing_in_interval(0, 0)); + + dedup.insert(1); + assert!(!dedup.missing_in_interval(0, 1)); + + dedup.insert(3); + assert!(dedup.missing_in_interval(1, 3)); + + dedup.insert(4); + assert!(!dedup.missing_in_interval(3, 4)); + 
assert!(dedup.missing_in_interval(0, 4)); + + dedup.insert(2); + assert!(!dedup.missing_in_interval(0, 4)); + } + + #[test] + fn dedup_outside_of_window_has_missing() { + let mut dedup = Dedup::new(); + + for i in 0..140 { + dedup.insert(i); + } + + // 0 and 4 are outside of the window + assert!(!dedup.missing_in_interval(0, 4)); + dedup.insert(160); + assert!(!dedup.missing_in_interval(0, 4)); + assert!(!dedup.missing_in_interval(0, 140)); + assert!(dedup.missing_in_interval(0, 160)); + } + + #[test] + fn dedup_smallest_missing() { + let mut dedup = Dedup::new(); + + dedup.insert(0); + assert_eq!(dedup.smallest_missing_in_interval(0, 0), None); + + dedup.insert(1); + assert_eq!(dedup.smallest_missing_in_interval(0, 1), None); + + dedup.insert(5); + dedup.insert(7); + assert_eq!(dedup.smallest_missing_in_interval(0, 7), Some(2)); + assert_eq!(dedup.smallest_missing_in_interval(5, 7), Some(6)); + + dedup.insert(2); + assert_eq!(dedup.smallest_missing_in_interval(1, 7), Some(3)); + + dedup.insert(170); + dedup.insert(172); + dedup.insert(300); + assert_eq!(dedup.smallest_missing_in_interval(170, 172), None); + + dedup.insert(500); + assert_eq!(dedup.smallest_missing_in_interval(0, 500), Some(372)); + assert_eq!(dedup.smallest_missing_in_interval(0, 373), Some(372)); + assert_eq!(dedup.smallest_missing_in_interval(0, 372), None); + } + + #[test] + fn pending_acks_first_packet_is_not_considered_reordered() { + let mut acks = PendingAcks::new(); + let mut dedup = Dedup::new(); + dedup.insert(0); + acks.packet_received(Instant::now(), 0, true, &dedup); + assert!(!acks.immediate_ack_required); + } + + #[test] + fn pending_acks_after_immediate_ack_set() { + let mut acks = PendingAcks::new(); + let mut dedup = Dedup::new(); + + // Receive ack-eliciting packet + dedup.insert(0); + let now = Instant::now(); + acks.insert_one(0, now); + acks.packet_received(now, 0, true, &dedup); + + // Sanity check + assert!(!acks.ranges.is_empty()); + assert!(!acks.can_send()); + + // Can 
send ACK after max_ack_delay exceeded + acks.set_immediate_ack_required(); + assert!(acks.can_send()); + } + + #[test] + fn pending_acks_ack_delay() { + let mut acks = PendingAcks::new(); + let mut dedup = Dedup::new(); + + let t1 = Instant::now(); + let t2 = t1 + Duration::from_millis(2); + let t3 = t2 + Duration::from_millis(5); + assert_eq!(acks.ack_delay(t1), Duration::from_millis(0)); + assert_eq!(acks.ack_delay(t2), Duration::from_millis(0)); + assert_eq!(acks.ack_delay(t3), Duration::from_millis(0)); + + // In-order packet + dedup.insert(0); + acks.insert_one(0, t1); + acks.packet_received(t1, 0, true, &dedup); + assert_eq!(acks.ack_delay(t1), Duration::from_millis(0)); + assert_eq!(acks.ack_delay(t2), Duration::from_millis(2)); + assert_eq!(acks.ack_delay(t3), Duration::from_millis(7)); + + // Out of order (higher than expected) + dedup.insert(3); + acks.insert_one(3, t2); + acks.packet_received(t2, 3, true, &dedup); + assert_eq!(acks.ack_delay(t2), Duration::from_millis(0)); + assert_eq!(acks.ack_delay(t3), Duration::from_millis(5)); + + // Out of order (lower than expected, so previous instant is kept) + dedup.insert(2); + acks.insert_one(2, t3); + acks.packet_received(t3, 2, true, &dedup); + assert_eq!(acks.ack_delay(t3), Duration::from_millis(5)); + } + + #[test] + fn sent_packet_size() { + // The tracking state of sent packets should be minimal, and not grow + // over time. + assert!(std::mem::size_of::() <= 128); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/stats.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/stats.rs new file mode 100644 index 0000000000000000000000000000000000000000..f62e62ee6c648f1c71396f65b21ebac0f79eefb6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/stats.rs @@ -0,0 +1,174 @@ +//! 
Connection statistics + +use crate::{Dir, Duration, frame::Frame}; + +/// Statistics about UDP datagrams transmitted or received on a connection +/// +/// All QUIC packets are carried by UDP datagrams. Hence, these statistics cover all traffic on a connection. +#[derive(Default, Debug, Copy, Clone)] +#[non_exhaustive] +pub struct UdpStats { + /// The amount of UDP datagrams observed + pub datagrams: u64, + /// The total amount of bytes which have been transferred inside UDP datagrams + pub bytes: u64, + /// The amount of I/O operations executed + /// + /// Can be less than `datagrams` when GSO, GRO, and/or batched system calls are in use. + pub ios: u64, +} + +impl UdpStats { + pub(crate) fn on_sent(&mut self, datagrams: u64, bytes: usize) { + self.datagrams += datagrams; + self.bytes += bytes as u64; + self.ios += 1; + } +} + +/// Number of frames transmitted or received of each frame type +#[derive(Default, Copy, Clone)] +#[non_exhaustive] +#[allow(missing_docs)] +pub struct FrameStats { + pub acks: u64, + pub ack_frequency: u64, + pub crypto: u64, + pub connection_close: u64, + pub data_blocked: u64, + pub datagram: u64, + pub handshake_done: u8, + pub immediate_ack: u64, + pub max_data: u64, + pub max_stream_data: u64, + pub max_streams_bidi: u64, + pub max_streams_uni: u64, + pub new_connection_id: u64, + pub new_token: u64, + pub path_challenge: u64, + pub path_response: u64, + pub ping: u64, + pub reset_stream: u64, + pub retire_connection_id: u64, + pub stream_data_blocked: u64, + pub streams_blocked_bidi: u64, + pub streams_blocked_uni: u64, + pub stop_sending: u64, + pub stream: u64, +} + +impl FrameStats { + pub(crate) fn record(&mut self, frame: &Frame) { + match frame { + Frame::Padding => {} + Frame::Ping => self.ping += 1, + Frame::Ack(_) => self.acks += 1, + Frame::ResetStream(_) => self.reset_stream += 1, + Frame::StopSending(_) => self.stop_sending += 1, + Frame::Crypto(_) => self.crypto += 1, + Frame::Datagram(_) => self.datagram += 1, + 
Frame::NewToken(_) => self.new_token += 1, + Frame::MaxData(_) => self.max_data += 1, + Frame::MaxStreamData { .. } => self.max_stream_data += 1, + Frame::MaxStreams { dir, .. } => { + if *dir == Dir::Bi { + self.max_streams_bidi += 1; + } else { + self.max_streams_uni += 1; + } + } + Frame::DataBlocked { .. } => self.data_blocked += 1, + Frame::Stream(_) => self.stream += 1, + Frame::StreamDataBlocked { .. } => self.stream_data_blocked += 1, + Frame::StreamsBlocked { dir, .. } => { + if *dir == Dir::Bi { + self.streams_blocked_bidi += 1; + } else { + self.streams_blocked_uni += 1; + } + } + Frame::NewConnectionId(_) => self.new_connection_id += 1, + Frame::RetireConnectionId { .. } => self.retire_connection_id += 1, + Frame::PathChallenge(_) => self.path_challenge += 1, + Frame::PathResponse(_) => self.path_response += 1, + Frame::Close(_) => self.connection_close += 1, + Frame::AckFrequency(_) => self.ack_frequency += 1, + Frame::ImmediateAck => self.immediate_ack += 1, + Frame::HandshakeDone => self.handshake_done = self.handshake_done.saturating_add(1), + } + } +} + +impl std::fmt::Debug for FrameStats { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("FrameStats") + .field("ACK", &self.acks) + .field("ACK_FREQUENCY", &self.ack_frequency) + .field("CONNECTION_CLOSE", &self.connection_close) + .field("CRYPTO", &self.crypto) + .field("DATA_BLOCKED", &self.data_blocked) + .field("DATAGRAM", &self.datagram) + .field("HANDSHAKE_DONE", &self.handshake_done) + .field("IMMEDIATE_ACK", &self.immediate_ack) + .field("MAX_DATA", &self.max_data) + .field("MAX_STREAM_DATA", &self.max_stream_data) + .field("MAX_STREAMS_BIDI", &self.max_streams_bidi) + .field("MAX_STREAMS_UNI", &self.max_streams_uni) + .field("NEW_CONNECTION_ID", &self.new_connection_id) + .field("NEW_TOKEN", &self.new_token) + .field("PATH_CHALLENGE", &self.path_challenge) + .field("PATH_RESPONSE", &self.path_response) + .field("PING", &self.ping) + 
.field("RESET_STREAM", &self.reset_stream) + .field("RETIRE_CONNECTION_ID", &self.retire_connection_id) + .field("STREAM_DATA_BLOCKED", &self.stream_data_blocked) + .field("STREAMS_BLOCKED_BIDI", &self.streams_blocked_bidi) + .field("STREAMS_BLOCKED_UNI", &self.streams_blocked_uni) + .field("STOP_SENDING", &self.stop_sending) + .field("STREAM", &self.stream) + .finish() + } +} + +/// Statistics related to a transmission path +#[derive(Debug, Default, Copy, Clone)] +#[non_exhaustive] +pub struct PathStats { + /// Current best estimate of this connection's latency (round-trip-time) + pub rtt: Duration, + /// Current congestion window of the connection + pub cwnd: u64, + /// Congestion events on the connection + pub congestion_events: u64, + /// The amount of packets lost on this path + pub lost_packets: u64, + /// The amount of bytes lost on this path + pub lost_bytes: u64, + /// The amount of packets sent on this path + pub sent_packets: u64, + /// The amount of PLPMTUD probe packets sent on this path (also counted by `sent_packets`) + pub sent_plpmtud_probes: u64, + /// The amount of PLPMTUD probe packets lost on this path (ignored by `lost_packets` and + /// `lost_bytes`) + pub lost_plpmtud_probes: u64, + /// The number of times a black hole was detected in the path + pub black_holes_detected: u64, + /// Largest UDP payload size the path currently supports + pub current_mtu: u16, +} + +/// Connection statistics +#[derive(Debug, Default, Copy, Clone)] +#[non_exhaustive] +pub struct ConnectionStats { + /// Statistics about UDP datagrams transmitted on a connection + pub udp_tx: UdpStats, + /// Statistics about UDP datagrams received on a connection + pub udp_rx: UdpStats, + /// Statistics about frames transmitted on a connection + pub frame_tx: FrameStats, + /// Statistics about frames received on a connection + pub frame_rx: FrameStats, + /// Statistics related to the current transmission path + pub path: PathStats, +} diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..3e769c74f9dc72f4415f398f503392325fead8c3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/mod.rs @@ -0,0 +1,528 @@ +use std::{ + collections::{BinaryHeap, hash_map}, + io, +}; + +use bytes::Bytes; +use thiserror::Error; +use tracing::trace; + +use super::spaces::{Retransmits, ThinRetransmits}; +use crate::{ + Dir, StreamId, VarInt, + connection::streams::state::{get_or_insert_recv, get_or_insert_send}, + frame, +}; + +mod recv; +use recv::Recv; +pub use recv::{Chunks, ReadError, ReadableError}; + +mod send; +pub(crate) use send::{ByteSlice, BytesArray}; +use send::{BytesSource, Send, SendState}; +pub use send::{FinishError, WriteError, Written}; + +mod state; +#[allow(unreachable_pub)] // fuzzing only +pub use state::StreamsState; + +/// Access to streams +pub struct Streams<'a> { + pub(super) state: &'a mut StreamsState, + pub(super) conn_state: &'a super::State, +} + +#[allow(clippy::needless_lifetimes)] // Needed for cfg(fuzzing) +impl<'a> Streams<'a> { + #[cfg(fuzzing)] + pub fn new(state: &'a mut StreamsState, conn_state: &'a super::State) -> Self { + Self { state, conn_state } + } + + /// Open a single stream if possible + /// + /// Returns `None` if the streams in the given direction are currently exhausted. 
+ pub fn open(&mut self, dir: Dir) -> Option { + if self.conn_state.is_closed() { + return None; + } + + // TODO: Queue STREAM_ID_BLOCKED if this fails + if self.state.next[dir as usize] >= self.state.max[dir as usize] { + return None; + } + + self.state.next[dir as usize] += 1; + let id = StreamId::new(self.state.side, dir, self.state.next[dir as usize] - 1); + self.state.insert(false, id); + self.state.send_streams += 1; + Some(id) + } + + /// Accept a remotely initiated stream of a certain directionality, if possible + /// + /// Returns `None` if there are no new incoming streams for this connection. + /// Has no impact on the data flow-control or stream concurrency limits. + pub fn accept(&mut self, dir: Dir) -> Option { + if self.state.next_remote[dir as usize] == self.state.next_reported_remote[dir as usize] { + return None; + } + + let x = self.state.next_reported_remote[dir as usize]; + self.state.next_reported_remote[dir as usize] = x + 1; + if dir == Dir::Bi { + self.state.send_streams += 1; + } + + Some(StreamId::new(!self.state.side, dir, x)) + } + + #[cfg(fuzzing)] + pub fn state(&mut self) -> &mut StreamsState { + self.state + } + + /// The number of streams that may have unacknowledged data. + pub fn send_streams(&self) -> usize { + self.state.send_streams + } + + /// The number of remotely initiated open streams of a certain directionality. + /// + /// Includes remotely initiated streams, which have not been accepted via [`accept`](Self::accept). + /// These streams count against the respective concurrency limit reported by + /// [`Connection::max_concurrent_streams`](super::Connection::max_concurrent_streams). 
+ pub fn remote_open_streams(&self, dir: Dir) -> u64 { + // total opened - total closed = total opened - ( total permitted - total permitted unclosed ) + self.state.next_remote[dir as usize] + - (self.state.max_remote[dir as usize] + - self.state.allocated_remote_count[dir as usize]) + } +} + +/// Access to streams +pub struct RecvStream<'a> { + pub(super) id: StreamId, + pub(super) state: &'a mut StreamsState, + pub(super) pending: &'a mut Retransmits, +} + +impl RecvStream<'_> { + /// Read from the given recv stream + /// + /// `max_length` limits the maximum size of the returned `Bytes` value; passing `usize::MAX` + /// will yield the best performance. `ordered` will make sure the returned chunk's offset will + /// have an offset exactly equal to the previously returned offset plus the previously returned + /// bytes' length. + /// + /// Yields `Ok(None)` if the stream was finished. Otherwise, yields a segment of data and its + /// offset in the stream. If `ordered` is `false`, segments may be received in any order, and + /// the `Chunk`'s `offset` field can be used to determine ordering in the caller. + /// + /// While most applications will prefer to consume stream data in order, unordered reads can + /// improve performance when packet loss occurs and data cannot be retransmitted before the flow + /// control window is filled. On any given stream, you can switch from ordered to unordered + /// reads, but ordered reads on streams that have seen previous unordered reads will return + /// `ReadError::IllegalOrderedRead`. + pub fn read(&mut self, ordered: bool) -> Result, ReadableError> { + Chunks::new(self.id, ordered, self.state, self.pending) + } + + /// Stop accepting data on the given receive stream + /// + /// Discards unread data and notifies the peer to stop transmitting. Once stopped, further + /// attempts to operate on a stream will yield `ClosedStream` errors. 
+ pub fn stop(&mut self, error_code: VarInt) -> Result<(), ClosedStream> { + let mut entry = match self.state.recv.entry(self.id) { + hash_map::Entry::Occupied(s) => s, + hash_map::Entry::Vacant(_) => return Err(ClosedStream { _private: () }), + }; + let stream = get_or_insert_recv(self.state.stream_receive_window)(entry.get_mut()); + + let (read_credits, stop_sending) = stream.stop()?; + if stop_sending.should_transmit() { + self.pending.stop_sending.push(frame::StopSending { + id: self.id, + error_code, + }); + } + + // We need to keep stopped streams around until they're finished or reset so we can update + // connection-level flow control to account for discarded data. Otherwise, we can discard + // state immediately. + if !stream.final_offset_unknown() { + let recv = entry.remove().expect("must have recv when stopping"); + self.state.stream_recv_freed(self.id, recv); + } + + if self.state.add_read_credits(read_credits).should_transmit() { + self.pending.max_data = true; + } + + Ok(()) + } + + /// Check whether this stream has been reset by the peer, returning the reset error code if so + /// + /// After returning `Ok(Some(_))` once, stream state will be discarded and all future calls will + /// return `Err(ClosedStream)`. 
+ pub fn received_reset(&mut self) -> Result, ClosedStream> { + let hash_map::Entry::Occupied(entry) = self.state.recv.entry(self.id) else { + return Err(ClosedStream { _private: () }); + }; + let Some(s) = entry.get().as_ref().and_then(|s| s.as_open_recv()) else { + return Ok(None); + }; + if s.stopped { + return Err(ClosedStream { _private: () }); + } + let Some(code) = s.reset_code() else { + return Ok(None); + }; + + // Clean up state after application observes the reset, since there's no reason for the + // application to attempt to read or stop the stream once it knows it's reset + let (_, recv) = entry.remove_entry(); + self.state + .stream_recv_freed(self.id, recv.expect("must have recv on reset")); + self.state.queue_max_stream_id(self.pending); + + Ok(Some(code)) + } +} + +/// Access to streams +pub struct SendStream<'a> { + pub(super) id: StreamId, + pub(super) state: &'a mut StreamsState, + pub(super) pending: &'a mut Retransmits, + pub(super) conn_state: &'a super::State, +} + +#[allow(clippy::needless_lifetimes)] // Needed for cfg(fuzzing) +impl<'a> SendStream<'a> { + #[cfg(fuzzing)] + pub fn new( + id: StreamId, + state: &'a mut StreamsState, + pending: &'a mut Retransmits, + conn_state: &'a super::State, + ) -> Self { + Self { + id, + state, + pending, + conn_state, + } + } + + /// Send data on the given stream + /// + /// Returns the number of bytes successfully written. + pub fn write(&mut self, data: &[u8]) -> Result { + Ok(self.write_source(&mut ByteSlice::from_slice(data))?.bytes) + } + + /// Send data on the given stream + /// + /// Returns the number of bytes and chunks successfully written. + /// Note that this method might also write a partial chunk. In this case + /// [`Written::chunks`] will not count this chunk as fully written. However + /// the chunk will be advanced and contain only non-written data after the call. 
+ pub fn write_chunks(&mut self, data: &mut [Bytes]) -> Result { + self.write_source(&mut BytesArray::from_chunks(data)) + } + + fn write_source(&mut self, source: &mut B) -> Result { + if self.conn_state.is_closed() { + trace!(%self.id, "write blocked; connection draining"); + return Err(WriteError::Blocked); + } + + let limit = self.state.write_limit(); + + let max_send_data = self.state.max_send_data(self.id); + + let stream = self + .state + .send + .get_mut(&self.id) + .map(get_or_insert_send(max_send_data)) + .ok_or(WriteError::ClosedStream)?; + + if limit == 0 { + trace!( + stream = %self.id, max_data = self.state.max_data, data_sent = self.state.data_sent, + "write blocked by connection-level flow control or send window" + ); + if !stream.connection_blocked { + stream.connection_blocked = true; + self.state.connection_blocked.push(self.id); + } + return Err(WriteError::Blocked); + } + + let was_pending = stream.is_pending(); + let written = stream.write(source, limit)?; + self.state.data_sent += written.bytes as u64; + self.state.unacked_data += written.bytes as u64; + trace!(stream = %self.id, "wrote {} bytes", written.bytes); + if !was_pending { + self.state.pending.push_pending(self.id, stream.priority); + } + Ok(written) + } + + /// Check if this stream was stopped, get the reason if it was + pub fn stopped(&self) -> Result, ClosedStream> { + match self.state.send.get(&self.id).as_ref() { + Some(Some(s)) => Ok(s.stop_reason), + Some(None) => Ok(None), + None => Err(ClosedStream { _private: () }), + } + } + + /// Finish a send stream, signalling that no more data will be sent. + /// + /// If this fails, no [`StreamEvent::Finished`] will be generated. 
+ /// + /// [`StreamEvent::Finished`]: crate::StreamEvent::Finished + pub fn finish(&mut self) -> Result<(), FinishError> { + let max_send_data = self.state.max_send_data(self.id); + let stream = self + .state + .send + .get_mut(&self.id) + .map(get_or_insert_send(max_send_data)) + .ok_or(FinishError::ClosedStream)?; + + let was_pending = stream.is_pending(); + stream.finish()?; + if !was_pending { + self.state.pending.push_pending(self.id, stream.priority); + } + + Ok(()) + } + + /// Abandon transmitting data on a stream + /// + /// # Panics + /// - when applied to a receive stream + pub fn reset(&mut self, error_code: VarInt) -> Result<(), ClosedStream> { + let max_send_data = self.state.max_send_data(self.id); + let stream = self + .state + .send + .get_mut(&self.id) + .map(get_or_insert_send(max_send_data)) + .ok_or(ClosedStream { _private: () })?; + + if matches!(stream.state, SendState::ResetSent) { + // Redundant reset call + return Err(ClosedStream { _private: () }); + } + + // Restore the portion of the send window consumed by the data that we aren't about to + // send. We leave flow control alone because the peer's responsible for issuing additional + // credit based on the final offset communicated in the RESET_STREAM frame we send. 
+ self.state.unacked_data -= stream.pending.unacked(); + stream.reset(); + self.pending.reset_stream.push((self.id, error_code)); + + // Don't reopen an already-closed stream we haven't forgotten yet + Ok(()) + } + + /// Set the priority of a stream + /// + /// # Panics + /// - when applied to a receive stream + pub fn set_priority(&mut self, priority: i32) -> Result<(), ClosedStream> { + let max_send_data = self.state.max_send_data(self.id); + let stream = self + .state + .send + .get_mut(&self.id) + .map(get_or_insert_send(max_send_data)) + .ok_or(ClosedStream { _private: () })?; + + stream.priority = priority; + Ok(()) + } + + /// Get the priority of a stream + /// + /// # Panics + /// - when applied to a receive stream + pub fn priority(&self) -> Result { + let stream = self + .state + .send + .get(&self.id) + .ok_or(ClosedStream { _private: () })?; + + Ok(stream.as_ref().map(|s| s.priority).unwrap_or_default()) + } +} + +/// A queue of streams with pending outgoing data, sorted by priority +struct PendingStreamsQueue { + streams: BinaryHeap, + /// The next stream to write out. This is `Some` when `TransportConfig::send_fairness(false)` and writing a stream is + /// interrupted while the stream still has some pending data. See `reinsert_pending()`. + next: Option, + /// A monotonically decreasing counter, used to implement round-robin scheduling for streams of the same priority. + /// Underflowing is not a practical concern, as it is initialized to u64::MAX and only decremented by 1 in `push_pending` + recency: u64, +} + +impl PendingStreamsQueue { + fn new() -> Self { + Self { + streams: BinaryHeap::new(), + next: None, + recency: u64::MAX, + } + } + + /// Reinsert a stream that was pending and still contains unsent data. 
+ fn reinsert_pending(&mut self, id: StreamId, priority: i32) { + assert!(self.next.is_none()); + + self.next = Some(PendingStream { + priority, + recency: self.recency, // the value here doesn't really matter + id, + }); + } + + /// Push a pending stream ID with the given priority, queued after any already-queued streams for the priority + fn push_pending(&mut self, id: StreamId, priority: i32) { + // Note that in the case where fairness is disabled, if we have a reinserted stream we don't + // bump it even if priority > next.priority. In order to minimize fragmentation we + // always try to complete a stream once part of it has been written. + + // As the recency counter is monotonically decreasing, we know that using its value to sort this stream will queue it + // after all other queued streams of the same priority. + // This is enough to implement round-robin scheduling for streams that are still pending even after being handled, + // as in that case they are removed from the `BinaryHeap`, handled, and then immediately reinserted. 
+ self.recency -= 1; + self.streams.push(PendingStream { + priority, + recency: self.recency, + id, + }); + } + + fn pop(&mut self) -> Option { + self.next.take().or_else(|| self.streams.pop()) + } + + fn clear(&mut self) { + self.next = None; + self.streams.clear(); + } + + fn iter(&self) -> impl Iterator { + self.next.iter().chain(self.streams.iter()) + } + + #[cfg(test)] + fn len(&self) -> usize { + self.streams.len() + self.next.is_some() as usize + } +} + +/// The [`StreamId`] of a stream with pending data queued, ordered by its priority and recency +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] +struct PendingStream { + /// The priority of the stream + // Note that this field should be kept above the `recency` field, in order for the `Ord` derive to be correct + // (See https://doc.rust-lang.org/stable/std/cmp/trait.Ord.html#derivable) + priority: i32, + /// A tie-breaker for streams of the same priority, used to improve fairness by implementing round-robin scheduling: + /// Larger values are prioritized, so it is initialised to `u64::MAX`, and when a stream writes data, we know + /// that it currently has the highest recency value, so it is deprioritized by setting its recency to 1 less than the + /// previous lowest recency value, such that all other streams of this priority will get processed once before we get back + /// round to this one + recency: u64, + /// The ID of the stream + // The way this type is used ensures that every instance has a unique `recency` value, so this field should be kept below + // the `priority` and `recency` fields, so that it does not interfere with the behaviour of the `Ord` derive + id: StreamId, +} + +/// Application events about streams +#[derive(Debug, PartialEq, Eq)] +pub enum StreamEvent { + /// One or more new streams has been opened and might be readable + Opened { + /// Directionality for which streams have been opened + dir: Dir, + }, + /// A currently open stream likely has data or errors waiting to be read + 
Readable { + /// Which stream is now readable + id: StreamId, + }, + /// A formerly write-blocked stream might be ready for a write or have been stopped + /// + /// Only generated for streams that are currently open. + Writable { + /// Which stream is now writable + id: StreamId, + }, + /// A finished stream has been fully acknowledged or stopped + Finished { + /// Which stream has been finished + id: StreamId, + }, + /// The peer asked us to stop sending on an outgoing stream + Stopped { + /// Which stream has been stopped + id: StreamId, + /// Error code supplied by the peer + error_code: VarInt, + }, + /// At least one new stream of a certain directionality may be opened + Available { + /// Directionality for which streams are newly available + dir: Dir, + }, +} + +/// Indicates whether a frame needs to be transmitted +/// +/// This type wraps around bool and uses the `#[must_use]` attribute in order +/// to prevent accidental loss of the frame transmission requirement. +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)] +#[must_use = "A frame might need to be enqueued"] +pub struct ShouldTransmit(bool); + +impl ShouldTransmit { + /// Returns whether a frame should be transmitted + pub fn should_transmit(self) -> bool { + self.0 + } +} + +/// Error indicating that a stream has not been opened or has already been finished or reset +#[derive(Debug, Default, Error, Clone, PartialEq, Eq)] +#[error("closed stream")] +pub struct ClosedStream { + _private: (), +} + +impl From for io::Error { + fn from(x: ClosedStream) -> Self { + Self::new(io::ErrorKind::NotConnected, x) + } +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +enum StreamHalf { + Send, + Recv, +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/recv.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/recv.rs new file mode 100644 index 
0000000000000000000000000000000000000000..1aee535439e608e86498f8923e7a042325212571 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/recv.rs @@ -0,0 +1,543 @@ +use std::collections::hash_map::Entry; +use std::mem; + +use thiserror::Error; +use tracing::debug; + +use super::state::get_or_insert_recv; +use super::{ClosedStream, Retransmits, ShouldTransmit, StreamId, StreamsState}; +use crate::connection::assembler::{Assembler, Chunk, IllegalOrderedRead}; +use crate::connection::streams::state::StreamRecv; +use crate::{TransportError, VarInt, frame}; + +#[derive(Debug, Default)] +pub(super) struct Recv { + // NB: when adding or removing fields, remember to update `reinit`. + state: RecvState, + pub(super) assembler: Assembler, + sent_max_stream_data: u64, + pub(super) end: u64, + pub(super) stopped: bool, +} + +impl Recv { + pub(super) fn new(initial_max_data: u64) -> Box { + Box::new(Self { + state: RecvState::default(), + assembler: Assembler::new(), + sent_max_stream_data: initial_max_data, + end: 0, + stopped: false, + }) + } + + /// Reset to the initial state + pub(super) fn reinit(&mut self, initial_max_data: u64) { + self.state = RecvState::default(); + self.assembler.reinit(); + self.sent_max_stream_data = initial_max_data; + self.end = 0; + self.stopped = false; + } + + /// Process a STREAM frame + /// + /// Return value is `(number_of_new_bytes_ingested, stream_is_closed)` + pub(super) fn ingest( + &mut self, + frame: frame::Stream, + payload_len: usize, + received: u64, + max_data: u64, + ) -> Result<(u64, bool), TransportError> { + let end = frame.offset + frame.data.len() as u64; + if end >= 2u64.pow(62) { + return Err(TransportError::FLOW_CONTROL_ERROR( + "maximum stream offset too large", + )); + } + + if let Some(final_offset) = self.final_offset() { + if end > final_offset || (frame.fin && end != final_offset) { + debug!(end, final_offset, "final size error"); + return 
Err(TransportError::FINAL_SIZE_ERROR("")); + } + } + + let new_bytes = self.credit_consumed_by(end, received, max_data)?; + + // Stopped streams don't need to wait for the actual data, they just need to know + // how much there was. + if frame.fin && !self.stopped { + if let RecvState::Recv { ref mut size } = self.state { + *size = Some(end); + } + } + + self.end = self.end.max(end); + // Don't bother storing data or releasing stream-level flow control credit if the stream's + // already stopped + if !self.stopped { + self.assembler.insert(frame.offset, frame.data, payload_len); + } + + Ok((new_bytes, frame.fin && self.stopped)) + } + + pub(super) fn stop(&mut self) -> Result<(u64, ShouldTransmit), ClosedStream> { + if self.stopped { + return Err(ClosedStream { _private: () }); + } + + self.stopped = true; + self.assembler.clear(); + // Issue flow control credit for unread data + let read_credits = self.end - self.assembler.bytes_read(); + // This may send a spurious STOP_SENDING if we've already received all data, but it's a bit + // fiddly to distinguish that from the case where we've received a FIN but are missing some + // data that the peer might still be trying to retransmit, in which case a STOP_SENDING is + // still useful. + Ok((read_credits, ShouldTransmit(self.is_receiving()))) + } + + /// Returns the window that should be advertised in a `MAX_STREAM_DATA` frame + /// + /// The method returns a tuple which consists of the window that should be + /// announced, as well as a boolean parameter which indicates if a new + /// transmission of the value is recommended. If the boolean value is + /// `false` the new window should only be transmitted if a previous transmission + /// had failed. 
+ pub(super) fn max_stream_data(&mut self, stream_receive_window: u64) -> (u64, ShouldTransmit) { + let max_stream_data = self.assembler.bytes_read() + stream_receive_window; + + // Only announce a window update if it's significant enough + // to make it worthwhile sending a MAX_STREAM_DATA frame. + // We use here a fraction of the configured stream receive window to make + // the decision, and accommodate for streams using bigger windows requiring + // less updates. A fixed size would also work - but it would need to be + // smaller than `stream_receive_window` in order to make sure the stream + // does not get stuck. + let diff = max_stream_data - self.sent_max_stream_data; + let transmit = self.can_send_flow_control() && diff >= (stream_receive_window / 8); + (max_stream_data, ShouldTransmit(transmit)) + } + + /// Records that a `MAX_STREAM_DATA` announcing a certain window was sent + /// + /// This will suppress enqueuing further `MAX_STREAM_DATA` frames unless + /// either the previous transmission was not acknowledged or the window + /// further increased. + pub(super) fn record_sent_max_stream_data(&mut self, sent_value: u64) { + if sent_value > self.sent_max_stream_data { + self.sent_max_stream_data = sent_value; + } + } + + /// Whether the total amount of data that the peer will send on this stream is unknown + /// + /// True until we've received either a reset or the final frame. + /// + /// Implies that the sender might benefit from stream-level flow control updates, and we might + /// need to issue connection-level flow control updates due to flow control budget use by this + /// stream in the future, even if it's been stopped. 
+ pub(super) fn final_offset_unknown(&self) -> bool { + matches!(self.state, RecvState::Recv { size: None }) + } + + /// Whether stream-level flow control updates should be sent for this stream + pub(super) fn can_send_flow_control(&self) -> bool { + // Stream-level flow control is redundant if the sender has already sent the whole stream, + // and moot if we no longer want data on this stream. + self.final_offset_unknown() && !self.stopped + } + + /// Whether data is still being accepted from the peer + pub(super) fn is_receiving(&self) -> bool { + matches!(self.state, RecvState::Recv { .. }) + } + + fn final_offset(&self) -> Option { + match self.state { + RecvState::Recv { size } => size, + RecvState::ResetRecvd { size, .. } => Some(size), + } + } + + /// Returns `false` iff the reset was redundant + pub(super) fn reset( + &mut self, + error_code: VarInt, + final_offset: VarInt, + received: u64, + max_data: u64, + ) -> Result { + // Validate final_offset + if let Some(offset) = self.final_offset() { + if offset != final_offset.into_inner() { + return Err(TransportError::FINAL_SIZE_ERROR("inconsistent value")); + } + } else if self.end > u64::from(final_offset) { + return Err(TransportError::FINAL_SIZE_ERROR( + "lower than high water mark", + )); + } + self.credit_consumed_by(final_offset.into(), received, max_data)?; + + if matches!(self.state, RecvState::ResetRecvd { .. }) { + return Ok(false); + } + self.state = RecvState::ResetRecvd { + size: final_offset.into(), + error_code, + }; + // Nuke buffers so that future reads fail immediately, which ensures future reads don't + // issue flow control credit redundant to that already issued. We could instead special-case + // reset streams during read, but it's unclear if there's any benefit to retaining data for + // reset streams. + self.assembler.clear(); + Ok(true) + } + + pub(super) fn reset_code(&self) -> Option { + match self.state { + RecvState::ResetRecvd { error_code, .. 
} => Some(error_code), + _ => None, + } + } + + /// Compute the amount of flow control credit consumed, or return an error if more was consumed + /// than issued + fn credit_consumed_by( + &self, + offset: u64, + received: u64, + max_data: u64, + ) -> Result { + let prev_end = self.end; + let new_bytes = offset.saturating_sub(prev_end); + if offset > self.sent_max_stream_data || received + new_bytes > max_data { + debug!( + received, + new_bytes, + max_data, + offset, + stream_max_data = self.sent_max_stream_data, + "flow control error" + ); + return Err(TransportError::FLOW_CONTROL_ERROR("")); + } + + Ok(new_bytes) + } +} + +/// Chunks returned from [`RecvStream::read()`][crate::RecvStream::read]. +/// +/// ### Note: Finalization Needed +/// Bytes read from the stream are not released from the congestion window until +/// either [`Self::finalize()`] is called, or this type is dropped. +/// +/// It is recommended that you call [`Self::finalize()`] because it returns a flag +/// telling you whether reading from the stream has resulted in the need to transmit a packet. +/// +/// If this type is leaked, the stream will remain blocked on the remote peer until +/// another read from the stream is done. 
+pub struct Chunks<'a> { + id: StreamId, + ordered: bool, + streams: &'a mut StreamsState, + pending: &'a mut Retransmits, + state: ChunksState, + read: u64, +} + +impl<'a> Chunks<'a> { + pub(super) fn new( + id: StreamId, + ordered: bool, + streams: &'a mut StreamsState, + pending: &'a mut Retransmits, + ) -> Result { + let mut entry = match streams.recv.entry(id) { + Entry::Occupied(entry) => entry, + Entry::Vacant(_) => return Err(ReadableError::ClosedStream), + }; + + let mut recv = + match get_or_insert_recv(streams.stream_receive_window)(entry.get_mut()).stopped { + true => return Err(ReadableError::ClosedStream), + false => entry.remove().unwrap().into_inner(), // this can't fail due to the previous get_or_insert_with + }; + + recv.assembler.ensure_ordering(ordered)?; + Ok(Self { + id, + ordered, + streams, + pending, + state: ChunksState::Readable(recv), + read: 0, + }) + } + + /// Next + /// + /// Should call finalize() when done calling this. + pub fn next(&mut self, max_length: usize) -> Result, ReadError> { + let rs = match self.state { + ChunksState::Readable(ref mut rs) => rs, + ChunksState::Reset(error_code) => { + return Err(ReadError::Reset(error_code)); + } + ChunksState::Finished => { + return Ok(None); + } + ChunksState::Finalized => panic!("must not call next() after finalize()"), + }; + + if let Some(chunk) = rs.assembler.read(max_length, self.ordered) { + self.read += chunk.bytes.len() as u64; + return Ok(Some(chunk)); + } + + match rs.state { + RecvState::ResetRecvd { error_code, .. 
} => { + debug_assert_eq!(self.read, 0, "reset streams have empty buffers"); + let state = mem::replace(&mut self.state, ChunksState::Reset(error_code)); + // At this point if we have `rs` self.state must be `ChunksState::Readable` + let recv = match state { + ChunksState::Readable(recv) => StreamRecv::Open(recv), + _ => unreachable!("state must be ChunkState::Readable"), + }; + self.streams.stream_recv_freed(self.id, recv); + Err(ReadError::Reset(error_code)) + } + RecvState::Recv { size } => { + if size == Some(rs.end) && rs.assembler.bytes_read() == rs.end { + let state = mem::replace(&mut self.state, ChunksState::Finished); + // At this point if we have `rs` self.state must be `ChunksState::Readable` + let recv = match state { + ChunksState::Readable(recv) => StreamRecv::Open(recv), + _ => unreachable!("state must be ChunkState::Readable"), + }; + self.streams.stream_recv_freed(self.id, recv); + Ok(None) + } else { + // We don't need a distinct `ChunksState` variant for a blocked stream because + // retrying a read harmlessly re-traces our steps back to returning + // `Err(Blocked)` again. The buffers can't refill and the stream's own state + // can't change so long as this `Chunks` exists. + Err(ReadError::Blocked) + } + } + } + } + + /// Mark the read data as consumed from the stream. + /// + /// The number of read bytes will be released from the congestion window, + /// allowing the remote peer to send more data if it was previously blocked. + /// + /// If [`ShouldTransmit::should_transmit()`] returns `true`, + /// a packet needs to be sent to the peer informing them that the stream is unblocked. + /// This means that you should call [`Connection::poll_transmit()`][crate::Connection::poll_transmit] + /// and send the returned packet as soon as is reasonable, to unblock the remote peer. 
+ pub fn finalize(mut self) -> ShouldTransmit { + self.finalize_inner() + } + + fn finalize_inner(&mut self) -> ShouldTransmit { + let state = mem::replace(&mut self.state, ChunksState::Finalized); + if let ChunksState::Finalized = state { + // Noop on repeated calls + return ShouldTransmit(false); + } + + // We issue additional stream ID credit after the application is notified that a previously + // open stream has finished or been reset and we've therefore disposed of its state, as + // recorded by `stream_freed` calls in `next`. + let mut should_transmit = self.streams.queue_max_stream_id(self.pending); + + // If the stream hasn't finished, we may need to issue stream-level flow control credit + if let ChunksState::Readable(mut rs) = state { + let (_, max_stream_data) = rs.max_stream_data(self.streams.stream_receive_window); + should_transmit |= max_stream_data.0; + if max_stream_data.0 { + self.pending.max_stream_data.insert(self.id); + } + // Return the stream to storage for future use + self.streams + .recv + .insert(self.id, Some(StreamRecv::Open(rs))); + } + + // Issue connection-level flow control credit for any data we read regardless of state + let max_data = self.streams.add_read_credits(self.read); + self.pending.max_data |= max_data.0; + should_transmit |= max_data.0; + ShouldTransmit(should_transmit) + } +} + +impl Drop for Chunks<'_> { + fn drop(&mut self) { + let _ = self.finalize_inner(); + } +} + +enum ChunksState { + Readable(Box), + Reset(VarInt), + Finished, + Finalized, +} + +/// Errors triggered when reading from a recv stream +#[derive(Debug, Error, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum ReadError { + /// No more data is currently available on this stream. + /// + /// If more data on this stream is received from the peer, an `Event::StreamReadable` will be + /// generated for this stream, indicating that retrying the read might succeed. 
+ #[error("blocked")] + Blocked, + /// The peer abandoned transmitting data on this stream. + /// + /// Carries an application-defined error code. + #[error("reset by peer: code {0}")] + Reset(VarInt), +} + +/// Errors triggered when opening a recv stream for reading +#[derive(Debug, Error, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum ReadableError { + /// The stream has not been opened or was already stopped, finished, or reset + #[error("closed stream")] + ClosedStream, + /// Attempted an ordered read following an unordered read + /// + /// Performing an unordered read allows discontinuities to arise in the receive buffer of a + /// stream which cannot be recovered, making further ordered reads impossible. + #[error("ordered read after unordered read")] + IllegalOrderedRead, +} + +impl From for ReadableError { + fn from(_: IllegalOrderedRead) -> Self { + Self::IllegalOrderedRead + } +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +enum RecvState { + Recv { size: Option }, + ResetRecvd { size: u64, error_code: VarInt }, +} + +impl Default for RecvState { + fn default() -> Self { + Self::Recv { size: None } + } +} + +#[cfg(test)] +mod tests { + use bytes::Bytes; + + use crate::{Dir, Side}; + + use super::*; + + #[test] + fn reordered_frames_while_stopped() { + const INITIAL_BYTES: u64 = 3; + const INITIAL_OFFSET: u64 = 3; + const RECV_WINDOW: u64 = 8; + let mut s = Recv::new(RECV_WINDOW); + let mut data_recvd = 0; + // Receive bytes 3..6 + let (new_bytes, is_closed) = s + .ingest( + frame::Stream { + id: StreamId::new(Side::Client, Dir::Uni, 0), + offset: INITIAL_OFFSET, + fin: false, + data: Bytes::from_static(&[0; INITIAL_BYTES as usize]), + }, + 123, + data_recvd, + data_recvd + 1024, + ) + .unwrap(); + data_recvd += new_bytes; + assert_eq!(new_bytes, INITIAL_OFFSET + INITIAL_BYTES); + assert!(!is_closed); + + let (credits, transmit) = s.stop().unwrap(); + assert!(transmit.should_transmit()); + assert_eq!( + credits, + INITIAL_OFFSET + 
INITIAL_BYTES, + "full connection flow control credit is issued by stop" + ); + + let (max_stream_data, transmit) = s.max_stream_data(RECV_WINDOW); + assert!(!transmit.should_transmit()); + assert_eq!( + max_stream_data, RECV_WINDOW, + "stream flow control credit isn't issued by stop" + ); + + // Receive byte 7 + let (new_bytes, is_closed) = s + .ingest( + frame::Stream { + id: StreamId::new(Side::Client, Dir::Uni, 0), + offset: RECV_WINDOW - 1, + fin: false, + data: Bytes::from_static(&[0; 1]), + }, + 123, + data_recvd, + data_recvd + 1024, + ) + .unwrap(); + data_recvd += new_bytes; + assert_eq!(new_bytes, RECV_WINDOW - (INITIAL_OFFSET + INITIAL_BYTES)); + assert!(!is_closed); + + let (max_stream_data, transmit) = s.max_stream_data(RECV_WINDOW); + assert!(!transmit.should_transmit()); + assert_eq!( + max_stream_data, RECV_WINDOW, + "stream flow control credit isn't issued after stop" + ); + + // Receive bytes 0..3 + let (new_bytes, is_closed) = s + .ingest( + frame::Stream { + id: StreamId::new(Side::Client, Dir::Uni, 0), + offset: 0, + fin: false, + data: Bytes::from_static(&[0; INITIAL_OFFSET as usize]), + }, + 123, + data_recvd, + data_recvd + 1024, + ) + .unwrap(); + assert_eq!( + new_bytes, 0, + "reordered frames don't issue connection-level flow control for stopped streams" + ); + assert!(!is_closed); + + let (max_stream_data, transmit) = s.max_stream_data(RECV_WINDOW); + assert!(!transmit.should_transmit()); + assert_eq!( + max_stream_data, RECV_WINDOW, + "stream flow control credit isn't issued after stop" + ); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/send.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/send.rs new file mode 100644 index 0000000000000000000000000000000000000000..7b3db809a129fdf4359e497ef995fbdea62a58d5 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/send.rs @@ -0,0 +1,402 @@ +use bytes::Bytes; +use thiserror::Error; + +use crate::{VarInt, connection::send_buffer::SendBuffer, frame}; + +#[derive(Debug)] +pub(super) struct Send { + pub(super) max_data: u64, + pub(super) state: SendState, + pub(super) pending: SendBuffer, + pub(super) priority: i32, + /// Whether a frame containing a FIN bit must be transmitted, even if we don't have any new data + pub(super) fin_pending: bool, + /// Whether this stream is in the `connection_blocked` list of `Streams` + pub(super) connection_blocked: bool, + /// The reason the peer wants us to stop, if `STOP_SENDING` was received + pub(super) stop_reason: Option, +} + +impl Send { + pub(super) fn new(max_data: VarInt) -> Box { + Box::new(Self { + max_data: max_data.into(), + state: SendState::Ready, + pending: SendBuffer::new(), + priority: 0, + fin_pending: false, + connection_blocked: false, + stop_reason: None, + }) + } + + /// Whether the stream has been reset + pub(super) fn is_reset(&self) -> bool { + matches!(self.state, SendState::ResetSent) + } + + pub(super) fn finish(&mut self) -> Result<(), FinishError> { + if let Some(error_code) = self.stop_reason { + Err(FinishError::Stopped(error_code)) + } else if self.state == SendState::Ready { + self.state = SendState::DataSent { + finish_acked: false, + }; + self.fin_pending = true; + Ok(()) + } else { + Err(FinishError::ClosedStream) + } + } + + pub(super) fn write( + &mut self, + source: &mut S, + limit: u64, + ) -> Result { + if !self.is_writable() { + return Err(WriteError::ClosedStream); + } + if let Some(error_code) = self.stop_reason { + return Err(WriteError::Stopped(error_code)); + } + let budget = self.max_data - self.pending.offset(); + if budget == 0 { + return Err(WriteError::Blocked); + } + let mut limit = limit.min(budget) as usize; + + let mut result = Written::default(); + loop { + let (chunk, chunks_consumed) = 
source.pop_chunk(limit); + result.chunks += chunks_consumed; + result.bytes += chunk.len(); + + if chunk.is_empty() { + break; + } + + limit -= chunk.len(); + self.pending.write(chunk); + } + + Ok(result) + } + + /// Update stream state due to a reset sent by the local application + pub(super) fn reset(&mut self) { + use SendState::*; + if let DataSent { .. } | Ready = self.state { + self.state = ResetSent; + } + } + + /// Handle STOP_SENDING + /// + /// Returns true if the stream was stopped due to this frame, and false + /// if it had been stopped before + pub(super) fn try_stop(&mut self, error_code: VarInt) -> bool { + if self.stop_reason.is_none() { + self.stop_reason = Some(error_code); + true + } else { + false + } + } + + /// Returns whether the stream has been finished and all data has been acknowledged by the peer + pub(super) fn ack(&mut self, frame: frame::StreamMeta) -> bool { + self.pending.ack(frame.offsets); + match self.state { + SendState::DataSent { + ref mut finish_acked, + } => { + *finish_acked |= frame.fin; + *finish_acked && self.pending.is_fully_acked() + } + _ => false, + } + } + + /// Handle increase to stream-level flow control limit + /// + /// Returns whether the stream was unblocked + pub(super) fn increase_max_data(&mut self, offset: u64) -> bool { + if offset <= self.max_data || self.state != SendState::Ready { + return false; + } + let was_blocked = self.pending.offset() == self.max_data; + self.max_data = offset; + was_blocked + } + + pub(super) fn offset(&self) -> u64 { + self.pending.offset() + } + + pub(super) fn is_pending(&self) -> bool { + self.pending.has_unsent_data() || self.fin_pending + } + + pub(super) fn is_writable(&self) -> bool { + matches!(self.state, SendState::Ready) + } +} + +/// A [`BytesSource`] implementation for `&'a mut [Bytes]` +/// +/// The type allows to dequeue [`Bytes`] chunks from an array of chunks, up to +/// a configured limit. 
+pub(crate) struct BytesArray<'a> { + /// The wrapped slice of `Bytes` + chunks: &'a mut [Bytes], + /// The amount of chunks consumed from this source + consumed: usize, +} + +impl<'a> BytesArray<'a> { + pub(crate) fn from_chunks(chunks: &'a mut [Bytes]) -> Self { + Self { + chunks, + consumed: 0, + } + } +} + +impl BytesSource for BytesArray<'_> { + fn pop_chunk(&mut self, limit: usize) -> (Bytes, usize) { + // The loop exists to skip empty chunks while still marking them as + // consumed + let mut chunks_consumed = 0; + + while self.consumed < self.chunks.len() { + let chunk = &mut self.chunks[self.consumed]; + + if chunk.len() <= limit { + let chunk = std::mem::take(chunk); + self.consumed += 1; + chunks_consumed += 1; + if chunk.is_empty() { + continue; + } + return (chunk, chunks_consumed); + } else if limit > 0 { + let chunk = chunk.split_to(limit); + return (chunk, chunks_consumed); + } else { + break; + } + } + + (Bytes::new(), chunks_consumed) + } +} + +/// A [`BytesSource`] implementation for `&[u8]` +/// +/// The type allows to dequeue a single [`Bytes`] chunk, which will be lazily +/// created from a reference. This allows to defer the allocation until it is +/// known how much data needs to be copied. 
+pub(crate) struct ByteSlice<'a> { + /// The wrapped byte slice + data: &'a [u8], +} + +impl<'a> ByteSlice<'a> { + pub(crate) fn from_slice(data: &'a [u8]) -> Self { + Self { data } + } +} + +impl BytesSource for ByteSlice<'_> { + fn pop_chunk(&mut self, limit: usize) -> (Bytes, usize) { + let limit = limit.min(self.data.len()); + if limit == 0 { + return (Bytes::new(), 0); + } + + let chunk = Bytes::from(self.data[..limit].to_owned()); + self.data = &self.data[chunk.len()..]; + + let chunks_consumed = usize::from(self.data.is_empty()); + (chunk, chunks_consumed) + } +} + +/// A source of one or more buffers which can be converted into `Bytes` buffers on demand +/// +/// The purpose of this data type is to defer conversion as long as possible, +/// so that no heap allocation is required in case no data is writable. +pub(super) trait BytesSource { + /// Returns the next chunk from the source of owned chunks. + /// + /// This method will consume parts of the source. + /// Calling it will yield `Bytes` elements up to the configured `limit`. + /// + /// The method returns a tuple: + /// - The first item is the yielded `Bytes` element. The element will be + /// empty if the limit is zero or no more data is available. + /// - The second item returns how many complete chunks inside the source had + /// had been consumed. This can be less than 1, if a chunk inside the + /// source had been truncated in order to adhere to the limit. It can also + /// be more than 1, if zero-length chunks had been skipped. + fn pop_chunk(&mut self, limit: usize) -> (Bytes, usize); +} + +/// Indicates how many bytes and chunks had been transferred in a write operation +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] +pub struct Written { + /// The amount of bytes which had been written + pub bytes: usize, + /// The amount of full chunks which had been written + /// + /// If a chunk was only partially written, it will not be counted by this field. 
+ pub chunks: usize, +} + +/// Errors triggered while writing to a send stream +#[derive(Debug, Error, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum WriteError { + /// The peer is not able to accept additional data, or the connection is congested. + /// + /// If the peer issues additional flow control credit, a [`StreamEvent::Writable`] event will + /// be generated, indicating that retrying the write might succeed. + /// + /// [`StreamEvent::Writable`]: crate::StreamEvent::Writable + #[error("unable to accept further writes")] + Blocked, + /// The peer is no longer accepting data on this stream, and it has been implicitly reset. The + /// stream cannot be finished or further written to. + /// + /// Carries an application-defined error code. + /// + /// [`StreamEvent::Finished`]: crate::StreamEvent::Finished + #[error("stopped by peer: code {0}")] + Stopped(VarInt), + /// The stream has not been opened or has already been finished or reset + #[error("closed stream")] + ClosedStream, +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub(super) enum SendState { + /// Sending new data + Ready, + /// Stream was finished; now sending retransmits only + DataSent { finish_acked: bool }, + /// Sent RESET + ResetSent, +} + +/// Reasons why attempting to finish a stream might fail +#[derive(Debug, Error, Clone, PartialEq, Eq)] +pub enum FinishError { + /// The peer is no longer accepting data on this stream. No + /// [`StreamEvent::Finished`] event will be emitted for this stream. + /// + /// Carries an application-defined error code. 
+ /// + /// [`StreamEvent::Finished`]: crate::StreamEvent::Finished + #[error("stopped by peer: code {0}")] + Stopped(VarInt), + /// The stream has not been opened or was already finished or reset + #[error("closed stream")] + ClosedStream, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytes_array() { + let full = b"Hello World 123456789 ABCDEFGHJIJKLMNOPQRSTUVWXYZ".to_owned(); + for limit in 0..full.len() { + let mut chunks = [ + Bytes::from_static(b""), + Bytes::from_static(b"Hello "), + Bytes::from_static(b"Wo"), + Bytes::from_static(b""), + Bytes::from_static(b"r"), + Bytes::from_static(b"ld"), + Bytes::from_static(b""), + Bytes::from_static(b" 12345678"), + Bytes::from_static(b"9 ABCDE"), + Bytes::from_static(b"F"), + Bytes::from_static(b"GHJIJKLMNOPQRSTUVWXYZ"), + ]; + let num_chunks = chunks.len(); + let last_chunk_len = chunks[chunks.len() - 1].len(); + + let mut array = BytesArray::from_chunks(&mut chunks); + + let mut buf = Vec::new(); + let mut chunks_popped = 0; + let mut chunks_consumed = 0; + let mut remaining = limit; + loop { + let (chunk, consumed) = array.pop_chunk(remaining); + chunks_consumed += consumed; + + if !chunk.is_empty() { + buf.extend_from_slice(&chunk); + remaining -= chunk.len(); + chunks_popped += 1; + } else { + break; + } + } + + assert_eq!(&buf[..], &full[..limit]); + + if limit == full.len() { + // Full consumption of the last chunk + assert_eq!(chunks_consumed, num_chunks); + // Since there are empty chunks, we consume more than there are popped + assert_eq!(chunks_consumed, chunks_popped + 3); + } else if limit > full.len() - last_chunk_len { + // Partial consumption of the last chunk + assert_eq!(chunks_consumed, num_chunks - 1); + assert_eq!(chunks_consumed, chunks_popped + 2); + } + } + } + + #[test] + fn byte_slice() { + let full = b"Hello World 123456789 ABCDEFGHJIJKLMNOPQRSTUVWXYZ".to_owned(); + for limit in 0..full.len() { + let mut array = ByteSlice::from_slice(&full[..]); + + let mut buf = 
Vec::new(); + let mut chunks_popped = 0; + let mut chunks_consumed = 0; + let mut remaining = limit; + loop { + let (chunk, consumed) = array.pop_chunk(remaining); + chunks_consumed += consumed; + + if !chunk.is_empty() { + buf.extend_from_slice(&chunk); + remaining -= chunk.len(); + chunks_popped += 1; + } else { + break; + } + } + + assert_eq!(&buf[..], &full[..limit]); + if limit != 0 { + assert_eq!(chunks_popped, 1); + } else { + assert_eq!(chunks_popped, 0); + } + + if limit == full.len() { + assert_eq!(chunks_consumed, 1); + } else { + assert_eq!(chunks_consumed, 0); + } + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/state.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/state.rs new file mode 100644 index 0000000000000000000000000000000000000000..09644fea089fed12faff305e29f05ffae31a1e12 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/streams/state.rs @@ -0,0 +1,2102 @@ +use std::{ + collections::{VecDeque, hash_map}, + convert::TryFrom, + mem, +}; + +use bytes::BufMut; +use rustc_hash::FxHashMap; +use tracing::{debug, trace}; + +use super::{ + PendingStreamsQueue, Recv, Retransmits, Send, SendState, ShouldTransmit, StreamEvent, + StreamHalf, ThinRetransmits, +}; +use crate::{ + Dir, MAX_STREAM_COUNT, Side, StreamId, TransportError, VarInt, + coding::BufMutExt, + connection::stats::FrameStats, + frame::{self, FrameStruct, StreamMetaVec}, + transport_parameters::TransportParameters, +}; + +/// Wrapper around `Recv` that facilitates reusing `Recv` instances +#[derive(Debug)] +pub(super) enum StreamRecv { + /// A `Recv` that is ready to be opened + Free(Box), + /// A `Recv` that has been opened + Open(Box), +} + +impl StreamRecv { + /// Returns a reference to the inner `Recv` if the stream is open + pub(super) fn as_open_recv(&self) -> Option<&Recv> { + match self { + 
Self::Open(r) => Some(r), + _ => None, + } + } + + // Returns a mutable reference to the inner `Recv` if the stream is open + pub(super) fn as_open_recv_mut(&mut self) -> Option<&mut Recv> { + match self { + Self::Open(r) => Some(r), + _ => None, + } + } + + // Returns the inner `Recv` + pub(super) fn into_inner(self) -> Box { + match self { + Self::Free(r) | Self::Open(r) => r, + } + } + + // Reinitialize the stream so the inner `Recv` can be reused + pub(super) fn free(self, initial_max_data: u64) -> Self { + match self { + Self::Free(_) => unreachable!("Self::Free on reinit()"), + Self::Open(mut recv) => { + recv.reinit(initial_max_data); + Self::Free(recv) + } + } + } +} + +#[allow(unreachable_pub)] // fuzzing only +pub struct StreamsState { + pub(super) side: Side, + // Set of streams that are currently open, or could be immediately opened by the peer + pub(super) send: FxHashMap>>, + pub(super) recv: FxHashMap>, + pub(super) free_recv: Vec, + pub(super) next: [u64; 2], + /// Maximum number of locally-initiated streams that may be opened over the lifetime of the + /// connection so far, per direction + pub(super) max: [u64; 2], + /// Maximum number of remotely-initiated streams that may be opened over the lifetime of the + /// connection so far, per direction + pub(super) max_remote: [u64; 2], + /// Value of `max_remote` most recently transmitted to the peer in a `MAX_STREAMS` frame + sent_max_remote: [u64; 2], + /// Number of streams that we've given the peer permission to open and which aren't fully closed + pub(super) allocated_remote_count: [u64; 2], + /// Size of the desired stream flow control window. May be smaller than `allocated_remote_count` + /// due to `set_max_concurrent` calls. 
+ max_concurrent_remote_count: [u64; 2], + /// Whether `max_concurrent_remote_count` has ever changed + flow_control_adjusted: bool, + /// Lowest remotely-initiated stream index that haven't actually been opened by the peer + pub(super) next_remote: [u64; 2], + /// Whether the remote endpoint has opened any streams the application doesn't know about yet, + /// per directionality + opened: [bool; 2], + // Next to report to the application, once opened + pub(super) next_reported_remote: [u64; 2], + /// Number of outbound streams + /// + /// This differs from `self.send.len()` in that it does not include streams that the peer is + /// permitted to open but which have not yet been opened. + pub(super) send_streams: usize, + /// Streams with outgoing data queued, sorted by priority + pub(super) pending: PendingStreamsQueue, + + events: VecDeque, + /// Streams blocked on connection-level flow control or stream window space + /// + /// Streams are only added to this list when a write fails. + pub(super) connection_blocked: Vec, + /// Connection-level flow control budget dictated by the peer + pub(super) max_data: u64, + /// The initial receive window + receive_window: u64, + /// Limit on incoming data, which is transmitted through `MAX_DATA` frames + local_max_data: u64, + /// The last value of `MAX_DATA` which had been queued for transmission in + /// an outgoing `MAX_DATA` frame + sent_max_data: VarInt, + /// Sum of current offsets of all send streams. + pub(super) data_sent: u64, + /// Sum of end offsets of all receive streams. Includes gaps, so it's an upper bound. + data_recvd: u64, + /// Total quantity of unacknowledged outgoing data + pub(super) unacked_data: u64, + /// Configured upper bound for `unacked_data`. + /// + /// Note this may be less than `unacked_data` if the user has set a new value. 
+ pub(super) send_window: u64, + /// Configured upper bound for how much unacked data the peer can send us per stream + pub(super) stream_receive_window: u64, + + // Pertinent state from the TransportParameters supplied by the peer + initial_max_stream_data_uni: VarInt, + initial_max_stream_data_bidi_local: VarInt, + initial_max_stream_data_bidi_remote: VarInt, + + /// The shrink to be applied to local_max_data when receive_window is shrunk + receive_window_shrink_debt: u64, +} + +impl StreamsState { + #[allow(unreachable_pub)] // fuzzing only + pub fn new( + side: Side, + max_remote_uni: VarInt, + max_remote_bi: VarInt, + send_window: u64, + receive_window: VarInt, + stream_receive_window: VarInt, + ) -> Self { + let mut this = Self { + side, + send: FxHashMap::default(), + recv: FxHashMap::default(), + free_recv: Vec::new(), + next: [0, 0], + max: [0, 0], + max_remote: [max_remote_bi.into(), max_remote_uni.into()], + sent_max_remote: [max_remote_bi.into(), max_remote_uni.into()], + allocated_remote_count: [max_remote_bi.into(), max_remote_uni.into()], + max_concurrent_remote_count: [max_remote_bi.into(), max_remote_uni.into()], + flow_control_adjusted: false, + next_remote: [0, 0], + opened: [false, false], + next_reported_remote: [0, 0], + send_streams: 0, + pending: PendingStreamsQueue::new(), + events: VecDeque::new(), + connection_blocked: Vec::new(), + max_data: 0, + receive_window: receive_window.into(), + local_max_data: receive_window.into(), + sent_max_data: receive_window, + data_sent: 0, + data_recvd: 0, + unacked_data: 0, + send_window, + stream_receive_window: stream_receive_window.into(), + initial_max_stream_data_uni: 0u32.into(), + initial_max_stream_data_bidi_local: 0u32.into(), + initial_max_stream_data_bidi_remote: 0u32.into(), + receive_window_shrink_debt: 0, + }; + + for dir in Dir::iter() { + for i in 0..this.max_remote[dir as usize] { + this.insert(true, StreamId::new(!side, dir, i)); + } + } + + this + } + + pub(crate) fn set_params(&mut 
self, params: &TransportParameters) { + self.initial_max_stream_data_uni = params.initial_max_stream_data_uni; + self.initial_max_stream_data_bidi_local = params.initial_max_stream_data_bidi_local; + self.initial_max_stream_data_bidi_remote = params.initial_max_stream_data_bidi_remote; + self.max[Dir::Bi as usize] = params.initial_max_streams_bidi.into(); + self.max[Dir::Uni as usize] = params.initial_max_streams_uni.into(); + self.received_max_data(params.initial_max_data); + for i in 0..self.max_remote[Dir::Bi as usize] { + let id = StreamId::new(!self.side, Dir::Bi, i); + if let Some(s) = self.send.get_mut(&id).and_then(|s| s.as_mut()) { + s.max_data = params.initial_max_stream_data_bidi_local.into(); + } + } + } + + /// Ensure we have space for at least a full flow control window of remotely-initiated streams + /// to be open, and notify the peer if the window has moved + fn ensure_remote_streams(&mut self, dir: Dir) { + let new_count = self.max_concurrent_remote_count[dir as usize] + .saturating_sub(self.allocated_remote_count[dir as usize]); + for i in 0..new_count { + let id = StreamId::new(!self.side, dir, self.max_remote[dir as usize] + i); + self.insert(true, id); + } + self.allocated_remote_count[dir as usize] += new_count; + self.max_remote[dir as usize] += new_count; + } + + pub(crate) fn zero_rtt_rejected(&mut self) { + // Revert to initial state for outgoing streams + for dir in Dir::iter() { + for i in 0..self.next[dir as usize] { + // We don't bother calling `stream_freed` here because we explicitly reset affected + // counters below. + let id = StreamId::new(self.side, dir, i); + self.send.remove(&id).unwrap(); + if let Dir::Bi = dir { + self.recv.remove(&id).unwrap(); + } + } + self.next[dir as usize] = 0; + + // If 0-RTT was rejected, any flow control frames we sent were lost. 
+ if self.flow_control_adjusted { + // Conservative approximation of whatever we sent in transport parameters + self.sent_max_remote[dir as usize] = 0; + } + } + + self.pending.clear(); + self.send_streams = 0; + self.data_sent = 0; + self.connection_blocked.clear(); + } + + /// Process incoming stream frame + /// + /// If successful, returns whether a `MAX_DATA` frame needs to be transmitted + pub(crate) fn received( + &mut self, + frame: frame::Stream, + payload_len: usize, + ) -> Result { + let id = frame.id; + self.validate_receive_id(id).map_err(|e| { + debug!("received illegal STREAM frame"); + e + })?; + + let rs = match self + .recv + .get_mut(&id) + .map(get_or_insert_recv(self.stream_receive_window)) + { + Some(rs) => rs, + None => { + trace!("dropping frame for closed stream"); + return Ok(ShouldTransmit(false)); + } + }; + + if !rs.is_receiving() { + trace!("dropping frame for finished stream"); + return Ok(ShouldTransmit(false)); + } + + let (new_bytes, closed) = + rs.ingest(frame, payload_len, self.data_recvd, self.local_max_data)?; + self.data_recvd = self.data_recvd.saturating_add(new_bytes); + + if !rs.stopped { + self.on_stream_frame(true, id); + return Ok(ShouldTransmit(false)); + } + + // Stopped streams become closed instantly on FIN, so check whether we need to clean up + if closed { + let rs = self.recv.remove(&id).flatten().unwrap(); + self.stream_recv_freed(id, rs); + } + + // We don't buffer data on stopped streams, so issue flow control credit immediately + Ok(self.add_read_credits(new_bytes)) + } + + /// Process incoming RESET_STREAM frame + /// + /// If successful, returns whether a `MAX_DATA` frame needs to be transmitted + #[allow(unreachable_pub)] // fuzzing only + pub fn received_reset( + &mut self, + frame: frame::ResetStream, + ) -> Result { + let frame::ResetStream { + id, + error_code, + final_offset, + } = frame; + self.validate_receive_id(id).map_err(|e| { + debug!("received illegal RESET_STREAM frame"); + e + })?; + + let rs 
= match self + .recv + .get_mut(&id) + .map(get_or_insert_recv(self.stream_receive_window)) + { + Some(stream) => stream, + None => { + trace!("received RESET_STREAM on closed stream"); + return Ok(ShouldTransmit(false)); + } + }; + + // State transition + if !rs.reset( + error_code, + final_offset, + self.data_recvd, + self.local_max_data, + )? { + // Redundant reset + return Ok(ShouldTransmit(false)); + } + let bytes_read = rs.assembler.bytes_read(); + let stopped = rs.stopped; + let end = rs.end; + if stopped { + // Stopped streams should be disposed immediately on reset + let rs = self.recv.remove(&id).flatten().unwrap(); + self.stream_recv_freed(id, rs); + } + self.on_stream_frame(!stopped, id); + + // Update connection-level flow control + Ok(if bytes_read != final_offset.into_inner() { + // bytes_read is always <= end, so this won't underflow. + self.data_recvd = self + .data_recvd + .saturating_add(u64::from(final_offset) - end); + self.add_read_credits(u64::from(final_offset) - bytes_read) + } else { + ShouldTransmit(false) + }) + } + + /// Process incoming `STOP_SENDING` frame + #[allow(unreachable_pub)] // fuzzing only + pub fn received_stop_sending(&mut self, id: StreamId, error_code: VarInt) { + let max_send_data = self.max_send_data(id); + let stream = match self + .send + .get_mut(&id) + .map(get_or_insert_send(max_send_data)) + { + Some(ss) => ss, + None => return, + }; + + if stream.try_stop(error_code) { + self.events + .push_back(StreamEvent::Stopped { id, error_code }); + self.on_stream_frame(false, id); + } + } + + pub(crate) fn reset_acked(&mut self, id: StreamId) { + match self.send.entry(id) { + hash_map::Entry::Vacant(_) => {} + hash_map::Entry::Occupied(e) => { + if let Some(SendState::ResetSent) = e.get().as_ref().map(|s| s.state) { + e.remove_entry(); + self.stream_freed(id, StreamHalf::Send); + } + } + } + } + + /// Whether any stream data is queued, regardless of control frames + pub(crate) fn can_send_stream_data(&self) -> bool { + // 
Reset streams may linger in the pending stream list, but will never produce stream frames + self.pending.iter().any(|stream| { + self.send + .get(&stream.id) + .and_then(|s| s.as_ref()) + .is_some_and(|s| !s.is_reset()) + }) + } + + /// Whether MAX_STREAM_DATA frames could be sent for stream `id` + pub(crate) fn can_send_flow_control(&self, id: StreamId) -> bool { + self.recv + .get(&id) + .and_then(|s| s.as_ref()) + .and_then(|s| s.as_open_recv()) + .is_some_and(|s| s.can_send_flow_control()) + } + + pub(in crate::connection) fn write_control_frames( + &mut self, + buf: &mut Vec, + pending: &mut Retransmits, + retransmits: &mut ThinRetransmits, + stats: &mut FrameStats, + max_size: usize, + ) { + // RESET_STREAM + while buf.len() + frame::ResetStream::SIZE_BOUND < max_size { + let (id, error_code) = match pending.reset_stream.pop() { + Some(x) => x, + None => break, + }; + let stream = match self.send.get_mut(&id).and_then(|s| s.as_mut()) { + Some(x) => x, + None => continue, + }; + trace!(stream = %id, "RESET_STREAM"); + retransmits + .get_or_create() + .reset_stream + .push((id, error_code)); + frame::ResetStream { + id, + error_code, + final_offset: VarInt::try_from(stream.offset()).expect("impossibly large offset"), + } + .encode(buf); + stats.reset_stream += 1; + } + + // STOP_SENDING + while buf.len() + frame::StopSending::SIZE_BOUND < max_size { + let frame = match pending.stop_sending.pop() { + Some(x) => x, + None => break, + }; + // We may need to transmit STOP_SENDING even for streams whose state we have discarded, + // because we are able to discard local state for stopped streams immediately upon + // receiving FIN, even if the peer still has arbitrarily large amounts of data to + // (re)transmit due to loss or unconventional sending strategy. 
We could fine-tune this + // a little by dropping the frame if we specifically know the stream's been reset by the + // peer, but we discard that information as soon as the application consumes it, so it + // can't be relied upon regardless. + trace!(stream = %frame.id, "STOP_SENDING"); + frame.encode(buf); + retransmits.get_or_create().stop_sending.push(frame); + stats.stop_sending += 1; + } + + // MAX_DATA + if pending.max_data && buf.len() + 9 < max_size { + pending.max_data = false; + + // `local_max_data` can grow bigger than `VarInt`. + // For transmission inside QUIC frames we need to clamp it to the + // maximum allowed `VarInt` size. + let max = VarInt::try_from(self.local_max_data).unwrap_or(VarInt::MAX); + + trace!(value = max.into_inner(), "MAX_DATA"); + if max > self.sent_max_data { + // Record that a `MAX_DATA` announcing a certain window was sent. This will + // suppress enqueuing further `MAX_DATA` frames unless either the previous + // transmission was not acknowledged or the window further increased. 
+ self.sent_max_data = max; + } + + retransmits.get_or_create().max_data = true; + buf.write(frame::FrameType::MAX_DATA); + buf.write(max); + stats.max_data += 1; + } + + // MAX_STREAM_DATA + while buf.len() + 17 < max_size { + let id = match pending.max_stream_data.iter().next() { + Some(x) => *x, + None => break, + }; + pending.max_stream_data.remove(&id); + let rs = match self + .recv + .get_mut(&id) + .and_then(|s| s.as_mut()) + .and_then(|s| s.as_open_recv_mut()) + { + Some(x) => x, + None => continue, + }; + if !rs.can_send_flow_control() { + continue; + } + retransmits.get_or_create().max_stream_data.insert(id); + + let (max, _) = rs.max_stream_data(self.stream_receive_window); + rs.record_sent_max_stream_data(max); + + trace!(stream = %id, max = max, "MAX_STREAM_DATA"); + buf.write(frame::FrameType::MAX_STREAM_DATA); + buf.write(id); + buf.write_var(max); + stats.max_stream_data += 1; + } + + // MAX_STREAMS + for dir in Dir::iter() { + if !pending.max_stream_id[dir as usize] || buf.len() + 9 >= max_size { + continue; + } + + pending.max_stream_id[dir as usize] = false; + retransmits.get_or_create().max_stream_id[dir as usize] = true; + self.sent_max_remote[dir as usize] = self.max_remote[dir as usize]; + trace!( + value = self.max_remote[dir as usize], + "MAX_STREAMS ({:?})", dir + ); + buf.write(match dir { + Dir::Uni => frame::FrameType::MAX_STREAMS_UNI, + Dir::Bi => frame::FrameType::MAX_STREAMS_BIDI, + }); + buf.write_var(self.max_remote[dir as usize]); + match dir { + Dir::Uni => stats.max_streams_uni += 1, + Dir::Bi => stats.max_streams_bidi += 1, + } + } + } + + pub(crate) fn write_stream_frames( + &mut self, + buf: &mut Vec, + max_buf_size: usize, + fair: bool, + ) -> StreamMetaVec { + let mut stream_frames = StreamMetaVec::new(); + while buf.len() + frame::Stream::SIZE_BOUND < max_buf_size { + if max_buf_size + .checked_sub(buf.len() + frame::Stream::SIZE_BOUND) + .is_none() + { + break; + } + + // Pop the stream of the highest priority that 
currently has pending data + // If the stream still has some pending data left after writing, it will be reinserted, otherwise not + let Some(stream) = self.pending.pop() else { + break; + }; + + let id = stream.id; + + let stream = match self.send.get_mut(&id).and_then(|s| s.as_mut()) { + Some(s) => s, + // Stream was reset with pending data and the reset was acknowledged + None => continue, + }; + + // Reset streams aren't removed from the pending list and still exist while the peer + // hasn't acknowledged the reset, but should not generate STREAM frames, so we need to + // check for them explicitly. + if stream.is_reset() { + continue; + } + + // Now that we know the `StreamId`, we can better account for how many bytes + // are required to encode it. + let max_buf_size = max_buf_size - buf.len() - 1 - VarInt::size(id.into()); + let (offsets, encode_length) = stream.pending.poll_transmit(max_buf_size); + let fin = offsets.end == stream.pending.offset() + && matches!(stream.state, SendState::DataSent { .. }); + if fin { + stream.fin_pending = false; + } + + if stream.is_pending() { + // If the stream still has pending data, reinsert it, possibly with an updated priority value + // Fairness with other streams is achieved by implementing round-robin scheduling, + // so that the other streams will have a chance to write data + // before we touch this stream again. + if fair { + self.pending.push_pending(id, stream.priority); + } else { + self.pending.reinsert_pending(id, stream.priority); + } + } + + let meta = frame::StreamMeta { id, offsets, fin }; + trace!(id = %meta.id, off = meta.offsets.start, len = meta.offsets.end - meta.offsets.start, fin = meta.fin, "STREAM"); + meta.encode(encode_length, buf); + + // The range might not be retrievable in a single `get` if it is + // stored in noncontiguous fashion. Therefore this loop iterates + // until the range is fully copied into the frame. 
+ let mut offsets = meta.offsets.clone(); + while offsets.start != offsets.end { + let data = stream.pending.get(offsets.clone()); + offsets.start += data.len() as u64; + buf.put_slice(data); + } + stream_frames.push(meta); + } + + stream_frames + } + + /// Notify the application that new streams were opened or a stream became readable. + fn on_stream_frame(&mut self, notify_readable: bool, stream: StreamId) { + if stream.initiator() == self.side { + // Notifying about the opening of locally-initiated streams would be redundant. + if notify_readable { + self.events.push_back(StreamEvent::Readable { id: stream }); + } + return; + } + let next = &mut self.next_remote[stream.dir() as usize]; + if stream.index() >= *next { + *next = stream.index() + 1; + self.opened[stream.dir() as usize] = true; + } else if notify_readable { + self.events.push_back(StreamEvent::Readable { id: stream }); + } + } + + pub(crate) fn received_ack_of(&mut self, frame: frame::StreamMeta) { + let mut entry = match self.send.entry(frame.id) { + hash_map::Entry::Vacant(_) => return, + hash_map::Entry::Occupied(e) => e, + }; + + let stream = match entry.get_mut().as_mut() { + Some(s) => s, + None => { + // Because we only call this after sending data on this stream, + // this closure should be unreachable. If we did somehow screw that up, + // then we might hit an underflow below with unpredictable effects down + // the line. Best to short-circuit. 
+ return; + } + }; + + if stream.is_reset() { + // We account for outstanding data on reset streams at time of reset + return; + } + let id = frame.id; + self.unacked_data -= frame.offsets.end - frame.offsets.start; + if !stream.ack(frame) { + // The stream is unfinished or may still need retransmits + return; + } + + entry.remove_entry(); + self.stream_freed(id, StreamHalf::Send); + self.events.push_back(StreamEvent::Finished { id }); + } + + pub(crate) fn retransmit(&mut self, frame: frame::StreamMeta) { + let stream = match self.send.get_mut(&frame.id).and_then(|s| s.as_mut()) { + // Loss of data on a closed stream is a noop + None => return, + Some(x) => x, + }; + if !stream.is_pending() { + self.pending.push_pending(frame.id, stream.priority); + } + stream.fin_pending |= frame.fin; + stream.pending.retransmit(frame.offsets); + } + + pub(crate) fn retransmit_all_for_0rtt(&mut self) { + for dir in Dir::iter() { + for index in 0..self.next[dir as usize] { + let id = StreamId::new(Side::Client, dir, index); + let stream = match self.send.get_mut(&id).and_then(|s| s.as_mut()) { + Some(stream) => stream, + None => continue, + }; + if stream.pending.is_fully_acked() && !stream.fin_pending { + // Stream data can't be acked in 0-RTT, so we must not have sent anything on + // this stream + continue; + } + if !stream.is_pending() { + self.pending.push_pending(id, stream.priority); + } + stream.pending.retransmit_all_for_0rtt(); + } + } + } + + pub(crate) fn received_max_streams( + &mut self, + dir: Dir, + count: u64, + ) -> Result<(), TransportError> { + if count > MAX_STREAM_COUNT { + return Err(TransportError::FRAME_ENCODING_ERROR( + "unrepresentable stream limit", + )); + } + + let current = &mut self.max[dir as usize]; + if count > *current { + *current = count; + self.events.push_back(StreamEvent::Available { dir }); + } + + Ok(()) + } + + /// Handle increase to connection-level flow control limit + pub(crate) fn received_max_data(&mut self, n: VarInt) { + 
self.max_data = self.max_data.max(n.into()); + } + + pub(crate) fn received_max_stream_data( + &mut self, + id: StreamId, + offset: u64, + ) -> Result<(), TransportError> { + if id.initiator() != self.side && id.dir() == Dir::Uni { + debug!("got MAX_STREAM_DATA on recv-only {}", id); + return Err(TransportError::STREAM_STATE_ERROR( + "MAX_STREAM_DATA on recv-only stream", + )); + } + + let write_limit = self.write_limit(); + let max_send_data = self.max_send_data(id); + if let Some(ss) = self + .send + .get_mut(&id) + .map(get_or_insert_send(max_send_data)) + { + if ss.increase_max_data(offset) { + if write_limit > 0 { + self.events.push_back(StreamEvent::Writable { id }); + } else if !ss.connection_blocked { + // The stream is still blocked on the connection flow control + // window. In order to get unblocked when the window relaxes + // it needs to be in the connection blocked list. + ss.connection_blocked = true; + self.connection_blocked.push(id); + } + } + } else if id.initiator() == self.side && self.is_local_unopened(id) { + debug!("got MAX_STREAM_DATA on unopened {}", id); + return Err(TransportError::STREAM_STATE_ERROR( + "MAX_STREAM_DATA on unopened stream", + )); + } + + self.on_stream_frame(false, id); + Ok(()) + } + + /// Returns the maximum amount of data this is allowed to be written on the connection + pub(crate) fn write_limit(&self) -> u64 { + (self.max_data - self.data_sent) + // `send_window` can be set after construction to something *less* than `unacked_data` + .min(self.send_window.saturating_sub(self.unacked_data)) + } + + /// Yield stream events + pub(crate) fn poll(&mut self) -> Option { + if let Some(dir) = Dir::iter().find(|&i| mem::replace(&mut self.opened[i as usize], false)) + { + return Some(StreamEvent::Opened { dir }); + } + + if self.write_limit() > 0 { + while let Some(id) = self.connection_blocked.pop() { + let stream = match self.send.get_mut(&id).and_then(|s| s.as_mut()) { + None => continue, + Some(s) => s, + }; + + 
debug_assert!(stream.connection_blocked); + stream.connection_blocked = false; + + // If it's no longer sensible to write to a stream (even to detect an error) then don't + // report it. + if stream.is_writable() && stream.max_data > stream.offset() { + return Some(StreamEvent::Writable { id }); + } + } + } + + self.events.pop_front() + } + + /// Queues MAX_STREAM_ID frames in `pending` if needed + /// + /// Returns whether any frames were queued. + pub(crate) fn queue_max_stream_id(&mut self, pending: &mut Retransmits) -> bool { + let mut queued = false; + for dir in Dir::iter() { + let diff = self.max_remote[dir as usize] - self.sent_max_remote[dir as usize]; + // To reduce traffic, only announce updates if at least 1/8 of the flow control window + // has been consumed. + if diff > self.max_concurrent_remote_count[dir as usize] / 8 { + pending.max_stream_id[dir as usize] = true; + queued = true; + } + } + queued + } + + /// Check for errors entailed by the peer's use of `id` as a send stream + fn validate_receive_id(&mut self, id: StreamId) -> Result<(), TransportError> { + if self.side == id.initiator() { + match id.dir() { + Dir::Uni => { + return Err(TransportError::STREAM_STATE_ERROR( + "illegal operation on send-only stream", + )); + } + Dir::Bi if id.index() >= self.next[Dir::Bi as usize] => { + return Err(TransportError::STREAM_STATE_ERROR( + "operation on unopened stream", + )); + } + Dir::Bi => {} + }; + } else { + let limit = self.max_remote[id.dir() as usize]; + if id.index() >= limit { + return Err(TransportError::STREAM_LIMIT_ERROR("")); + } + } + Ok(()) + } + + /// Whether a locally initiated stream has never been open + pub(crate) fn is_local_unopened(&self, id: StreamId) -> bool { + id.index() >= self.next[id.dir() as usize] + } + + pub(crate) fn set_max_concurrent(&mut self, dir: Dir, count: VarInt) { + self.flow_control_adjusted = true; + self.max_concurrent_remote_count[dir as usize] = count.into(); + self.ensure_remote_streams(dir); + } + + 
pub(crate) fn max_concurrent(&self, dir: Dir) -> u64 { + self.allocated_remote_count[dir as usize] + } + + pub(crate) fn set_send_window(&mut self, send_window: u64) { + self.send_window = send_window; + } + + /// Set the receive_window and returns whether the receive_window has been + /// expanded or shrunk: true if expanded, false if shrunk. + pub(crate) fn set_receive_window(&mut self, receive_window: VarInt) -> bool { + let receive_window = receive_window.into(); + let mut expanded = false; + if receive_window > self.receive_window { + self.local_max_data = self + .local_max_data + .saturating_add(receive_window - self.receive_window); + expanded = true; + } else { + let diff = self.receive_window - receive_window; + self.receive_window_shrink_debt = self.receive_window_shrink_debt.saturating_add(diff); + } + self.receive_window = receive_window; + expanded + } + + pub(super) fn insert(&mut self, remote: bool, id: StreamId) { + let bi = id.dir() == Dir::Bi; + // bidirectional OR (unidirectional AND NOT remote) + if bi || !remote { + assert!(self.send.insert(id, None).is_none()); + } + // bidirectional OR (unidirectional AND remote) + if bi || remote { + let recv = self.free_recv.pop(); + assert!(self.recv.insert(id, recv).is_none()); + } + } + + /// Adds credits to the connection flow control window + /// + /// Returns whether a `MAX_DATA` frame should be enqueued as soon as possible. + /// This will only be the case if the window update would is significant + /// enough. As soon as a window update with a `MAX_DATA` frame has been + /// queued, the [`Recv::record_sent_max_stream_data`] function should be called to + /// suppress sending further updates until the window increases significantly + /// again. 
+ pub(super) fn add_read_credits(&mut self, credits: u64) -> ShouldTransmit { + if credits > self.receive_window_shrink_debt { + let net_credits = credits - self.receive_window_shrink_debt; + self.local_max_data = self.local_max_data.saturating_add(net_credits); + self.receive_window_shrink_debt = 0; + } else { + self.receive_window_shrink_debt -= credits; + } + + if self.local_max_data > VarInt::MAX.into_inner() { + return ShouldTransmit(false); + } + + // Only announce a window update if it's significant enough + // to make it worthwhile sending a MAX_DATA frame. + // We use a fraction of the configured connection receive window to make + // the decision, to accommodate for connection using bigger windows requiring + // less updates. + let diff = self.local_max_data - self.sent_max_data.into_inner(); + ShouldTransmit(diff >= (self.receive_window / 8)) + } + + /// Update counters for removal of a stream + pub(super) fn stream_freed(&mut self, id: StreamId, half: StreamHalf) { + if id.initiator() != self.side { + let fully_free = id.dir() == Dir::Uni + || match half { + StreamHalf::Send => !self.recv.contains_key(&id), + StreamHalf::Recv => !self.send.contains_key(&id), + }; + if fully_free { + self.allocated_remote_count[id.dir() as usize] -= 1; + self.ensure_remote_streams(id.dir()); + } + } + if half == StreamHalf::Send { + self.send_streams -= 1; + } + } + + pub(super) fn stream_recv_freed(&mut self, id: StreamId, recv: StreamRecv) { + self.free_recv.push(recv.free(self.stream_receive_window)); + self.stream_freed(id, StreamHalf::Recv); + } + + pub(super) fn max_send_data(&self, id: StreamId) -> VarInt { + let remote = self.side != id.initiator(); + match id.dir() { + Dir::Uni => self.initial_max_stream_data_uni, + // Remote/local appear reversed here because the transport parameters are named from + // the perspective of the peer. 
+ Dir::Bi if remote => self.initial_max_stream_data_bidi_local, + Dir::Bi => self.initial_max_stream_data_bidi_remote, + } + } +} + +#[inline] +pub(super) fn get_or_insert_send( + max_data: VarInt, +) -> impl Fn(&mut Option>) -> &mut Box { + move |opt| opt.get_or_insert_with(|| Send::new(max_data)) +} + +#[inline] +pub(super) fn get_or_insert_recv( + initial_max_data: u64, +) -> impl FnMut(&mut Option) -> &mut Recv { + move |opt| { + *opt = opt.take().map(|s| match s { + StreamRecv::Free(recv) => StreamRecv::Open(recv), + s => s, + }); + opt.get_or_insert_with(|| StreamRecv::Open(Recv::new(initial_max_data))) + .as_open_recv_mut() + .unwrap() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + ReadableError, RecvStream, SendStream, TransportErrorCode, WriteError, + connection::State as ConnState, connection::Streams, + }; + use bytes::Bytes; + + fn make(side: Side) -> StreamsState { + StreamsState::new( + side, + 128u32.into(), + 128u32.into(), + 1024 * 1024, + (1024 * 1024u32).into(), + (1024 * 1024u32).into(), + ) + } + + #[test] + fn trivial_flow_control() { + let mut client = StreamsState::new( + Side::Client, + 1u32.into(), + 1u32.into(), + 1024 * 1024, + (1024 * 1024u32).into(), + (1024 * 1024u32).into(), + ); + let id = StreamId::new(Side::Server, Dir::Uni, 0); + let initial_max = client.local_max_data; + const MESSAGE_SIZE: usize = 2048; + assert_eq!( + client + .received( + frame::Stream { + id, + offset: 0, + fin: true, + data: Bytes::from_static(&[0; MESSAGE_SIZE]), + }, + 2048 + ) + .unwrap(), + ShouldTransmit(false) + ); + assert_eq!(client.data_recvd, 2048); + assert_eq!(client.local_max_data - initial_max, 0); + + let mut pending = Retransmits::default(); + let mut recv = RecvStream { + id, + state: &mut client, + pending: &mut pending, + }; + + let mut chunks = recv.read(true).unwrap(); + assert_eq!( + chunks.next(MESSAGE_SIZE).unwrap().unwrap().bytes.len(), + MESSAGE_SIZE + ); + assert!(chunks.next(0).unwrap().is_none()); + let 
should_transmit = chunks.finalize(); + assert!(should_transmit.0); + assert!(pending.max_stream_id[Dir::Uni as usize]); + assert_eq!(client.local_max_data - initial_max, MESSAGE_SIZE as u64); + } + + #[test] + fn reset_flow_control() { + let mut client = make(Side::Client); + let id = StreamId::new(Side::Server, Dir::Uni, 0); + let initial_max = client.local_max_data; + assert_eq!( + client + .received( + frame::Stream { + id, + offset: 0, + fin: false, + data: Bytes::from_static(&[0; 2048]), + }, + 2048 + ) + .unwrap(), + ShouldTransmit(false) + ); + assert_eq!(client.data_recvd, 2048); + assert_eq!(client.local_max_data - initial_max, 0); + + let mut pending = Retransmits::default(); + let mut recv = RecvStream { + id, + state: &mut client, + pending: &mut pending, + }; + + let mut chunks = recv.read(true).unwrap(); + chunks.next(1024).unwrap(); + let _ = chunks.finalize(); + assert_eq!(client.local_max_data - initial_max, 1024); + assert_eq!( + client + .received_reset(frame::ResetStream { + id, + error_code: 0u32.into(), + final_offset: 4096u32.into(), + }) + .unwrap(), + ShouldTransmit(false) + ); + + assert_eq!(client.data_recvd, 4096); + assert_eq!(client.local_max_data - initial_max, 4096); + + // Ensure reading after a reset doesn't issue redundant credit + let mut recv = RecvStream { + id, + state: &mut client, + pending: &mut pending, + }; + let mut chunks = recv.read(true).unwrap(); + assert_eq!( + chunks.next(1024).unwrap_err(), + crate::ReadError::Reset(0u32.into()) + ); + let _ = chunks.finalize(); + assert_eq!(client.data_recvd, 4096); + assert_eq!(client.local_max_data - initial_max, 4096); + } + + #[test] + fn reset_after_empty_frame_flow_control() { + let mut client = make(Side::Client); + let id = StreamId::new(Side::Server, Dir::Uni, 0); + let initial_max = client.local_max_data; + assert_eq!( + client + .received( + frame::Stream { + id, + offset: 4096, + fin: false, + data: Bytes::from_static(&[0; 0]), + }, + 0 + ) + .unwrap(), + 
ShouldTransmit(false) + ); + assert_eq!(client.data_recvd, 4096); + assert_eq!(client.local_max_data - initial_max, 0); + assert_eq!( + client + .received_reset(frame::ResetStream { + id, + error_code: 0u32.into(), + final_offset: 4096u32.into(), + }) + .unwrap(), + ShouldTransmit(false) + ); + assert_eq!(client.data_recvd, 4096); + assert_eq!(client.local_max_data - initial_max, 4096); + } + + #[test] + fn duplicate_reset_flow_control() { + let mut client = make(Side::Client); + let id = StreamId::new(Side::Server, Dir::Uni, 0); + assert_eq!( + client + .received_reset(frame::ResetStream { + id, + error_code: 0u32.into(), + final_offset: 4096u32.into(), + }) + .unwrap(), + ShouldTransmit(false) + ); + assert_eq!(client.data_recvd, 4096); + assert_eq!( + client + .received_reset(frame::ResetStream { + id, + error_code: 0u32.into(), + final_offset: 4096u32.into(), + }) + .unwrap(), + ShouldTransmit(false) + ); + assert_eq!(client.data_recvd, 4096); + } + + #[test] + fn recv_stopped() { + let mut client = make(Side::Client); + let id = StreamId::new(Side::Server, Dir::Uni, 0); + let initial_max = client.local_max_data; + assert_eq!( + client + .received( + frame::Stream { + id, + offset: 0, + fin: false, + data: Bytes::from_static(&[0; 32]), + }, + 32 + ) + .unwrap(), + ShouldTransmit(false) + ); + assert_eq!(client.local_max_data, initial_max); + + let mut pending = Retransmits::default(); + let mut recv = RecvStream { + id, + state: &mut client, + pending: &mut pending, + }; + + recv.stop(0u32.into()).unwrap(); + assert_eq!(recv.pending.stop_sending.len(), 1); + assert!(!recv.pending.max_data); + + assert!(recv.stop(0u32.into()).is_err()); + assert_eq!(recv.read(true).err(), Some(ReadableError::ClosedStream)); + assert_eq!(recv.read(false).err(), Some(ReadableError::ClosedStream)); + + assert_eq!(client.local_max_data - initial_max, 32); + assert_eq!( + client + .received( + frame::Stream { + id, + offset: 32, + fin: true, + data: Bytes::from_static(&[0; 16]), + }, 
+ 16 + ) + .unwrap(), + ShouldTransmit(false) + ); + assert_eq!(client.local_max_data - initial_max, 48); + assert!(!client.recv.contains_key(&id)); + } + + #[test] + fn stopped_reset() { + let mut client = make(Side::Client); + let id = StreamId::new(Side::Server, Dir::Uni, 0); + // Server opens stream + assert_eq!( + client + .received( + frame::Stream { + id, + offset: 0, + fin: false, + data: Bytes::from_static(&[0; 32]) + }, + 32 + ) + .unwrap(), + ShouldTransmit(false) + ); + + let mut pending = Retransmits::default(); + let mut recv = RecvStream { + id, + state: &mut client, + pending: &mut pending, + }; + + recv.stop(0u32.into()).unwrap(); + assert_eq!(pending.stop_sending.len(), 1); + assert!(!pending.max_data); + + // Server complies + let prev_max = client.max_remote[Dir::Uni as usize]; + assert_eq!( + client + .received_reset(frame::ResetStream { + id, + error_code: 0u32.into(), + final_offset: 32u32.into(), + }) + .unwrap(), + ShouldTransmit(false) + ); + assert!(!client.recv.contains_key(&id), "stream state is freed"); + assert_eq!(client.max_remote[Dir::Uni as usize], prev_max + 1); + } + + #[test] + fn send_stopped() { + let mut server = make(Side::Server); + server.set_params(&TransportParameters { + initial_max_streams_uni: 1u32.into(), + initial_max_data: 42u32.into(), + initial_max_stream_data_uni: 42u32.into(), + ..TransportParameters::default() + }); + + let (mut pending, state) = (Retransmits::default(), ConnState::Established); + let id = Streams { + state: &mut server, + conn_state: &state, + } + .open(Dir::Uni) + .unwrap(); + + let mut stream = SendStream { + id, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + + let error_code = 0u32.into(); + stream.state.received_stop_sending(id, error_code); + assert!( + stream + .state + .events + .contains(&StreamEvent::Stopped { id, error_code }) + ); + stream.state.events.clear(); + + assert_eq!(stream.write(&[]), Err(WriteError::Stopped(error_code))); + + 
stream.reset(0u32.into()).unwrap(); + assert_eq!(stream.write(&[]), Err(WriteError::ClosedStream)); + + // A duplicate frame is a no-op + stream.state.received_stop_sending(id, error_code); + assert!(stream.state.events.is_empty()); + } + + #[test] + fn final_offset_flow_control() { + let mut client = make(Side::Client); + assert_eq!( + client + .received_reset(frame::ResetStream { + id: StreamId::new(Side::Server, Dir::Uni, 0), + error_code: 0u32.into(), + final_offset: VarInt::MAX, + }) + .unwrap_err() + .code, + TransportErrorCode::FLOW_CONTROL_ERROR + ); + } + + #[test] + fn stream_priority() { + let mut server = make(Side::Server); + server.set_params(&TransportParameters { + initial_max_streams_bidi: 3u32.into(), + initial_max_data: 10u32.into(), + initial_max_stream_data_bidi_remote: 10u32.into(), + ..TransportParameters::default() + }); + + let (mut pending, state) = (Retransmits::default(), ConnState::Established); + let mut streams = Streams { + state: &mut server, + conn_state: &state, + }; + + let id_high = streams.open(Dir::Bi).unwrap(); + let id_mid = streams.open(Dir::Bi).unwrap(); + let id_low = streams.open(Dir::Bi).unwrap(); + + let mut mid = SendStream { + id: id_mid, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + mid.write(b"mid").unwrap(); + + let mut low = SendStream { + id: id_low, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + low.set_priority(-1).unwrap(); + low.write(b"low").unwrap(); + + let mut high = SendStream { + id: id_high, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + high.set_priority(1).unwrap(); + high.write(b"high").unwrap(); + + let mut buf = Vec::with_capacity(40); + let meta = server.write_stream_frames(&mut buf, 40, true); + assert_eq!(meta[0].id, id_high); + assert_eq!(meta[1].id, id_mid); + assert_eq!(meta[2].id, id_low); + + assert!(!server.can_send_stream_data()); + assert_eq!(server.pending.len(), 0); + } + + #[test] + fn 
requeue_stream_priority() { + let mut server = make(Side::Server); + server.set_params(&TransportParameters { + initial_max_streams_bidi: 3u32.into(), + initial_max_data: 1000u32.into(), + initial_max_stream_data_bidi_remote: 1000u32.into(), + ..TransportParameters::default() + }); + + let (mut pending, state) = (Retransmits::default(), ConnState::Established); + let mut streams = Streams { + state: &mut server, + conn_state: &state, + }; + + let id_high = streams.open(Dir::Bi).unwrap(); + let id_mid = streams.open(Dir::Bi).unwrap(); + + let mut mid = SendStream { + id: id_mid, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + assert_eq!(mid.write(b"mid").unwrap(), 3); + assert_eq!(server.pending.len(), 1); + + let mut high = SendStream { + id: id_high, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + high.set_priority(1).unwrap(); + assert_eq!(high.write(&[0; 200]).unwrap(), 200); + assert_eq!(server.pending.len(), 2); + + // Requeue the high priority stream to lowest priority. The initial send + // still uses high priority since it's queued that way. After that it will + // switch to low priority + let mut high = SendStream { + id: id_high, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + high.set_priority(-1).unwrap(); + + let mut buf = Vec::with_capacity(1000); + let meta = server.write_stream_frames(&mut buf, 40, true); + assert_eq!(meta.len(), 1); + assert_eq!(meta[0].id, id_high); + + // After requeuing we should end up with 2 priorities - not 3 + assert_eq!(server.pending.len(), 2); + + // Send the remaining data. 
The initial mid priority one should go first now + let meta = server.write_stream_frames(&mut buf, 1000, true); + assert_eq!(meta.len(), 2); + assert_eq!(meta[0].id, id_mid); + assert_eq!(meta[1].id, id_high); + + assert!(!server.can_send_stream_data()); + assert_eq!(server.pending.len(), 0); + } + + #[test] + fn same_stream_priority() { + for fair in [true, false] { + let mut server = make(Side::Server); + server.set_params(&TransportParameters { + initial_max_streams_bidi: 3u32.into(), + initial_max_data: 300u32.into(), + initial_max_stream_data_bidi_remote: 300u32.into(), + ..TransportParameters::default() + }); + + let (mut pending, state) = (Retransmits::default(), ConnState::Established); + let mut streams = Streams { + state: &mut server, + conn_state: &state, + }; + + // a, b and c all have the same priority + let id_a = streams.open(Dir::Bi).unwrap(); + let id_b = streams.open(Dir::Bi).unwrap(); + let id_c = streams.open(Dir::Bi).unwrap(); + + let mut stream_a = SendStream { + id: id_a, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + stream_a.write(&[b'a'; 100]).unwrap(); + + let mut stream_b = SendStream { + id: id_b, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + stream_b.write(&[b'b'; 100]).unwrap(); + + let mut stream_c = SendStream { + id: id_c, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + stream_c.write(&[b'c'; 100]).unwrap(); + + let mut metas = vec![]; + let mut buf = Vec::with_capacity(1024); + + // loop until all the streams are written + loop { + let buf_len = buf.len(); + let meta = server.write_stream_frames(&mut buf, buf_len + 40, fair); + if meta.is_empty() { + break; + } + metas.extend(meta); + } + + assert!(!server.can_send_stream_data()); + assert_eq!(server.pending.len(), 0); + + let stream_ids = metas.iter().map(|m| m.id).collect::>(); + if fair { + // When fairness is enabled, if we run out of buffer space to write out a stream, + // the stream is 
re-queued after all the streams with the same priority. + assert_eq!( + stream_ids, + vec![id_a, id_b, id_c, id_a, id_b, id_c, id_a, id_b, id_c] + ); + } else { + // When fairness is disabled the stream is re-queued before all the other streams + // with the same priority. + assert_eq!( + stream_ids, + vec![id_a, id_a, id_a, id_b, id_b, id_b, id_c, id_c, id_c] + ); + } + } + } + + #[test] + fn unfair_priority_bump() { + let mut server = make(Side::Server); + server.set_params(&TransportParameters { + initial_max_streams_bidi: 3u32.into(), + initial_max_data: 300u32.into(), + initial_max_stream_data_bidi_remote: 300u32.into(), + ..TransportParameters::default() + }); + + let (mut pending, state) = (Retransmits::default(), ConnState::Established); + let mut streams = Streams { + state: &mut server, + conn_state: &state, + }; + + // a, and b have the same priority, c has higher priority + let id_a = streams.open(Dir::Bi).unwrap(); + let id_b = streams.open(Dir::Bi).unwrap(); + let id_c = streams.open(Dir::Bi).unwrap(); + + let mut stream_a = SendStream { + id: id_a, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + stream_a.write(&[b'a'; 100]).unwrap(); + + let mut stream_b = SendStream { + id: id_b, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + stream_b.write(&[b'b'; 100]).unwrap(); + + let mut metas = vec![]; + let mut buf = Vec::with_capacity(1024); + + // Write the first chunk of stream_a + let buf_len = buf.len(); + let meta = server.write_stream_frames(&mut buf, buf_len + 40, false); + assert!(!meta.is_empty()); + metas.extend(meta); + + // Queue stream_c which has higher priority + let mut stream_c = SendStream { + id: id_c, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + stream_c.set_priority(1).unwrap(); + stream_c.write(&[b'b'; 100]).unwrap(); + + // loop until all the streams are written + loop { + let buf_len = buf.len(); + let meta = server.write_stream_frames(&mut buf, 
buf_len + 40, false); + if meta.is_empty() { + break; + } + metas.extend(meta); + } + + assert!(!server.can_send_stream_data()); + assert_eq!(server.pending.len(), 0); + + let stream_ids = metas.iter().map(|m| m.id).collect::>(); + assert_eq!( + stream_ids, + // stream_c bumps stream_b but doesn't bump stream_a which had already been partly + // written out + vec![id_a, id_a, id_a, id_c, id_c, id_c, id_b, id_b, id_b] + ); + } + + #[test] + fn stop_finished() { + let mut client = make(Side::Client); + let id = StreamId::new(Side::Server, Dir::Uni, 0); + // Server finishes stream + let _ = client + .received( + frame::Stream { + id, + offset: 0, + fin: true, + data: Bytes::from_static(&[0; 32]), + }, + 32, + ) + .unwrap(); + let mut pending = Retransmits::default(); + let mut stream = RecvStream { + id, + state: &mut client, + pending: &mut pending, + }; + stream.stop(0u32.into()).unwrap(); + assert!(client.recv.get_mut(&id).is_none(), "stream is freed"); + } + + // Verify that a stream that's been reset doesn't cause the appearance of pending data + #[test] + fn reset_stream_cannot_send() { + let mut server = make(Side::Server); + server.set_params(&TransportParameters { + initial_max_streams_uni: 1u32.into(), + initial_max_data: 42u32.into(), + initial_max_stream_data_uni: 42u32.into(), + ..TransportParameters::default() + }); + let (mut pending, state) = (Retransmits::default(), ConnState::Established); + let mut streams = Streams { + state: &mut server, + conn_state: &state, + }; + + let id = streams.open(Dir::Uni).unwrap(); + let mut stream = SendStream { + id, + state: &mut server, + pending: &mut pending, + conn_state: &state, + }; + stream.write(b"hello").unwrap(); + stream.reset(0u32.into()).unwrap(); + + assert_eq!(pending.reset_stream, &[(id, 0u32.into())]); + assert!(!server.can_send_stream_data()); + } + + #[test] + fn stream_limit_fixed() { + let mut client = make(Side::Client); + // Open streams 0-127 + assert_eq!( + client.received( + frame::Stream { 
+ id: StreamId::new(Side::Server, Dir::Uni, 127), + offset: 0, + fin: true, + data: Bytes::from_static(&[]), + }, + 0 + ), + Ok(ShouldTransmit(false)) + ); + // Try to open stream 128, exceeding limit + assert_eq!( + client + .received( + frame::Stream { + id: StreamId::new(Side::Server, Dir::Uni, 128), + offset: 0, + fin: true, + data: Bytes::from_static(&[]), + }, + 0 + ) + .unwrap_err() + .code, + TransportErrorCode::STREAM_LIMIT_ERROR + ); + + // Free stream 127 + let mut pending = Retransmits::default(); + let mut stream = RecvStream { + id: StreamId::new(Side::Server, Dir::Uni, 127), + state: &mut client, + pending: &mut pending, + }; + stream.stop(0u32.into()).unwrap(); + + // Open stream 128 + assert_eq!( + client.received( + frame::Stream { + id: StreamId::new(Side::Server, Dir::Uni, 128), + offset: 0, + fin: true, + data: Bytes::from_static(&[]), + }, + 0 + ), + Ok(ShouldTransmit(false)) + ); + } + + #[test] + fn stream_limit_grows() { + let mut client = make(Side::Client); + // Open streams 0-127 + assert_eq!( + client.received( + frame::Stream { + id: StreamId::new(Side::Server, Dir::Uni, 127), + offset: 0, + fin: true, + data: Bytes::from_static(&[]), + }, + 0 + ), + Ok(ShouldTransmit(false)) + ); + // Try to open stream 128, exceeding limit + assert_eq!( + client + .received( + frame::Stream { + id: StreamId::new(Side::Server, Dir::Uni, 128), + offset: 0, + fin: true, + data: Bytes::from_static(&[]), + }, + 0 + ) + .unwrap_err() + .code, + TransportErrorCode::STREAM_LIMIT_ERROR + ); + + // Relax limit by one + client.set_max_concurrent(Dir::Uni, 129u32.into()); + + // Open stream 128 + assert_eq!( + client.received( + frame::Stream { + id: StreamId::new(Side::Server, Dir::Uni, 128), + offset: 0, + fin: true, + data: Bytes::from_static(&[]), + }, + 0 + ), + Ok(ShouldTransmit(false)) + ); + } + + #[test] + fn stream_limit_shrinks() { + let mut client = make(Side::Client); + // Open streams 0-127 + assert_eq!( + client.received( + frame::Stream { + id: 
StreamId::new(Side::Server, Dir::Uni, 127), + offset: 0, + fin: true, + data: Bytes::from_static(&[]), + }, + 0 + ), + Ok(ShouldTransmit(false)) + ); + + // Tighten limit by one + client.set_max_concurrent(Dir::Uni, 127u32.into()); + + // Free stream 127 + let mut pending = Retransmits::default(); + let mut stream = RecvStream { + id: StreamId::new(Side::Server, Dir::Uni, 127), + state: &mut client, + pending: &mut pending, + }; + stream.stop(0u32.into()).unwrap(); + + // Try to open stream 128, still exceeding limit + assert_eq!( + client + .received( + frame::Stream { + id: StreamId::new(Side::Server, Dir::Uni, 128), + offset: 0, + fin: true, + data: Bytes::from_static(&[]), + }, + 0 + ) + .unwrap_err() + .code, + TransportErrorCode::STREAM_LIMIT_ERROR + ); + + // Free stream 126 + assert_eq!( + client.received_reset(frame::ResetStream { + id: StreamId::new(Side::Server, Dir::Uni, 126), + error_code: 0u32.into(), + final_offset: 0u32.into(), + }), + Ok(ShouldTransmit(false)) + ); + let mut pending = Retransmits::default(); + let mut stream = RecvStream { + id: StreamId::new(Side::Server, Dir::Uni, 126), + state: &mut client, + pending: &mut pending, + }; + stream.stop(0u32.into()).unwrap(); + + // Open stream 128 + assert_eq!( + client.received( + frame::Stream { + id: StreamId::new(Side::Server, Dir::Uni, 128), + offset: 0, + fin: true, + data: Bytes::from_static(&[]), + }, + 0 + ), + Ok(ShouldTransmit(false)) + ); + } + + #[test] + fn remote_stream_capacity() { + let mut client = make(Side::Client); + for _ in 0..2 { + client.set_max_concurrent(Dir::Uni, 200u32.into()); + client.set_max_concurrent(Dir::Bi, 201u32.into()); + assert_eq!(client.recv.len(), 200 + 201); + assert_eq!(client.max_remote[Dir::Uni as usize], 200); + assert_eq!(client.max_remote[Dir::Bi as usize], 201); + } + } + + #[test] + fn expand_receive_window() { + let mut server = make(Side::Server); + let new_receive_window = 2 * server.receive_window as u32; + let expanded = 
server.set_receive_window(new_receive_window.into()); + assert!(expanded); + assert_eq!(server.receive_window, new_receive_window as u64); + assert_eq!(server.local_max_data, new_receive_window as u64); + assert_eq!(server.receive_window_shrink_debt, 0); + let prev_local_max_data = server.local_max_data; + + // credit, expecting all of them added to local_max_data + let credits = 1024u64; + let should_transmit = server.add_read_credits(credits); + assert_eq!(server.receive_window_shrink_debt, 0); + assert_eq!(server.local_max_data, prev_local_max_data + credits); + assert!(should_transmit.should_transmit()); + } + + #[test] + fn shrink_receive_window() { + let mut server = make(Side::Server); + let new_receive_window = server.receive_window as u32 / 2; + let prev_local_max_data = server.local_max_data; + + // shrink the receive_winbow, local_max_data is not expected to be changed + let shrink_diff = server.receive_window - new_receive_window as u64; + let expanded = server.set_receive_window(new_receive_window.into()); + assert!(!expanded); + assert_eq!(server.receive_window, new_receive_window as u64); + assert_eq!(server.local_max_data, prev_local_max_data); + assert_eq!(server.receive_window_shrink_debt, shrink_diff); + let prev_local_max_data = server.local_max_data; + + // credit twice, local_max_data does not change as it is absorbed by receive_window_shrink_debt + let credits = 1024u64; + for _ in 0..2 { + let expected_receive_window_shrink_debt = server.receive_window_shrink_debt - credits; + let should_transmit = server.add_read_credits(credits); + assert_eq!( + server.receive_window_shrink_debt, + expected_receive_window_shrink_debt + ); + assert_eq!(server.local_max_data, prev_local_max_data); + assert!(!should_transmit.should_transmit()); + } + + // credit again which exceeds all remaining expected_receive_window_shrink_debt + let credits = 1024 * 512; + let prev_local_max_data = server.local_max_data; + let expected_local_max_data = + 
server.local_max_data + (credits - server.receive_window_shrink_debt); + let _should_transmit = server.add_read_credits(credits); + assert_eq!(server.receive_window_shrink_debt, 0); + assert_eq!(server.local_max_data, expected_local_max_data); + assert!(server.local_max_data > prev_local_max_data); + + // credit again, all should be added to local_max_data + let credits = 1024 * 512; + let expected_local_max_data = server.local_max_data + credits; + let should_transmit = server.add_read_credits(credits); + assert_eq!(server.receive_window_shrink_debt, 0); + assert_eq!(server.local_max_data, expected_local_max_data); + assert!(should_transmit.should_transmit()); + } + + #[test] + fn expand_send_window() { + let mut server = make(Side::Server); + + let initial_send_window = server.send_window; + let larger_send_window = initial_send_window * 2; + + // Set `initial_max_data` larger than `send_window` so we're limited by local flow control + server.set_params(&TransportParameters { + initial_max_data: VarInt::MAX, + initial_max_stream_data_uni: VarInt::MAX, + initial_max_streams_uni: VarInt::from_u32(100), + ..TransportParameters::default() + }); + + assert_eq!(server.write_limit(), initial_send_window); + assert_eq!(server.poll(), None); + + let mut retransmits = Retransmits::default(); + let conn_state = ConnState::Established; + + let stream_id = Streams { + state: &mut server, + conn_state: &conn_state, + } + .open(Dir::Uni) + .expect("should be able to open a stream"); + + let mut stream = SendStream { + id: stream_id, + state: &mut server, + pending: &mut retransmits, + conn_state: &conn_state, + }; + + // Check that the stream accepts `initial_send_window` bytes + let initial_send_len = initial_send_window as usize; + let data = vec![0xFFu8; initial_send_len]; + + assert_eq!(stream.write(&data), Ok(initial_send_len)); + + // Try to write the same data again, observe that it's blocked + assert_eq!(stream.write(&data), Err(WriteError::Blocked)); + + // Check that 
we get a `Writable` event after increasing the send window + stream.state.set_send_window(larger_send_window); + assert_eq!( + stream.state.poll(), + Some(StreamEvent::Writable { id: stream_id }) + ); + + // Check that the stream accepts the exact same amount of data again + assert_eq!(stream.write(&data), Ok(initial_send_len)); + assert_eq!(stream.write(&data), Err(WriteError::Blocked)); + + assert_eq!(stream.state.poll(), None); + + // Ack the data + stream.state.received_ack_of(frame::StreamMeta { + id: stream_id, + offsets: 0..larger_send_window, + fin: false, + }); + + assert_eq!( + stream.state.poll(), + Some(StreamEvent::Writable { id: stream_id }) + ); + + // Check that our full send window is available again + assert_eq!(stream.write(&data), Ok(initial_send_len)); + assert_eq!(stream.write(&data), Ok(initial_send_len)); + assert_eq!(stream.write(&data), Err(WriteError::Blocked)); + } + + #[test] + fn shrink_send_window() { + let mut server = make(Side::Server); + + let initial_send_window = server.send_window; + let smaller_send_window = server.send_window / 2; + + // Set `initial_max_data` larger than `send_window` so we're limited by local flow control + server.set_params(&TransportParameters { + initial_max_data: VarInt::MAX, + initial_max_stream_data_uni: VarInt::MAX, + initial_max_streams_uni: VarInt::from_u32(100), + ..TransportParameters::default() + }); + + assert_eq!(server.write_limit(), initial_send_window); + assert_eq!(server.poll(), None); + + let mut retransmits = Retransmits::default(); + let conn_state = ConnState::Established; + + let stream_id = Streams { + state: &mut server, + conn_state: &conn_state, + } + .open(Dir::Uni) + .expect("should be able to open a stream"); + + let mut stream = SendStream { + id: stream_id, + state: &mut server, + pending: &mut retransmits, + conn_state: &conn_state, + }; + + let initial_send_len = initial_send_window as usize; + + let data = vec![0xFFu8; initial_send_len]; + + // Assert that the full send 
window is accepted + assert_eq!(stream.write(&data), Ok(initial_send_len)); + assert_eq!(stream.write(&data), Err(WriteError::Blocked)); + + assert_eq!(stream.state.write_limit(), 0); + assert_eq!(stream.state.poll(), None); + + // Shrink our send window, assert that it's still not writable + stream.state.set_send_window(smaller_send_window); + assert_eq!(stream.state.write_limit(), 0); + assert_eq!(stream.state.poll(), None); + + // Assert that data is still not accepted + assert_eq!(stream.write(&data), Err(WriteError::Blocked)); + + // Ack some data, assert that writes are still not accepted due to outstanding sends + stream.state.received_ack_of(frame::StreamMeta { + id: stream_id, + offsets: 0..smaller_send_window, + fin: false, + }); + + assert_eq!(stream.write(&data), Err(WriteError::Blocked)); + + // Ack the rest of the data + stream.state.received_ack_of(frame::StreamMeta { + id: stream_id, + offsets: smaller_send_window..initial_send_window, + fin: false, + }); + + // This should generate a `Writable` event + assert_eq!( + stream.state.poll(), + Some(StreamEvent::Writable { id: stream_id }) + ); + assert_eq!(stream.state.write_limit(), smaller_send_window); + + // Assert that only `smaller_send_window` bytes are accepted + assert_eq!(stream.write(&data), Ok(smaller_send_window as usize)); + assert_eq!(stream.write(&data), Err(WriteError::Blocked)); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/timer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/timer.rs new file mode 100644 index 0000000000000000000000000000000000000000..566652d0da83cf2b15a819b58d5c966af06b1da5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/connection/timer.rs @@ -0,0 +1,65 @@ +use crate::Instant; + +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] +pub(crate) enum Timer { + /// When to send an ack-eliciting probe 
packet or declare unacked packets lost + LossDetection = 0, + /// When to close the connection after no activity + Idle = 1, + /// When the close timer expires, the connection has been gracefully terminated. + Close = 2, + /// When keys are discarded because they should not be needed anymore + KeyDiscard = 3, + /// When to give up on validating a new path to the peer + PathValidation = 4, + /// When to send a `PING` frame to keep the connection alive + KeepAlive = 5, + /// When pacing will allow us to send a packet + Pacing = 6, + /// When to invalidate old CID and proactively push new one via NEW_CONNECTION_ID frame + PushNewCid = 7, + /// When to send an immediate ACK if there are unacked ack-eliciting packets of the peer + MaxAckDelay = 8, +} + +impl Timer { + pub(crate) const VALUES: [Self; 9] = [ + Self::LossDetection, + Self::Idle, + Self::Close, + Self::KeyDiscard, + Self::PathValidation, + Self::KeepAlive, + Self::Pacing, + Self::PushNewCid, + Self::MaxAckDelay, + ]; +} + +/// A table of data associated with each distinct kind of `Timer` +#[derive(Debug, Copy, Clone, Default)] +pub(crate) struct TimerTable { + data: [Option; 10], +} + +impl TimerTable { + pub(super) fn set(&mut self, timer: Timer, time: Instant) { + self.data[timer as usize] = Some(time); + } + + pub(super) fn get(&self, timer: Timer) -> Option { + self.data[timer as usize] + } + + pub(super) fn stop(&mut self, timer: Timer) { + self.data[timer as usize] = None; + } + + pub(super) fn next_timeout(&self) -> Option { + self.data.iter().filter_map(|&x| x).min() + } + + pub(super) fn is_expired(&self, timer: Timer, after: Instant) -> bool { + self.data[timer as usize].is_some_and(|x| x <= after) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/constant_time.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/constant_time.rs new file mode 100644 index 
0000000000000000000000000000000000000000..94cf6c43f15be1aa6d2a29baa167e134e548bba7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/constant_time.rs @@ -0,0 +1,22 @@ +// This function is non-inline to prevent the optimizer from looking inside it. +#[inline(never)] +fn constant_time_ne(a: &[u8], b: &[u8]) -> u8 { + assert!(a.len() == b.len()); + + // These useless slices make the optimizer elide the bounds checks. + // See the comment in clone_from_slice() added on Rust commit 6a7bc47. + let len = a.len(); + let a = &a[..len]; + let b = &b[..len]; + + let mut tmp = 0; + for i in 0..len { + tmp |= a[i] ^ b[i]; + } + tmp // The compare with 0 must happen outside this function. +} + +/// Compares byte strings in constant time. +pub(crate) fn eq(a: &[u8], b: &[u8]) -> bool { + a.len() == b.len() && constant_time_ne(a, b) == 0 +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/crypto.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/crypto.rs new file mode 100644 index 0000000000000000000000000000000000000000..aebd864d49ba23f30f0e17d8beca88e6834386a7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/crypto.rs @@ -0,0 +1,223 @@ +//! Traits and implementations for the QUIC cryptography protocol +//! +//! The protocol logic in Quinn is contained in types that abstract over the actual +//! cryptographic protocol used. This module contains the traits used for this +//! abstraction layer as well as a single implementation of these traits that uses +//! *ring* and rustls to implement the TLS protocol support. +//! +//! Note that usage of any protocol (version) other than TLS 1.3 does not conform to any +//! published versions of the specification, and will not be supported in QUIC v1. 
+ +use std::{any::Any, str, sync::Arc}; + +use bytes::BytesMut; + +use crate::{ + ConnectError, Side, TransportError, shared::ConnectionId, + transport_parameters::TransportParameters, +}; + +/// Cryptography interface based on *ring* +#[cfg(any(feature = "aws-lc-rs", feature = "ring"))] +pub(crate) mod ring_like; +/// TLS interface based on rustls +#[cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] +pub mod rustls; + +/// A cryptographic session (commonly TLS) +pub trait Session: Send + Sync + 'static { + /// Create the initial set of keys given the client's initial destination ConnectionId + fn initial_keys(&self, dst_cid: &ConnectionId, side: Side) -> Keys; + + /// Get data negotiated during the handshake, if available + /// + /// Returns `None` until the connection emits `HandshakeDataReady`. + fn handshake_data(&self) -> Option>; + + /// Get the peer's identity, if available + fn peer_identity(&self) -> Option>; + + /// Get the 0-RTT keys if available (clients only) + /// + /// On the client side, this method can be used to see if 0-RTT key material is available + /// to start sending data before the protocol handshake has completed. + /// + /// Returns `None` if the key material is not available. This might happen if you have + /// not connected to this server before. + fn early_crypto(&self) -> Option<(Box, Box)>; + + /// If the 0-RTT-encrypted data has been accepted by the peer + fn early_data_accepted(&self) -> Option; + + /// Returns `true` until the connection is fully established. + fn is_handshaking(&self) -> bool; + + /// Read bytes of handshake data + /// + /// This should be called with the contents of `CRYPTO` frames. If it returns `Ok`, the + /// caller should call `write_handshake()` to check if the crypto protocol has anything + /// to send to the peer. This method will only return `true` the first time that + /// handshake data is available. Future calls will always return false. 
+ /// + /// On success, returns `true` iff `self.handshake_data()` has been populated. + fn read_handshake(&mut self, buf: &[u8]) -> Result; + + /// The peer's QUIC transport parameters + /// + /// These are only available after the first flight from the peer has been received. + fn transport_parameters(&self) -> Result, TransportError>; + + /// Writes handshake bytes into the given buffer and optionally returns the negotiated keys + /// + /// When the handshake proceeds to the next phase, this method will return a new set of + /// keys to encrypt data with. + fn write_handshake(&mut self, buf: &mut Vec) -> Option; + + /// Compute keys for the next key update + fn next_1rtt_keys(&mut self) -> Option>>; + + /// Verify the integrity of a retry packet + fn is_valid_retry(&self, orig_dst_cid: &ConnectionId, header: &[u8], payload: &[u8]) -> bool; + + /// Fill `output` with `output.len()` bytes of keying material derived + /// from the [Session]'s secrets, using `label` and `context` for domain + /// separation. + /// + /// This function will fail, returning [ExportKeyingMaterialError], + /// if the requested output length is too large. 
+ fn export_keying_material( + &self, + output: &mut [u8], + label: &[u8], + context: &[u8], + ) -> Result<(), ExportKeyingMaterialError>; +} + +/// A pair of keys for bidirectional communication +pub struct KeyPair { + /// Key for encrypting data + pub local: T, + /// Key for decrypting data + pub remote: T, +} + +/// A complete set of keys for a certain packet space +pub struct Keys { + /// Header protection keys + pub header: KeyPair>, + /// Packet protection keys + pub packet: KeyPair>, +} + +/// Client-side configuration for the crypto protocol +pub trait ClientConfig: Send + Sync { + /// Start a client session with this configuration + fn start_session( + self: Arc, + version: u32, + server_name: &str, + params: &TransportParameters, + ) -> Result, ConnectError>; +} + +/// Server-side configuration for the crypto protocol +pub trait ServerConfig: Send + Sync { + /// Create the initial set of keys given the client's initial destination ConnectionId + fn initial_keys( + &self, + version: u32, + dst_cid: &ConnectionId, + ) -> Result; + + /// Generate the integrity tag for a retry packet + /// + /// Never called if `initial_keys` rejected `version`. + fn retry_tag(&self, version: u32, orig_dst_cid: &ConnectionId, packet: &[u8]) -> [u8; 16]; + + /// Start a server session with this configuration + /// + /// Never called if `initial_keys` rejected `version`. 
+ fn start_session( + self: Arc, + version: u32, + params: &TransportParameters, + ) -> Box; +} + +/// Keys used to protect packet payloads +pub trait PacketKey: Send + Sync { + /// Encrypt the packet payload with the given packet number + fn encrypt(&self, packet: u64, buf: &mut [u8], header_len: usize); + /// Decrypt the packet payload with the given packet number + fn decrypt( + &self, + packet: u64, + header: &[u8], + payload: &mut BytesMut, + ) -> Result<(), CryptoError>; + /// The length of the AEAD tag appended to packets on encryption + fn tag_len(&self) -> usize; + /// Maximum number of packets that may be sent using a single key + fn confidentiality_limit(&self) -> u64; + /// Maximum number of incoming packets that may fail decryption before the connection must be + /// abandoned + fn integrity_limit(&self) -> u64; +} + +/// Keys used to protect packet headers +pub trait HeaderKey: Send + Sync { + /// Decrypt the given packet's header + fn decrypt(&self, pn_offset: usize, packet: &mut [u8]); + /// Encrypt the given packet's header + fn encrypt(&self, pn_offset: usize, packet: &mut [u8]); + /// The sample size used for this key's algorithm + fn sample_size(&self) -> usize; +} + +/// A key for signing with HMAC-based algorithms +pub trait HmacKey: Send + Sync { + /// Method for signing a message + fn sign(&self, data: &[u8], signature_out: &mut [u8]); + /// Length of `sign`'s output + fn signature_len(&self) -> usize; + /// Method for verifying a message + fn verify(&self, data: &[u8], signature: &[u8]) -> Result<(), CryptoError>; +} + +/// Error returned by [Session::export_keying_material]. +/// +/// This error occurs if the requested output length is too large. 
+#[derive(Debug, PartialEq, Eq)] +pub struct ExportKeyingMaterialError; + +/// A pseudo random key for HKDF +pub trait HandshakeTokenKey: Send + Sync { + /// Derive AEAD using hkdf + fn aead_from_hkdf(&self, random_bytes: &[u8]) -> Box; +} + +/// A key for sealing data with AEAD-based algorithms +pub trait AeadKey { + /// Method for sealing message `data` + fn seal(&self, data: &mut Vec, additional_data: &[u8]) -> Result<(), CryptoError>; + /// Method for opening a sealed message `data` + fn open<'a>( + &self, + data: &'a mut [u8], + additional_data: &[u8], + ) -> Result<&'a mut [u8], CryptoError>; +} + +/// Generic crypto errors +#[derive(Debug)] +pub struct CryptoError; + +/// Error indicating that the specified QUIC version is not supported +#[derive(Debug)] +pub struct UnsupportedVersion; + +impl From for ConnectError { + fn from(_: UnsupportedVersion) -> Self { + Self::UnsupportedVersion + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/crypto/ring_like.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/crypto/ring_like.rs new file mode 100644 index 0000000000000000000000000000000000000000..1b5f301bfe5f4a51aaa23df5b2142c7092b6a108 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/crypto/ring_like.rs @@ -0,0 +1,57 @@ +#[cfg(all(feature = "aws-lc-rs", not(feature = "ring")))] +use aws_lc_rs::{aead, error, hkdf, hmac}; +#[cfg(feature = "ring")] +use ring::{aead, error, hkdf, hmac}; + +use crate::crypto::{self, CryptoError}; + +impl crypto::HmacKey for hmac::Key { + fn sign(&self, data: &[u8], out: &mut [u8]) { + out.copy_from_slice(hmac::sign(self, data).as_ref()); + } + + fn signature_len(&self) -> usize { + 32 + } + + fn verify(&self, data: &[u8], signature: &[u8]) -> Result<(), CryptoError> { + Ok(hmac::verify(self, data, signature)?) 
+ } +} + +impl crypto::HandshakeTokenKey for hkdf::Prk { + fn aead_from_hkdf(&self, random_bytes: &[u8]) -> Box { + let mut key_buffer = [0u8; 32]; + let info = [random_bytes]; + let okm = self.expand(&info, hkdf::HKDF_SHA256).unwrap(); + + okm.fill(&mut key_buffer).unwrap(); + + let key = aead::UnboundKey::new(&aead::AES_256_GCM, &key_buffer).unwrap(); + Box::new(aead::LessSafeKey::new(key)) + } +} + +impl crypto::AeadKey for aead::LessSafeKey { + fn seal(&self, data: &mut Vec, additional_data: &[u8]) -> Result<(), CryptoError> { + let aad = aead::Aad::from(additional_data); + let zero_nonce = aead::Nonce::assume_unique_for_key([0u8; 12]); + Ok(self.seal_in_place_append_tag(zero_nonce, aad, data)?) + } + + fn open<'a>( + &self, + data: &'a mut [u8], + additional_data: &[u8], + ) -> Result<&'a mut [u8], CryptoError> { + let aad = aead::Aad::from(additional_data); + let zero_nonce = aead::Nonce::assume_unique_for_key([0u8; 12]); + Ok(self.open_in_place(zero_nonce, aad, data)?) + } +} + +impl From for CryptoError { + fn from(_: error::Unspecified) -> Self { + Self + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/crypto/rustls.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/crypto/rustls.rs new file mode 100644 index 0000000000000000000000000000000000000000..231025b3c0c8dc9ad5a43088f9f0f9ef7b149263 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/crypto/rustls.rs @@ -0,0 +1,645 @@ +use std::{any::Any, io, str, sync::Arc}; + +#[cfg(all(feature = "aws-lc-rs", not(feature = "ring")))] +use aws_lc_rs::aead; +use bytes::BytesMut; +#[cfg(feature = "ring")] +use ring::aead; +pub use rustls::Error; +use rustls::{ + self, CipherSuite, + client::danger::ServerCertVerifier, + pki_types::{CertificateDer, PrivateKeyDer, ServerName}, + quic::{Connection, HeaderProtectionKey, KeyChange, PacketKey, Secrets, Suite, Version}, +}; +#[cfg(feature = 
"platform-verifier")] +use rustls_platform_verifier::BuilderVerifierExt; + +use crate::{ + ConnectError, ConnectionId, Side, TransportError, TransportErrorCode, + crypto::{ + self, CryptoError, ExportKeyingMaterialError, HeaderKey, KeyPair, Keys, UnsupportedVersion, + }, + transport_parameters::TransportParameters, +}; + +impl From for rustls::Side { + fn from(s: Side) -> Self { + match s { + Side::Client => Self::Client, + Side::Server => Self::Server, + } + } +} + +/// A rustls TLS session +pub struct TlsSession { + version: Version, + got_handshake_data: bool, + next_secrets: Option, + inner: Connection, + suite: Suite, +} + +impl TlsSession { + fn side(&self) -> Side { + match self.inner { + Connection::Client(_) => Side::Client, + Connection::Server(_) => Side::Server, + } + } +} + +impl crypto::Session for TlsSession { + fn initial_keys(&self, dst_cid: &ConnectionId, side: Side) -> Keys { + initial_keys(self.version, *dst_cid, side, &self.suite) + } + + fn handshake_data(&self) -> Option> { + if !self.got_handshake_data { + return None; + } + Some(Box::new(HandshakeData { + protocol: self.inner.alpn_protocol().map(|x| x.into()), + server_name: match self.inner { + Connection::Client(_) => None, + Connection::Server(ref session) => session.server_name().map(|x| x.into()), + }, + })) + } + + /// For the rustls `TlsSession`, the `Any` type is `Vec` + fn peer_identity(&self) -> Option> { + self.inner.peer_certificates().map(|v| -> Box { + Box::new( + v.iter() + .map(|v| v.clone().into_owned()) + .collect::>>(), + ) + }) + } + + fn early_crypto(&self) -> Option<(Box, Box)> { + let keys = self.inner.zero_rtt_keys()?; + Some((Box::new(keys.header), Box::new(keys.packet))) + } + + fn early_data_accepted(&self) -> Option { + match self.inner { + Connection::Client(ref session) => Some(session.is_early_data_accepted()), + _ => None, + } + } + + fn is_handshaking(&self) -> bool { + self.inner.is_handshaking() + } + + fn read_handshake(&mut self, buf: &[u8]) -> Result { 
+ self.inner.read_hs(buf).map_err(|e| { + if let Some(alert) = self.inner.alert() { + TransportError { + code: TransportErrorCode::crypto(alert.into()), + frame: None, + reason: e.to_string(), + } + } else { + TransportError::PROTOCOL_VIOLATION(format!("TLS error: {e}")) + } + })?; + if !self.got_handshake_data { + // Hack around the lack of an explicit signal from rustls to reflect ClientHello being + // ready on incoming connections, or ALPN negotiation completing on outgoing + // connections. + let have_server_name = match self.inner { + Connection::Client(_) => false, + Connection::Server(ref session) => session.server_name().is_some(), + }; + if self.inner.alpn_protocol().is_some() || have_server_name || !self.is_handshaking() { + self.got_handshake_data = true; + return Ok(true); + } + } + Ok(false) + } + + fn transport_parameters(&self) -> Result, TransportError> { + match self.inner.quic_transport_parameters() { + None => Ok(None), + Some(buf) => match TransportParameters::read(self.side(), &mut io::Cursor::new(buf)) { + Ok(params) => Ok(Some(params)), + Err(e) => Err(e.into()), + }, + } + } + + fn write_handshake(&mut self, buf: &mut Vec) -> Option { + let keys = match self.inner.write_hs(buf)? 
{ + KeyChange::Handshake { keys } => keys, + KeyChange::OneRtt { keys, next } => { + self.next_secrets = Some(next); + keys + } + }; + + Some(Keys { + header: KeyPair { + local: Box::new(keys.local.header), + remote: Box::new(keys.remote.header), + }, + packet: KeyPair { + local: Box::new(keys.local.packet), + remote: Box::new(keys.remote.packet), + }, + }) + } + + fn next_1rtt_keys(&mut self) -> Option>> { + let secrets = self.next_secrets.as_mut()?; + let keys = secrets.next_packet_keys(); + Some(KeyPair { + local: Box::new(keys.local), + remote: Box::new(keys.remote), + }) + } + + fn is_valid_retry(&self, orig_dst_cid: &ConnectionId, header: &[u8], payload: &[u8]) -> bool { + let tag_start = match payload.len().checked_sub(16) { + Some(x) => x, + None => return false, + }; + + let mut pseudo_packet = + Vec::with_capacity(header.len() + payload.len() + orig_dst_cid.len() + 1); + pseudo_packet.push(orig_dst_cid.len() as u8); + pseudo_packet.extend_from_slice(orig_dst_cid); + pseudo_packet.extend_from_slice(header); + let tag_start = tag_start + pseudo_packet.len(); + pseudo_packet.extend_from_slice(payload); + + let (nonce, key) = match self.version { + Version::V1 => (RETRY_INTEGRITY_NONCE_V1, RETRY_INTEGRITY_KEY_V1), + Version::V1Draft => (RETRY_INTEGRITY_NONCE_DRAFT, RETRY_INTEGRITY_KEY_DRAFT), + _ => unreachable!(), + }; + + let nonce = aead::Nonce::assume_unique_for_key(nonce); + let key = aead::LessSafeKey::new(aead::UnboundKey::new(&aead::AES_128_GCM, &key).unwrap()); + + let (aad, tag) = pseudo_packet.split_at_mut(tag_start); + key.open_in_place(nonce, aead::Aad::from(aad), tag).is_ok() + } + + fn export_keying_material( + &self, + output: &mut [u8], + label: &[u8], + context: &[u8], + ) -> Result<(), ExportKeyingMaterialError> { + self.inner + .export_keying_material(output, label, Some(context)) + .map_err(|_| ExportKeyingMaterialError)?; + Ok(()) + } +} + +const RETRY_INTEGRITY_KEY_DRAFT: [u8; 16] = [ + 0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 
0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b, 0xe1, +]; +const RETRY_INTEGRITY_NONCE_DRAFT: [u8; 12] = [ + 0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c, +]; + +const RETRY_INTEGRITY_KEY_V1: [u8; 16] = [ + 0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e, +]; +const RETRY_INTEGRITY_NONCE_V1: [u8; 12] = [ + 0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb, +]; + +impl crypto::HeaderKey for Box { + fn decrypt(&self, pn_offset: usize, packet: &mut [u8]) { + let (header, sample) = packet.split_at_mut(pn_offset + 4); + let (first, rest) = header.split_at_mut(1); + let pn_end = Ord::min(pn_offset + 3, rest.len()); + self.decrypt_in_place( + &sample[..self.sample_size()], + &mut first[0], + &mut rest[pn_offset - 1..pn_end], + ) + .unwrap(); + } + + fn encrypt(&self, pn_offset: usize, packet: &mut [u8]) { + let (header, sample) = packet.split_at_mut(pn_offset + 4); + let (first, rest) = header.split_at_mut(1); + let pn_end = Ord::min(pn_offset + 3, rest.len()); + self.encrypt_in_place( + &sample[..self.sample_size()], + &mut first[0], + &mut rest[pn_offset - 1..pn_end], + ) + .unwrap(); + } + + fn sample_size(&self) -> usize { + self.sample_len() + } +} + +/// Authentication data for (rustls) TLS session +pub struct HandshakeData { + /// The negotiated application protocol, if ALPN is in use + /// + /// Guaranteed to be set if a nonempty list of protocols was specified for this connection. + pub protocol: Option>, + /// The server name specified by the client, if any + /// + /// Always `None` for outgoing connections + pub server_name: Option, +} + +/// A QUIC-compatible TLS client configuration +/// +/// Quinn implicitly constructs a `QuicClientConfig` with reasonable defaults within +/// [`ClientConfig::with_root_certificates()`][root_certs] and [`ClientConfig::with_platform_verifier()`][platform]. 
+/// Alternatively, `QuicClientConfig`'s [`TryFrom`] implementation can be used to wrap around a +/// custom [`rustls::ClientConfig`], in which case care should be taken around certain points: +/// +/// - If `enable_early_data` is not set to true, then sending 0-RTT data will not be possible on +/// outgoing connections. +/// - The [`rustls::ClientConfig`] must have TLS 1.3 support enabled for conversion to succeed. +/// +/// The object in the `resumption` field of the inner [`rustls::ClientConfig`] determines whether +/// calling `into_0rtt` on outgoing connections returns `Ok` or `Err`. It typically allows +/// `into_0rtt` to proceed if it recognizes the server name, and defaults to an in-memory cache of +/// 256 server names. +/// +/// [root_certs]: crate::config::ClientConfig::with_root_certificates() +/// [platform]: crate::config::ClientConfig::with_platform_verifier() +pub struct QuicClientConfig { + pub(crate) inner: Arc, + initial: Suite, +} + +impl QuicClientConfig { + #[cfg(feature = "platform-verifier")] + pub(crate) fn with_platform_verifier() -> Result { + // Keep in sync with `inner()` below + let mut inner = rustls::ClientConfig::builder_with_provider(configured_provider()) + .with_protocol_versions(&[&rustls::version::TLS13]) + .unwrap() // The default providers support TLS 1.3 + .with_platform_verifier()? + .with_no_client_auth(); + + inner.enable_early_data = true; + Ok(Self { + // We're confident that the *ring* default provider contains TLS13_AES_128_GCM_SHA256 + initial: initial_suite_from_provider(inner.crypto_provider()) + .expect("no initial cipher suite found"), + inner: Arc::new(inner), + }) + } + + /// Initialize a sane QUIC-compatible TLS client configuration + /// + /// QUIC requires that TLS 1.3 be enabled. Advanced users can use any [`rustls::ClientConfig`] that + /// satisfies this requirement. 
+ pub(crate) fn new(verifier: Arc) -> Self { + let inner = Self::inner(verifier); + Self { + // We're confident that the *ring* default provider contains TLS13_AES_128_GCM_SHA256 + initial: initial_suite_from_provider(inner.crypto_provider()) + .expect("no initial cipher suite found"), + inner: Arc::new(inner), + } + } + + /// Initialize a QUIC-compatible TLS client configuration with a separate initial cipher suite + /// + /// This is useful if you want to avoid the initial cipher suite for traffic encryption. + pub fn with_initial( + inner: Arc, + initial: Suite, + ) -> Result { + match initial.suite.common.suite { + CipherSuite::TLS13_AES_128_GCM_SHA256 => Ok(Self { inner, initial }), + _ => Err(NoInitialCipherSuite { specific: true }), + } + } + + pub(crate) fn inner(verifier: Arc) -> rustls::ClientConfig { + // Keep in sync with `with_platform_verifier()` above + let mut config = rustls::ClientConfig::builder_with_provider(configured_provider()) + .with_protocol_versions(&[&rustls::version::TLS13]) + .unwrap() // The default providers support TLS 1.3 + .dangerous() + .with_custom_certificate_verifier(verifier) + .with_no_client_auth(); + + config.enable_early_data = true; + config + } +} + +impl crypto::ClientConfig for QuicClientConfig { + fn start_session( + self: Arc, + version: u32, + server_name: &str, + params: &TransportParameters, + ) -> Result, ConnectError> { + let version = interpret_version(version)?; + Ok(Box::new(TlsSession { + version, + got_handshake_data: false, + next_secrets: None, + inner: rustls::quic::Connection::Client( + rustls::quic::ClientConnection::new( + self.inner.clone(), + version, + ServerName::try_from(server_name) + .map_err(|_| ConnectError::InvalidServerName(server_name.into()))? 
+ .to_owned(), + to_vec(params), + ) + .unwrap(), + ), + suite: self.initial, + })) + } +} + +impl TryFrom for QuicClientConfig { + type Error = NoInitialCipherSuite; + + fn try_from(inner: rustls::ClientConfig) -> Result { + Arc::new(inner).try_into() + } +} + +impl TryFrom> for QuicClientConfig { + type Error = NoInitialCipherSuite; + + fn try_from(inner: Arc) -> Result { + Ok(Self { + initial: initial_suite_from_provider(inner.crypto_provider()) + .ok_or(NoInitialCipherSuite { specific: false })?, + inner, + }) + } +} + +/// The initial cipher suite (AES-128-GCM-SHA256) is not available +/// +/// When the cipher suite is supplied `with_initial()`, it must be +/// [`CipherSuite::TLS13_AES_128_GCM_SHA256`]. When the cipher suite is derived from a config's +/// [`CryptoProvider`][provider], that provider must reference a cipher suite with the same ID. +/// +/// [provider]: rustls::crypto::CryptoProvider +#[derive(Clone, Debug)] +pub struct NoInitialCipherSuite { + /// Whether the initial cipher suite was supplied by the caller + specific: bool, +} + +impl std::fmt::Display for NoInitialCipherSuite { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.write_str(match self.specific { + true => "invalid cipher suite specified", + false => "no initial cipher suite found", + }) + } +} + +impl std::error::Error for NoInitialCipherSuite {} + +/// A QUIC-compatible TLS server configuration +/// +/// Quinn implicitly constructs a `QuicServerConfig` with reasonable defaults within +/// [`ServerConfig::with_single_cert()`][single]. Alternatively, `QuicServerConfig`'s [`TryFrom`] +/// implementation or `with_initial` method can be used to wrap around a custom +/// [`rustls::ServerConfig`], in which case care should be taken around certain points: +/// +/// - If `max_early_data_size` is not set to `u32::MAX`, the server will not be able to accept +/// incoming 0-RTT data. QUIC prohibits `max_early_data_size` values other than 0 or `u32::MAX`. 
+/// - The `rustls::ServerConfig` must have TLS 1.3 support enabled for conversion to succeed. +/// +/// [single]: crate::config::ServerConfig::with_single_cert() +pub struct QuicServerConfig { + inner: Arc, + initial: Suite, +} + +impl QuicServerConfig { + pub(crate) fn new( + cert_chain: Vec>, + key: PrivateKeyDer<'static>, + ) -> Result { + let inner = Self::inner(cert_chain, key)?; + Ok(Self { + // We're confident that the *ring* default provider contains TLS13_AES_128_GCM_SHA256 + initial: initial_suite_from_provider(inner.crypto_provider()) + .expect("no initial cipher suite found"), + inner: Arc::new(inner), + }) + } + + /// Initialize a QUIC-compatible TLS client configuration with a separate initial cipher suite + /// + /// This is useful if you want to avoid the initial cipher suite for traffic encryption. + pub fn with_initial( + inner: Arc, + initial: Suite, + ) -> Result { + match initial.suite.common.suite { + CipherSuite::TLS13_AES_128_GCM_SHA256 => Ok(Self { inner, initial }), + _ => Err(NoInitialCipherSuite { specific: true }), + } + } + + /// Initialize a sane QUIC-compatible TLS server configuration + /// + /// QUIC requires that TLS 1.3 be enabled, and that the maximum early data size is either 0 or + /// `u32::MAX`. Advanced users can use any [`rustls::ServerConfig`] that satisfies these + /// requirements. 
+ pub(crate) fn inner( + cert_chain: Vec>, + key: PrivateKeyDer<'static>, + ) -> Result { + let mut inner = rustls::ServerConfig::builder_with_provider(configured_provider()) + .with_protocol_versions(&[&rustls::version::TLS13]) + .unwrap() // The *ring* default provider supports TLS 1.3 + .with_no_client_auth() + .with_single_cert(cert_chain, key)?; + + inner.max_early_data_size = u32::MAX; + Ok(inner) + } +} + +impl TryFrom for QuicServerConfig { + type Error = NoInitialCipherSuite; + + fn try_from(inner: rustls::ServerConfig) -> Result { + Arc::new(inner).try_into() + } +} + +impl TryFrom> for QuicServerConfig { + type Error = NoInitialCipherSuite; + + fn try_from(inner: Arc) -> Result { + Ok(Self { + initial: initial_suite_from_provider(inner.crypto_provider()) + .ok_or(NoInitialCipherSuite { specific: false })?, + inner, + }) + } +} + +impl crypto::ServerConfig for QuicServerConfig { + fn start_session( + self: Arc, + version: u32, + params: &TransportParameters, + ) -> Box { + // Safe: `start_session()` is never called if `initial_keys()` rejected `version` + let version = interpret_version(version).unwrap(); + Box::new(TlsSession { + version, + got_handshake_data: false, + next_secrets: None, + inner: rustls::quic::Connection::Server( + rustls::quic::ServerConnection::new(self.inner.clone(), version, to_vec(params)) + .unwrap(), + ), + suite: self.initial, + }) + } + + fn initial_keys( + &self, + version: u32, + dst_cid: &ConnectionId, + ) -> Result { + let version = interpret_version(version)?; + Ok(initial_keys(version, *dst_cid, Side::Server, &self.initial)) + } + + fn retry_tag(&self, version: u32, orig_dst_cid: &ConnectionId, packet: &[u8]) -> [u8; 16] { + // Safe: `start_session()` is never called if `initial_keys()` rejected `version` + let version = interpret_version(version).unwrap(); + let (nonce, key) = match version { + Version::V1 => (RETRY_INTEGRITY_NONCE_V1, RETRY_INTEGRITY_KEY_V1), + Version::V1Draft => (RETRY_INTEGRITY_NONCE_DRAFT, 
RETRY_INTEGRITY_KEY_DRAFT), + _ => unreachable!(), + }; + + let mut pseudo_packet = Vec::with_capacity(packet.len() + orig_dst_cid.len() + 1); + pseudo_packet.push(orig_dst_cid.len() as u8); + pseudo_packet.extend_from_slice(orig_dst_cid); + pseudo_packet.extend_from_slice(packet); + + let nonce = aead::Nonce::assume_unique_for_key(nonce); + let key = aead::LessSafeKey::new(aead::UnboundKey::new(&aead::AES_128_GCM, &key).unwrap()); + + let tag = key + .seal_in_place_separate_tag(nonce, aead::Aad::from(pseudo_packet), &mut []) + .unwrap(); + let mut result = [0; 16]; + result.copy_from_slice(tag.as_ref()); + result + } +} + +pub(crate) fn initial_suite_from_provider( + provider: &Arc, +) -> Option { + provider + .cipher_suites + .iter() + .find_map(|cs| match (cs.suite(), cs.tls13()) { + (rustls::CipherSuite::TLS13_AES_128_GCM_SHA256, Some(suite)) => { + Some(suite.quic_suite()) + } + _ => None, + }) + .flatten() +} + +pub(crate) fn configured_provider() -> Arc { + #[cfg(all(feature = "rustls-aws-lc-rs", not(feature = "rustls-ring")))] + let provider = rustls::crypto::aws_lc_rs::default_provider(); + #[cfg(feature = "rustls-ring")] + let provider = rustls::crypto::ring::default_provider(); + Arc::new(provider) +} + +fn to_vec(params: &TransportParameters) -> Vec { + let mut bytes = Vec::new(); + params.write(&mut bytes); + bytes +} + +pub(crate) fn initial_keys( + version: Version, + dst_cid: ConnectionId, + side: Side, + suite: &Suite, +) -> Keys { + let keys = suite.keys(&dst_cid, side.into(), version); + Keys { + header: KeyPair { + local: Box::new(keys.local.header), + remote: Box::new(keys.remote.header), + }, + packet: KeyPair { + local: Box::new(keys.local.packet), + remote: Box::new(keys.remote.packet), + }, + } +} + +impl crypto::PacketKey for Box { + fn encrypt(&self, packet: u64, buf: &mut [u8], header_len: usize) { + let (header, payload_tag) = buf.split_at_mut(header_len); + let (payload, tag_storage) = payload_tag.split_at_mut(payload_tag.len() - 
self.tag_len()); + let tag = self.encrypt_in_place(packet, &*header, payload).unwrap(); + tag_storage.copy_from_slice(tag.as_ref()); + } + + fn decrypt( + &self, + packet: u64, + header: &[u8], + payload: &mut BytesMut, + ) -> Result<(), CryptoError> { + let plain = self + .decrypt_in_place(packet, header, payload.as_mut()) + .map_err(|_| CryptoError)?; + let plain_len = plain.len(); + payload.truncate(plain_len); + Ok(()) + } + + fn tag_len(&self) -> usize { + (**self).tag_len() + } + + fn confidentiality_limit(&self) -> u64 { + (**self).confidentiality_limit() + } + + fn integrity_limit(&self) -> u64 { + (**self).integrity_limit() + } +} + +fn interpret_version(version: u32) -> Result { + match version { + 0xff00_001d..=0xff00_0020 => Ok(Version::V1Draft), + 0x0000_0001 | 0xff00_0021..=0xff00_0022 => Ok(Version::V1), + _ => Err(UnsupportedVersion), + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/endpoint.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/endpoint.rs new file mode 100644 index 0000000000000000000000000000000000000000..c04694577f175a5b87e346e1db2cb933d0487162 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/endpoint.rs @@ -0,0 +1,1331 @@ +use std::{ + collections::{HashMap, hash_map}, + convert::TryFrom, + fmt, mem, + net::{IpAddr, SocketAddr}, + ops::{Index, IndexMut}, + sync::Arc, +}; + +use bytes::{BufMut, Bytes, BytesMut}; +use rand::{Rng, RngCore, SeedableRng, rngs::StdRng}; +use rustc_hash::FxHashMap; +use slab::Slab; +use thiserror::Error; +use tracing::{debug, error, trace, warn}; + +use crate::{ + Duration, INITIAL_MTU, Instant, MAX_CID_SIZE, MIN_INITIAL_SIZE, RESET_TOKEN_SIZE, ResetToken, + Side, Transmit, TransportConfig, TransportError, + cid_generator::ConnectionIdGenerator, + coding::BufMutExt, + config::{ClientConfig, EndpointConfig, ServerConfig}, + connection::{Connection, ConnectionError, SideArgs}, + 
crypto::{self, Keys, UnsupportedVersion}, + frame, + packet::{ + FixedLengthConnectionIdParser, Header, InitialHeader, InitialPacket, PacketDecodeError, + PacketNumber, PartialDecode, ProtectedInitialHeader, + }, + shared::{ + ConnectionEvent, ConnectionEventInner, ConnectionId, DatagramConnectionEvent, EcnCodepoint, + EndpointEvent, EndpointEventInner, IssuedCid, + }, + token::{IncomingToken, InvalidRetryTokenError, Token, TokenPayload}, + transport_parameters::{PreferredAddress, TransportParameters}, +}; + +/// The main entry point to the library +/// +/// This object performs no I/O whatsoever. Instead, it consumes incoming packets and +/// connection-generated events via `handle` and `handle_event`. +pub struct Endpoint { + rng: StdRng, + index: ConnectionIndex, + connections: Slab, + local_cid_generator: Box, + config: Arc, + server_config: Option>, + /// Whether the underlying UDP socket promises not to fragment packets + allow_mtud: bool, + /// Time at which a stateless reset was most recently sent + last_stateless_reset: Option, + /// Buffered Initial and 0-RTT messages for pending incoming connections + incoming_buffers: Slab, + all_incoming_buffers_total_bytes: u64, +} + +impl Endpoint { + /// Create a new endpoint + /// + /// `allow_mtud` enables path MTU detection when requested by `Connection` configuration for + /// better performance. This requires that outgoing packets are never fragmented, which can be + /// achieved via e.g. the `IPV6_DONTFRAG` socket option. + /// + /// If `rng_seed` is provided, it will be used to initialize the endpoint's rng (having priority + /// over the rng seed configured in [`EndpointConfig`]). Note that the `rng_seed` parameter will + /// be removed in a future release, so prefer setting it to `None` and configuring rng seeds + /// using [`EndpointConfig::rng_seed`]. 
+ pub fn new( + config: Arc, + server_config: Option>, + allow_mtud: bool, + rng_seed: Option<[u8; 32]>, + ) -> Self { + let rng_seed = rng_seed.or(config.rng_seed); + Self { + rng: rng_seed.map_or(StdRng::from_os_rng(), StdRng::from_seed), + index: ConnectionIndex::default(), + connections: Slab::new(), + local_cid_generator: (config.connection_id_generator_factory.as_ref())(), + config, + server_config, + allow_mtud, + last_stateless_reset: None, + incoming_buffers: Slab::new(), + all_incoming_buffers_total_bytes: 0, + } + } + + /// Replace the server configuration, affecting new incoming connections only + pub fn set_server_config(&mut self, server_config: Option>) { + self.server_config = server_config; + } + + /// Process `EndpointEvent`s emitted from related `Connection`s + /// + /// In turn, processing this event may return a `ConnectionEvent` for the same `Connection`. + pub fn handle_event( + &mut self, + ch: ConnectionHandle, + event: EndpointEvent, + ) -> Option { + use EndpointEventInner::*; + match event.0 { + NeedIdentifiers(now, n) => { + return Some(self.send_new_identifiers(now, ch, n)); + } + ResetToken(remote, token) => { + if let Some(old) = self.connections[ch].reset_token.replace((remote, token)) { + self.index.connection_reset_tokens.remove(old.0, old.1); + } + if self.index.connection_reset_tokens.insert(remote, token, ch) { + warn!("duplicate reset token"); + } + } + RetireConnectionId(now, seq, allow_more_cids) => { + if let Some(cid) = self.connections[ch].loc_cids.remove(&seq) { + trace!("peer retired CID {}: {}", seq, cid); + self.index.retire(cid); + if allow_more_cids { + return Some(self.send_new_identifiers(now, ch, 1)); + } + } + } + Drained => { + if let Some(conn) = self.connections.try_remove(ch.0) { + self.index.remove(&conn); + } else { + // This indicates a bug in downstream code, which could cause spurious + // connection loss instead of this error if the CID was (re)allocated prior to + // the illegal call. 
+ error!(id = ch.0, "unknown connection drained"); + } + } + } + None + } + + /// Process an incoming UDP datagram + pub fn handle( + &mut self, + now: Instant, + remote: SocketAddr, + local_ip: Option, + ecn: Option, + data: BytesMut, + buf: &mut Vec, + ) -> Option { + // Partially decode packet or short-circuit if unable + let datagram_len = data.len(); + let event = match PartialDecode::new( + data, + &FixedLengthConnectionIdParser::new(self.local_cid_generator.cid_len()), + &self.config.supported_versions, + self.config.grease_quic_bit, + ) { + Ok((first_decode, remaining)) => DatagramConnectionEvent { + now, + remote, + ecn, + first_decode, + remaining, + }, + Err(PacketDecodeError::UnsupportedVersion { + src_cid, + dst_cid, + version, + }) => { + if self.server_config.is_none() { + debug!("dropping packet with unsupported version"); + return None; + } + trace!("sending version negotiation"); + // Negotiate versions + Header::VersionNegotiate { + random: self.rng.random::() | 0x40, + src_cid: dst_cid, + dst_cid: src_cid, + } + .encode(buf); + // Grease with a reserved version + buf.write::(match version { + 0x0a1a_2a3a => 0x0a1a_2a4a, + _ => 0x0a1a_2a3a, + }); + for &version in &self.config.supported_versions { + buf.write(version); + } + return Some(DatagramEvent::Response(Transmit { + destination: remote, + ecn: None, + size: buf.len(), + segment_size: None, + src_ip: local_ip, + })); + } + Err(e) => { + trace!("malformed header: {}", e); + return None; + } + }; + + let addresses = FourTuple { remote, local_ip }; + let dst_cid = event.first_decode.dst_cid(); + + if let Some(route_to) = self.index.get(&addresses, &event.first_decode) { + // Handle packet on existing connection + match route_to { + RouteDatagramTo::Incoming(incoming_idx) => { + let incoming_buffer = &mut self.incoming_buffers[incoming_idx]; + let config = &self.server_config.as_ref().unwrap(); + + if incoming_buffer + .total_bytes + .checked_add(datagram_len as u64) + .is_some_and(|n| n <= 
config.incoming_buffer_size) + && self + .all_incoming_buffers_total_bytes + .checked_add(datagram_len as u64) + .is_some_and(|n| n <= config.incoming_buffer_size_total) + { + incoming_buffer.datagrams.push(event); + incoming_buffer.total_bytes += datagram_len as u64; + self.all_incoming_buffers_total_bytes += datagram_len as u64; + } + + None + } + RouteDatagramTo::Connection(ch) => Some(DatagramEvent::ConnectionEvent( + ch, + ConnectionEvent(ConnectionEventInner::Datagram(event)), + )), + } + } else if event.first_decode.initial_header().is_some() { + // Potentially create a new connection + + self.handle_first_packet(datagram_len, event, addresses, buf) + } else if event.first_decode.has_long_header() { + debug!( + "ignoring non-initial packet for unknown connection {}", + dst_cid + ); + None + } else if !event.first_decode.is_initial() + && self.local_cid_generator.validate(dst_cid).is_err() + { + debug!("dropping packet with invalid CID"); + None + } else if dst_cid.is_empty() { + trace!("dropping unrecognized short packet without ID"); + None + } else { + // If we got this far, we're receiving a seemingly valid packet for an unknown + // connection. Send a stateless reset if possible. + self.stateless_reset(now, datagram_len, addresses, *dst_cid, buf) + .map(DatagramEvent::Response) + } + } + + fn stateless_reset( + &mut self, + now: Instant, + inciting_dgram_len: usize, + addresses: FourTuple, + dst_cid: ConnectionId, + buf: &mut Vec, + ) -> Option { + if self + .last_stateless_reset + .is_some_and(|last| last + self.config.min_reset_interval > now) + { + debug!("ignoring unexpected packet within minimum stateless reset interval"); + return None; + } + + /// Minimum amount of padding for the stateless reset to look like a short-header packet + const MIN_PADDING_LEN: usize = 5; + + // Prevent amplification attacks and reset loops by ensuring we pad to at most 1 byte + // smaller than the inciting packet. 
+ let max_padding_len = match inciting_dgram_len.checked_sub(RESET_TOKEN_SIZE) { + Some(headroom) if headroom > MIN_PADDING_LEN => headroom - 1, + _ => { + debug!( + "ignoring unexpected {} byte packet: not larger than minimum stateless reset size", + inciting_dgram_len + ); + return None; + } + }; + + debug!( + "sending stateless reset for {} to {}", + dst_cid, addresses.remote + ); + self.last_stateless_reset = Some(now); + // Resets with at least this much padding can't possibly be distinguished from real packets + const IDEAL_MIN_PADDING_LEN: usize = MIN_PADDING_LEN + MAX_CID_SIZE; + let padding_len = if max_padding_len <= IDEAL_MIN_PADDING_LEN { + max_padding_len + } else { + self.rng + .random_range(IDEAL_MIN_PADDING_LEN..max_padding_len) + }; + buf.reserve(padding_len + RESET_TOKEN_SIZE); + buf.resize(padding_len, 0); + self.rng.fill_bytes(&mut buf[0..padding_len]); + buf[0] = 0b0100_0000 | (buf[0] >> 2); + buf.extend_from_slice(&ResetToken::new(&*self.config.reset_key, dst_cid)); + + debug_assert!(buf.len() < inciting_dgram_len); + + Some(Transmit { + destination: addresses.remote, + ecn: None, + size: buf.len(), + segment_size: None, + src_ip: addresses.local_ip, + }) + } + + /// Initiate a connection + pub fn connect( + &mut self, + now: Instant, + config: ClientConfig, + remote: SocketAddr, + server_name: &str, + ) -> Result<(ConnectionHandle, Connection), ConnectError> { + if self.cids_exhausted() { + return Err(ConnectError::CidsExhausted); + } + if remote.port() == 0 || remote.ip().is_unspecified() { + return Err(ConnectError::InvalidRemoteAddress(remote)); + } + if !self.config.supported_versions.contains(&config.version) { + return Err(ConnectError::UnsupportedVersion); + } + + let remote_id = (config.initial_dst_cid_provider)(); + trace!(initial_dcid = %remote_id); + + let ch = ConnectionHandle(self.connections.vacant_key()); + let loc_cid = self.new_cid(ch); + let params = TransportParameters::new( + &config.transport, + &self.config, + 
self.local_cid_generator.as_ref(), + loc_cid, + None, + &mut self.rng, + ); + let tls = config + .crypto + .start_session(config.version, server_name, ¶ms)?; + + let conn = self.add_connection( + ch, + config.version, + remote_id, + loc_cid, + remote_id, + FourTuple { + remote, + local_ip: None, + }, + now, + tls, + config.transport, + SideArgs::Client { + token_store: config.token_store, + server_name: server_name.into(), + }, + ); + Ok((ch, conn)) + } + + fn send_new_identifiers( + &mut self, + now: Instant, + ch: ConnectionHandle, + num: u64, + ) -> ConnectionEvent { + let mut ids = vec![]; + for _ in 0..num { + let id = self.new_cid(ch); + let meta = &mut self.connections[ch]; + let sequence = meta.cids_issued; + meta.cids_issued += 1; + meta.loc_cids.insert(sequence, id); + ids.push(IssuedCid { + sequence, + id, + reset_token: ResetToken::new(&*self.config.reset_key, id), + }); + } + ConnectionEvent(ConnectionEventInner::NewIdentifiers(ids, now)) + } + + /// Generate a connection ID for `ch` + fn new_cid(&mut self, ch: ConnectionHandle) -> ConnectionId { + loop { + let cid = self.local_cid_generator.generate_cid(); + if cid.is_empty() { + // Zero-length CID; nothing to track + debug_assert_eq!(self.local_cid_generator.cid_len(), 0); + return cid; + } + if let hash_map::Entry::Vacant(e) = self.index.connection_ids.entry(cid) { + e.insert(ch); + break cid; + } + } + } + + fn handle_first_packet( + &mut self, + datagram_len: usize, + event: DatagramConnectionEvent, + addresses: FourTuple, + buf: &mut Vec, + ) -> Option { + let dst_cid = event.first_decode.dst_cid(); + let header = event.first_decode.initial_header().unwrap(); + + let Some(server_config) = &self.server_config else { + debug!("packet for unrecognized connection {}", dst_cid); + return self + .stateless_reset(event.now, datagram_len, addresses, *dst_cid, buf) + .map(DatagramEvent::Response); + }; + + if datagram_len < MIN_INITIAL_SIZE as usize { + debug!("ignoring short initial for connection {}", 
dst_cid); + return None; + } + + let crypto = match server_config.crypto.initial_keys(header.version, dst_cid) { + Ok(keys) => keys, + Err(UnsupportedVersion) => { + // This probably indicates that the user set supported_versions incorrectly in + // `EndpointConfig`. + debug!( + "ignoring initial packet version {:#x} unsupported by cryptographic layer", + header.version + ); + return None; + } + }; + + if let Err(reason) = self.early_validate_first_packet(header) { + return Some(DatagramEvent::Response(self.initial_close( + header.version, + addresses, + &crypto, + &header.src_cid, + reason, + buf, + ))); + } + + let packet = match event.first_decode.finish(Some(&*crypto.header.remote)) { + Ok(packet) => packet, + Err(e) => { + trace!("unable to decode initial packet: {}", e); + return None; + } + }; + + if !packet.reserved_bits_valid() { + debug!("dropping connection attempt with invalid reserved bits"); + return None; + } + + let Header::Initial(header) = packet.header else { + panic!("non-initial packet in handle_first_packet()"); + }; + + let server_config = self.server_config.as_ref().unwrap().clone(); + + let token = match IncomingToken::from_header(&header, &server_config, addresses.remote) { + Ok(token) => token, + Err(InvalidRetryTokenError) => { + debug!("rejecting invalid retry token"); + return Some(DatagramEvent::Response(self.initial_close( + header.version, + addresses, + &crypto, + &header.src_cid, + TransportError::INVALID_TOKEN(""), + buf, + ))); + } + }; + + let incoming_idx = self.incoming_buffers.insert(IncomingBuffer::default()); + self.index + .insert_initial_incoming(header.dst_cid, incoming_idx); + + Some(DatagramEvent::NewConnection(Incoming { + received_at: event.now, + addresses, + ecn: event.ecn, + packet: InitialPacket { + header, + header_data: packet.header_data, + payload: packet.payload, + }, + rest: event.remaining, + crypto, + token, + incoming_idx, + improper_drop_warner: IncomingImproperDropWarner, + })) + } + + /// Attempt to 
accept this incoming connection (an error may still occur) + // AcceptError cannot be made smaller without semver breakage + #[allow(clippy::result_large_err)] + pub fn accept( + &mut self, + mut incoming: Incoming, + now: Instant, + buf: &mut Vec, + server_config: Option>, + ) -> Result<(ConnectionHandle, Connection), AcceptError> { + let remote_address_validated = incoming.remote_address_validated(); + incoming.improper_drop_warner.dismiss(); + let incoming_buffer = self.incoming_buffers.remove(incoming.incoming_idx); + self.all_incoming_buffers_total_bytes -= incoming_buffer.total_bytes; + + let packet_number = incoming.packet.header.number.expand(0); + let InitialHeader { + src_cid, + dst_cid, + version, + .. + } = incoming.packet.header; + let server_config = + server_config.unwrap_or_else(|| self.server_config.as_ref().unwrap().clone()); + + if server_config + .transport + .max_idle_timeout + .is_some_and(|timeout| { + incoming.received_at + Duration::from_millis(timeout.into()) <= now + }) + { + debug!("abandoning accept of stale initial"); + self.index.remove_initial(dst_cid); + return Err(AcceptError { + cause: ConnectionError::TimedOut, + response: None, + }); + } + + if self.cids_exhausted() { + debug!("refusing connection"); + self.index.remove_initial(dst_cid); + return Err(AcceptError { + cause: ConnectionError::CidsExhausted, + response: Some(self.initial_close( + version, + incoming.addresses, + &incoming.crypto, + &src_cid, + TransportError::CONNECTION_REFUSED(""), + buf, + )), + }); + } + + if incoming + .crypto + .packet + .remote + .decrypt( + packet_number, + &incoming.packet.header_data, + &mut incoming.packet.payload, + ) + .is_err() + { + debug!(packet_number, "failed to authenticate initial packet"); + self.index.remove_initial(dst_cid); + return Err(AcceptError { + cause: TransportError::PROTOCOL_VIOLATION("authentication failed").into(), + response: None, + }); + }; + + let ch = ConnectionHandle(self.connections.vacant_key()); + let 
loc_cid = self.new_cid(ch); + let mut params = TransportParameters::new( + &server_config.transport, + &self.config, + self.local_cid_generator.as_ref(), + loc_cid, + Some(&server_config), + &mut self.rng, + ); + params.stateless_reset_token = Some(ResetToken::new(&*self.config.reset_key, loc_cid)); + params.original_dst_cid = Some(incoming.token.orig_dst_cid); + params.retry_src_cid = incoming.token.retry_src_cid; + let mut pref_addr_cid = None; + if server_config.has_preferred_address() { + let cid = self.new_cid(ch); + pref_addr_cid = Some(cid); + params.preferred_address = Some(PreferredAddress { + address_v4: server_config.preferred_address_v4, + address_v6: server_config.preferred_address_v6, + connection_id: cid, + stateless_reset_token: ResetToken::new(&*self.config.reset_key, cid), + }); + } + + let tls = server_config.crypto.clone().start_session(version, ¶ms); + let transport_config = server_config.transport.clone(); + let mut conn = self.add_connection( + ch, + version, + dst_cid, + loc_cid, + src_cid, + incoming.addresses, + incoming.received_at, + tls, + transport_config, + SideArgs::Server { + server_config, + pref_addr_cid, + path_validated: remote_address_validated, + }, + ); + self.index.insert_initial(dst_cid, ch); + + match conn.handle_first_packet( + incoming.received_at, + incoming.addresses.remote, + incoming.ecn, + packet_number, + incoming.packet, + incoming.rest, + ) { + Ok(()) => { + trace!(id = ch.0, icid = %dst_cid, "new connection"); + + for event in incoming_buffer.datagrams { + conn.handle_event(ConnectionEvent(ConnectionEventInner::Datagram(event))) + } + + Ok((ch, conn)) + } + Err(e) => { + debug!("handshake failed: {}", e); + self.handle_event(ch, EndpointEvent(EndpointEventInner::Drained)); + let response = match e { + ConnectionError::TransportError(ref e) => Some(self.initial_close( + version, + incoming.addresses, + &incoming.crypto, + &src_cid, + e.clone(), + buf, + )), + _ => None, + }; + Err(AcceptError { cause: e, response 
}) + } + } + } + + /// Check if we should refuse a connection attempt regardless of the packet's contents + fn early_validate_first_packet( + &mut self, + header: &ProtectedInitialHeader, + ) -> Result<(), TransportError> { + let config = &self.server_config.as_ref().unwrap(); + if self.cids_exhausted() || self.incoming_buffers.len() >= config.max_incoming { + return Err(TransportError::CONNECTION_REFUSED("")); + } + + // RFC9000 §7.2 dictates that initial (client-chosen) destination CIDs must be at least 8 + // bytes. If this is a Retry packet, then the length must instead match our usual CID + // length. If we ever issue non-Retry address validation tokens via `NEW_TOKEN`, then we'll + // also need to validate CID length for those after decoding the token. + if header.dst_cid.len() < 8 + && (header.token_pos.is_empty() + || header.dst_cid.len() != self.local_cid_generator.cid_len()) + { + debug!( + "rejecting connection due to invalid DCID length {}", + header.dst_cid.len() + ); + return Err(TransportError::PROTOCOL_VIOLATION( + "invalid destination CID length", + )); + } + + Ok(()) + } + + /// Reject this incoming connection attempt + pub fn refuse(&mut self, incoming: Incoming, buf: &mut Vec) -> Transmit { + self.clean_up_incoming(&incoming); + incoming.improper_drop_warner.dismiss(); + + self.initial_close( + incoming.packet.header.version, + incoming.addresses, + &incoming.crypto, + &incoming.packet.header.src_cid, + TransportError::CONNECTION_REFUSED(""), + buf, + ) + } + + /// Respond with a retry packet, requiring the client to retry with address validation + /// + /// Errors if `incoming.may_retry()` is false. 
+ pub fn retry(&mut self, incoming: Incoming, buf: &mut Vec) -> Result { + if !incoming.may_retry() { + return Err(RetryError(Box::new(incoming))); + } + + self.clean_up_incoming(&incoming); + incoming.improper_drop_warner.dismiss(); + + let server_config = self.server_config.as_ref().unwrap(); + + // First Initial + // The peer will use this as the DCID of its following Initials. Initial DCIDs are + // looked up separately from Handshake/Data DCIDs, so there is no risk of collision + // with established connections. In the unlikely event that a collision occurs + // between two connections in the initial phase, both will fail fast and may be + // retried by the application layer. + let loc_cid = self.local_cid_generator.generate_cid(); + + let payload = TokenPayload::Retry { + address: incoming.addresses.remote, + orig_dst_cid: incoming.packet.header.dst_cid, + issued: server_config.time_source.now(), + }; + let token = Token::new(payload, &mut self.rng).encode(&*server_config.token_key); + + let header = Header::Retry { + src_cid: loc_cid, + dst_cid: incoming.packet.header.src_cid, + version: incoming.packet.header.version, + }; + + let encode = header.encode(buf); + buf.put_slice(&token); + buf.extend_from_slice(&server_config.crypto.retry_tag( + incoming.packet.header.version, + &incoming.packet.header.dst_cid, + buf, + )); + encode.finish(buf, &*incoming.crypto.header.local, None); + + Ok(Transmit { + destination: incoming.addresses.remote, + ecn: None, + size: buf.len(), + segment_size: None, + src_ip: incoming.addresses.local_ip, + }) + } + + /// Ignore this incoming connection attempt, not sending any packet in response + /// + /// Doing this actively, rather than merely dropping the [`Incoming`], is necessary to prevent + /// memory leaks due to state within [`Endpoint`] tracking the incoming connection. 
+ pub fn ignore(&mut self, incoming: Incoming) { + self.clean_up_incoming(&incoming); + incoming.improper_drop_warner.dismiss(); + } + + /// Clean up endpoint data structures associated with an `Incoming`. + fn clean_up_incoming(&mut self, incoming: &Incoming) { + self.index.remove_initial(incoming.packet.header.dst_cid); + let incoming_buffer = self.incoming_buffers.remove(incoming.incoming_idx); + self.all_incoming_buffers_total_bytes -= incoming_buffer.total_bytes; + } + + fn add_connection( + &mut self, + ch: ConnectionHandle, + version: u32, + init_cid: ConnectionId, + loc_cid: ConnectionId, + rem_cid: ConnectionId, + addresses: FourTuple, + now: Instant, + tls: Box, + transport_config: Arc, + side_args: SideArgs, + ) -> Connection { + let mut rng_seed = [0; 32]; + self.rng.fill_bytes(&mut rng_seed); + let side = side_args.side(); + let pref_addr_cid = side_args.pref_addr_cid(); + let conn = Connection::new( + self.config.clone(), + transport_config, + init_cid, + loc_cid, + rem_cid, + addresses.remote, + addresses.local_ip, + tls, + self.local_cid_generator.as_ref(), + now, + version, + self.allow_mtud, + rng_seed, + side_args, + ); + + let mut cids_issued = 0; + let mut loc_cids = FxHashMap::default(); + + loc_cids.insert(cids_issued, loc_cid); + cids_issued += 1; + + if let Some(cid) = pref_addr_cid { + debug_assert_eq!(cids_issued, 1, "preferred address cid seq must be 1"); + loc_cids.insert(cids_issued, cid); + cids_issued += 1; + } + + let id = self.connections.insert(ConnectionMeta { + init_cid, + cids_issued, + loc_cids, + addresses, + side, + reset_token: None, + }); + debug_assert_eq!(id, ch.0, "connection handle allocation out of sync"); + + self.index.insert_conn(addresses, loc_cid, ch, side); + + conn + } + + fn initial_close( + &mut self, + version: u32, + addresses: FourTuple, + crypto: &Keys, + remote_id: &ConnectionId, + reason: TransportError, + buf: &mut Vec, + ) -> Transmit { + // We don't need to worry about CID collisions in initial 
closes because the peer + // shouldn't respond, and if it does, and the CID collides, we'll just drop the + // unexpected response. + let local_id = self.local_cid_generator.generate_cid(); + let number = PacketNumber::U8(0); + let header = Header::Initial(InitialHeader { + dst_cid: *remote_id, + src_cid: local_id, + number, + token: Bytes::new(), + version, + }); + + let partial_encode = header.encode(buf); + let max_len = + INITIAL_MTU as usize - partial_encode.header_len - crypto.packet.local.tag_len(); + frame::Close::from(reason).encode(buf, max_len); + buf.resize(buf.len() + crypto.packet.local.tag_len(), 0); + partial_encode.finish(buf, &*crypto.header.local, Some((0, &*crypto.packet.local))); + Transmit { + destination: addresses.remote, + ecn: None, + size: buf.len(), + segment_size: None, + src_ip: addresses.local_ip, + } + } + + /// Access the configuration used by this endpoint + pub fn config(&self) -> &EndpointConfig { + &self.config + } + + /// Number of connections that are currently open + pub fn open_connections(&self) -> usize { + self.connections.len() + } + + /// Counter for the number of bytes currently used + /// in the buffers for Initial and 0-RTT messages for pending incoming connections + pub fn incoming_buffer_bytes(&self) -> u64 { + self.all_incoming_buffers_total_bytes + } + + #[cfg(test)] + pub(crate) fn known_connections(&self) -> usize { + let x = self.connections.len(); + debug_assert_eq!(x, self.index.connection_ids_initial.len()); + // Not all connections have known reset tokens + debug_assert!(x >= self.index.connection_reset_tokens.0.len()); + // Not all connections have unique remotes, and 0-length CIDs might not be in use. 
+ debug_assert!(x >= self.index.incoming_connection_remotes.len()); + debug_assert!(x >= self.index.outgoing_connection_remotes.len()); + x + } + + #[cfg(test)] + pub(crate) fn known_cids(&self) -> usize { + self.index.connection_ids.len() + } + + /// Whether we've used up 3/4 of the available CID space + /// + /// We leave some space unused so that `new_cid` can be relied upon to finish quickly. We don't + /// bother to check when CID longer than 4 bytes are used because 2^40 connections is a lot. + fn cids_exhausted(&self) -> bool { + self.local_cid_generator.cid_len() <= 4 + && self.local_cid_generator.cid_len() != 0 + && (2usize.pow(self.local_cid_generator.cid_len() as u32 * 8) + - self.index.connection_ids.len()) + < 2usize.pow(self.local_cid_generator.cid_len() as u32 * 8 - 2) + } +} + +impl fmt::Debug for Endpoint { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Endpoint") + .field("rng", &self.rng) + .field("index", &self.index) + .field("connections", &self.connections) + .field("config", &self.config) + .field("server_config", &self.server_config) + // incoming_buffers too large + .field("incoming_buffers.len", &self.incoming_buffers.len()) + .field( + "all_incoming_buffers_total_bytes", + &self.all_incoming_buffers_total_bytes, + ) + .finish() + } +} + +/// Buffered Initial and 0-RTT messages for a pending incoming connection +#[derive(Default)] +struct IncomingBuffer { + datagrams: Vec, + total_bytes: u64, +} + +/// Part of protocol state incoming datagrams can be routed to +#[derive(Copy, Clone, Debug)] +enum RouteDatagramTo { + Incoming(usize), + Connection(ConnectionHandle), +} + +/// Maps packets to existing connections +#[derive(Default, Debug)] +struct ConnectionIndex { + /// Identifies connections based on the initial DCID the peer utilized + /// + /// Uses a standard `HashMap` to protect against hash collision attacks. + /// + /// Used by the server, not the client. 
+ connection_ids_initial: HashMap, + /// Identifies connections based on locally created CIDs + /// + /// Uses a cheaper hash function since keys are locally created + connection_ids: FxHashMap, + /// Identifies incoming connections with zero-length CIDs + /// + /// Uses a standard `HashMap` to protect against hash collision attacks. + incoming_connection_remotes: HashMap, + /// Identifies outgoing connections with zero-length CIDs + /// + /// We don't yet support explicit source addresses for client connections, and zero-length CIDs + /// require a unique four-tuple, so at most one client connection with zero-length local CIDs + /// may be established per remote. We must omit the local address from the key because we don't + /// necessarily know what address we're sending from, and hence receiving at. + /// + /// Uses a standard `HashMap` to protect against hash collision attacks. + outgoing_connection_remotes: HashMap, + /// Reset tokens provided by the peer for the CID each connection is currently sending to + /// + /// Incoming stateless resets do not have correct CIDs, so we need this to identify the correct + /// recipient, if any. 
+ connection_reset_tokens: ResetTokenTable, +} + +impl ConnectionIndex { + /// Associate an incoming connection with its initial destination CID + fn insert_initial_incoming(&mut self, dst_cid: ConnectionId, incoming_key: usize) { + if dst_cid.is_empty() { + return; + } + self.connection_ids_initial + .insert(dst_cid, RouteDatagramTo::Incoming(incoming_key)); + } + + /// Remove an association with an initial destination CID + fn remove_initial(&mut self, dst_cid: ConnectionId) { + if dst_cid.is_empty() { + return; + } + let removed = self.connection_ids_initial.remove(&dst_cid); + debug_assert!(removed.is_some()); + } + + /// Associate a connection with its initial destination CID + fn insert_initial(&mut self, dst_cid: ConnectionId, connection: ConnectionHandle) { + if dst_cid.is_empty() { + return; + } + self.connection_ids_initial + .insert(dst_cid, RouteDatagramTo::Connection(connection)); + } + + /// Associate a connection with its first locally-chosen destination CID if used, or otherwise + /// its current 4-tuple + fn insert_conn( + &mut self, + addresses: FourTuple, + dst_cid: ConnectionId, + connection: ConnectionHandle, + side: Side, + ) { + match dst_cid.len() { + 0 => match side { + Side::Server => { + self.incoming_connection_remotes + .insert(addresses, connection); + } + Side::Client => { + self.outgoing_connection_remotes + .insert(addresses.remote, connection); + } + }, + _ => { + self.connection_ids.insert(dst_cid, connection); + } + } + } + + /// Discard a connection ID + fn retire(&mut self, dst_cid: ConnectionId) { + self.connection_ids.remove(&dst_cid); + } + + /// Remove all references to a connection + fn remove(&mut self, conn: &ConnectionMeta) { + if conn.side.is_server() { + self.remove_initial(conn.init_cid); + } + for cid in conn.loc_cids.values() { + self.connection_ids.remove(cid); + } + self.incoming_connection_remotes.remove(&conn.addresses); + self.outgoing_connection_remotes + .remove(&conn.addresses.remote); + if let 
Some((remote, token)) = conn.reset_token { + self.connection_reset_tokens.remove(remote, token); + } + } + + /// Find the existing connection that `datagram` should be routed to, if any + fn get(&self, addresses: &FourTuple, datagram: &PartialDecode) -> Option { + if !datagram.dst_cid().is_empty() { + if let Some(&ch) = self.connection_ids.get(datagram.dst_cid()) { + return Some(RouteDatagramTo::Connection(ch)); + } + } + if datagram.is_initial() || datagram.is_0rtt() { + if let Some(&ch) = self.connection_ids_initial.get(datagram.dst_cid()) { + return Some(ch); + } + } + if datagram.dst_cid().is_empty() { + if let Some(&ch) = self.incoming_connection_remotes.get(addresses) { + return Some(RouteDatagramTo::Connection(ch)); + } + if let Some(&ch) = self.outgoing_connection_remotes.get(&addresses.remote) { + return Some(RouteDatagramTo::Connection(ch)); + } + } + let data = datagram.data(); + if data.len() < RESET_TOKEN_SIZE { + return None; + } + self.connection_reset_tokens + .get(addresses.remote, &data[data.len() - RESET_TOKEN_SIZE..]) + .cloned() + .map(RouteDatagramTo::Connection) + } +} + +#[derive(Debug)] +pub(crate) struct ConnectionMeta { + init_cid: ConnectionId, + /// Number of local connection IDs that have been issued in NEW_CONNECTION_ID frames. + cids_issued: u64, + loc_cids: FxHashMap, + /// Remote/local addresses the connection began with + /// + /// Only needed to support connections with zero-length CIDs, which cannot migrate, so we don't + /// bother keeping it up to date. 
+ addresses: FourTuple, + side: Side, + /// Reset token provided by the peer for the CID we're currently sending to, and the address + /// being sent to + reset_token: Option<(SocketAddr, ResetToken)>, +} + +/// Internal identifier for a `Connection` currently associated with an endpoint +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] +pub struct ConnectionHandle(pub usize); + +impl From for usize { + fn from(x: ConnectionHandle) -> Self { + x.0 + } +} + +impl Index for Slab { + type Output = ConnectionMeta; + fn index(&self, ch: ConnectionHandle) -> &ConnectionMeta { + &self[ch.0] + } +} + +impl IndexMut for Slab { + fn index_mut(&mut self, ch: ConnectionHandle) -> &mut ConnectionMeta { + &mut self[ch.0] + } +} + +/// Event resulting from processing a single datagram +pub enum DatagramEvent { + /// The datagram is redirected to its `Connection` + ConnectionEvent(ConnectionHandle, ConnectionEvent), + /// The datagram may result in starting a new `Connection` + NewConnection(Incoming), + /// Response generated directly by the endpoint + Response(Transmit), +} + +/// An incoming connection for which the server has not yet begun its part of the handshake. +pub struct Incoming { + received_at: Instant, + addresses: FourTuple, + ecn: Option, + packet: InitialPacket, + rest: Option, + crypto: Keys, + token: IncomingToken, + incoming_idx: usize, + improper_drop_warner: IncomingImproperDropWarner, +} + +impl Incoming { + /// The local IP address which was used when the peer established the connection + /// + /// This has the same behavior as [`Connection::local_ip`]. 
+ pub fn local_ip(&self) -> Option { + self.addresses.local_ip + } + + /// The peer's UDP address + pub fn remote_address(&self) -> SocketAddr { + self.addresses.remote + } + + /// Whether the socket address that is initiating this connection has been validated + /// + /// This means that the sender of the initial packet has proved that they can receive traffic + /// sent to `self.remote_address()`. + /// + /// If `self.remote_address_validated()` is false, `self.may_retry()` is guaranteed to be true. + /// The inverse is not guaranteed. + pub fn remote_address_validated(&self) -> bool { + self.token.validated + } + + /// Whether it is legal to respond with a retry packet + /// + /// If `self.remote_address_validated()` is false, `self.may_retry()` is guaranteed to be true. + /// The inverse is not guaranteed. + pub fn may_retry(&self) -> bool { + self.token.retry_src_cid.is_none() + } + + /// The original destination connection ID sent by the client + pub fn orig_dst_cid(&self) -> &ConnectionId { + &self.token.orig_dst_cid + } +} + +impl fmt::Debug for Incoming { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Incoming") + .field("addresses", &self.addresses) + .field("ecn", &self.ecn) + // packet doesn't implement debug + // rest is too big and not meaningful enough + .field("token", &self.token) + .field("incoming_idx", &self.incoming_idx) + // improper drop warner contains no information + .finish_non_exhaustive() + } +} + +struct IncomingImproperDropWarner; + +impl IncomingImproperDropWarner { + fn dismiss(self) { + mem::forget(self); + } +} + +impl Drop for IncomingImproperDropWarner { + fn drop(&mut self) { + warn!( + "quinn_proto::Incoming dropped without passing to Endpoint::accept/refuse/retry/ignore \ + (may cause memory leak and eventual inability to accept new connections)" + ); + } +} + +/// Errors in the parameters being used to create a new connection +/// +/// These arise before any I/O has been performed. 
+#[derive(Debug, Error, Clone, PartialEq, Eq)] +pub enum ConnectError { + /// The endpoint can no longer create new connections + /// + /// Indicates that a necessary component of the endpoint has been dropped or otherwise disabled. + #[error("endpoint stopping")] + EndpointStopping, + /// The connection could not be created because not enough of the CID space is available + /// + /// Try using longer connection IDs + #[error("CIDs exhausted")] + CidsExhausted, + /// The given server name was malformed + #[error("invalid server name: {0}")] + InvalidServerName(String), + /// The remote [`SocketAddr`] supplied was malformed + /// + /// Examples include attempting to connect to port 0, or using an inappropriate address family. + #[error("invalid remote address: {0}")] + InvalidRemoteAddress(SocketAddr), + /// No default client configuration was set up + /// + /// Use `Endpoint::connect_with` to specify a client configuration. + #[error("no default client config")] + NoDefaultClientConfig, + /// The local endpoint does not support the QUIC version specified in the client configuration + #[error("unsupported QUIC version")] + UnsupportedVersion, +} + +/// Error type for attempting to accept an [`Incoming`] +#[derive(Debug)] +pub struct AcceptError { + /// Underlying error describing reason for failure + pub cause: ConnectionError, + /// Optional response to transmit back + pub response: Option, +} + +/// Error for attempting to retry an [`Incoming`] which already bears a token from a previous retry +#[derive(Debug, Error)] +#[error("retry() with validated Incoming")] +pub struct RetryError(Box); + +impl RetryError { + /// Get the [`Incoming`] + pub fn into_incoming(self) -> Incoming { + *self.0 + } +} + +/// Reset Tokens which are associated with peer socket addresses +/// +/// The standard `HashMap` is used since both `SocketAddr` and `ResetToken` are +/// peer generated and might be usable for hash collision attacks. 
+#[derive(Default, Debug)] +struct ResetTokenTable(HashMap>); + +impl ResetTokenTable { + fn insert(&mut self, remote: SocketAddr, token: ResetToken, ch: ConnectionHandle) -> bool { + self.0 + .entry(remote) + .or_default() + .insert(token, ch) + .is_some() + } + + fn remove(&mut self, remote: SocketAddr, token: ResetToken) { + use std::collections::hash_map::Entry; + match self.0.entry(remote) { + Entry::Vacant(_) => {} + Entry::Occupied(mut e) => { + e.get_mut().remove(&token); + if e.get().is_empty() { + e.remove_entry(); + } + } + } + } + + fn get(&self, remote: SocketAddr, token: &[u8]) -> Option<&ConnectionHandle> { + let token = ResetToken::from(<[u8; RESET_TOKEN_SIZE]>::try_from(token).ok()?); + self.0.get(&remote)?.get(&token) + } +} + +/// Identifies a connection by the combination of remote and local addresses +/// +/// Including the local ensures good behavior when the host has multiple IP addresses on the same +/// subnet and zero-length connection IDs are in use. +#[derive(Hash, Eq, PartialEq, Debug, Copy, Clone)] +struct FourTuple { + remote: SocketAddr, + // A single socket can only listen on a single port, so no need to store it explicitly + local_ip: Option, +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/frame.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/frame.rs new file mode 100644 index 0000000000000000000000000000000000000000..01d9a02643fab1329bd98dda6f2fd2f8c068eb84 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/frame.rs @@ -0,0 +1,1008 @@ +use std::{ + fmt::{self, Write}, + mem, + ops::{Range, RangeInclusive}, +}; + +use bytes::{Buf, BufMut, Bytes}; +use tinyvec::TinyVec; + +use crate::{ + Dir, MAX_CID_SIZE, RESET_TOKEN_SIZE, ResetToken, StreamId, TransportError, TransportErrorCode, + VarInt, + coding::{self, BufExt, BufMutExt, UnexpectedEnd}, + range_set::ArrayRangeSet, + shared::{ConnectionId, EcnCodepoint}, 
};

#[cfg(feature = "arbitrary")]
use arbitrary::Arbitrary;

/// A QUIC frame type
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct FrameType(u64);

impl FrameType {
    /// Interpret this type as a STREAM frame, yielding its flag bits, if applicable
    fn stream(self) -> Option<StreamInfo> {
        if STREAM_TYS.contains(&self.0) {
            Some(StreamInfo(self.0 as u8))
        } else {
            None
        }
    }
    /// Interpret this type as a DATAGRAM frame, yielding its flag bits, if applicable
    fn datagram(self) -> Option<DatagramInfo> {
        if DATAGRAM_TYS.contains(&self.0) {
            Some(DatagramInfo(self.0 as u8))
        } else {
            None
        }
    }
}

impl coding::Codec for FrameType {
    fn decode<B: Buf>(buf: &mut B) -> coding::Result<Self> {
        Ok(Self(buf.get_var()?))
    }
    fn encode<B: BufMut>(&self, buf: &mut B) {
        buf.write_var(self.0);
    }
}

pub(crate) trait FrameStruct {
    /// Smallest number of bytes this type of frame is guaranteed to fit within.
    const SIZE_BOUND: usize;
}

macro_rules! frame_types {
    {$($name:ident = $val:expr,)*} => {
        impl FrameType {
            $(pub(crate) const $name: FrameType = FrameType($val);)*
        }

        impl fmt::Debug for FrameType {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                match self.0 {
                    $($val => f.write_str(stringify!($name)),)*
                    _ => write!(f, "Type({:02x})", self.0)
                }
            }
        }

        impl fmt::Display for FrameType {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                match self.0 {
                    $($val => f.write_str(stringify!($name)),)*
                    x if STREAM_TYS.contains(&x) => f.write_str("STREAM"),
                    x if DATAGRAM_TYS.contains(&x) => f.write_str("DATAGRAM"),
                    _ => write!(f, "<unknown {:02x}>", self.0),
                }
            }
        }
    }
}

/// Flag bits of a STREAM frame type (low three bits of the frame type value)
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
struct StreamInfo(u8);

impl StreamInfo {
    fn fin(self) -> bool {
        self.0 & 0x01 != 0
    }
    fn len(self) -> bool {
        self.0 & 0x02 != 0
    }
    fn off(self) -> bool {
        self.0 & 0x04 != 0
    }
}

/// Flag bit of a DATAGRAM frame type (whether an explicit length is present)
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
struct DatagramInfo(u8);

impl DatagramInfo {
    fn len(self) -> bool {
        self.0 & 0x01 != 0
    }
}

frame_types! {
    PADDING = 0x00,
    PING = 0x01,
    ACK = 0x02,
    ACK_ECN = 0x03,
    RESET_STREAM = 0x04,
    STOP_SENDING = 0x05,
    CRYPTO = 0x06,
    NEW_TOKEN = 0x07,
    // STREAM
    MAX_DATA = 0x10,
    MAX_STREAM_DATA = 0x11,
    MAX_STREAMS_BIDI = 0x12,
    MAX_STREAMS_UNI = 0x13,
    DATA_BLOCKED = 0x14,
    STREAM_DATA_BLOCKED = 0x15,
    STREAMS_BLOCKED_BIDI = 0x16,
    STREAMS_BLOCKED_UNI = 0x17,
    NEW_CONNECTION_ID = 0x18,
    RETIRE_CONNECTION_ID = 0x19,
    PATH_CHALLENGE = 0x1a,
    PATH_RESPONSE = 0x1b,
    CONNECTION_CLOSE = 0x1c,
    APPLICATION_CLOSE = 0x1d,
    HANDSHAKE_DONE = 0x1e,
    // ACK Frequency
    ACK_FREQUENCY = 0xaf,
    IMMEDIATE_ACK = 0x1f,
    // DATAGRAM
}

const STREAM_TYS: RangeInclusive<u64> = RangeInclusive::new(0x08, 0x0f);
const DATAGRAM_TYS: RangeInclusive<u64> = RangeInclusive::new(0x30, 0x31);

#[derive(Debug)]
pub(crate) enum Frame {
    Padding,
    Ping,
    Ack(Ack),
    ResetStream(ResetStream),
    StopSending(StopSending),
    Crypto(Crypto),
    NewToken(NewToken),
    Stream(Stream),
    MaxData(VarInt),
    MaxStreamData { id: StreamId, offset: u64 },
    MaxStreams { dir: Dir, count: u64 },
    DataBlocked { offset: u64 },
    StreamDataBlocked { id: StreamId, offset: u64 },
    StreamsBlocked { dir: Dir, limit: u64 },
    NewConnectionId(NewConnectionId),
    RetireConnectionId { sequence: u64 },
    PathChallenge(u64),
    PathResponse(u64),
    Close(Close),
    Datagram(Datagram),
    AckFrequency(AckFrequency),
    ImmediateAck,
    HandshakeDone,
}

impl Frame {
    pub(crate) fn ty(&self) -> FrameType {
        use Frame::*;
        match *self {
            Padding => FrameType::PADDING,
            ResetStream(_) => FrameType::RESET_STREAM,
            Close(self::Close::Connection(_)) => FrameType::CONNECTION_CLOSE,
            Close(self::Close::Application(_)) => FrameType::APPLICATION_CLOSE,
            MaxData(_) => FrameType::MAX_DATA,
            MaxStreamData { .. } => FrameType::MAX_STREAM_DATA,
            MaxStreams { dir: Dir::Bi, .. } => FrameType::MAX_STREAMS_BIDI,
            MaxStreams { dir: Dir::Uni, .. } => FrameType::MAX_STREAMS_UNI,
            Ping => FrameType::PING,
            DataBlocked { .. } => FrameType::DATA_BLOCKED,
            StreamDataBlocked { .. } => FrameType::STREAM_DATA_BLOCKED,
            StreamsBlocked { dir: Dir::Bi, .. } => FrameType::STREAMS_BLOCKED_BIDI,
            StreamsBlocked { dir: Dir::Uni, .. } => FrameType::STREAMS_BLOCKED_UNI,
            StopSending { .. } => FrameType::STOP_SENDING,
            RetireConnectionId { .. } => FrameType::RETIRE_CONNECTION_ID,
            Ack(_) => FrameType::ACK,
            Stream(ref x) => {
                // Base STREAM type, with FIN and OFF flag bits set as applicable
                let mut ty = *STREAM_TYS.start();
                if x.fin {
                    ty |= 0x01;
                }
                if x.offset != 0 {
                    ty |= 0x04;
                }
                FrameType(ty)
            }
            PathChallenge(_) => FrameType::PATH_CHALLENGE,
            PathResponse(_) => FrameType::PATH_RESPONSE,
            NewConnectionId { .. } => FrameType::NEW_CONNECTION_ID,
            Crypto(_) => FrameType::CRYPTO,
            NewToken(_) => FrameType::NEW_TOKEN,
            Datagram(_) => FrameType(*DATAGRAM_TYS.start()),
            AckFrequency(_) => FrameType::ACK_FREQUENCY,
            ImmediateAck => FrameType::IMMEDIATE_ACK,
            HandshakeDone => FrameType::HANDSHAKE_DONE,
        }
    }

    pub(crate) fn is_ack_eliciting(&self) -> bool {
        !matches!(*self, Self::Ack(_) | Self::Padding | Self::Close(_))
    }
}

#[derive(Clone, Debug)]
pub enum Close {
    Connection(ConnectionClose),
    Application(ApplicationClose),
}

impl Close {
    pub(crate) fn encode<W: BufMut>(&self, out: &mut W, max_len: usize) {
        match *self {
            Self::Connection(ref x) => x.encode(out, max_len),
            Self::Application(ref x) => x.encode(out, max_len),
        }
    }

    pub(crate) fn is_transport_layer(&self) -> bool {
        matches!(*self, Self::Connection(_))
    }
}

impl From<TransportError> for Close {
    fn from(x: TransportError) -> Self {
        Self::Connection(x.into())
    }
}
impl From<ConnectionClose> for Close {
    fn from(x: ConnectionClose) -> Self {
        Self::Connection(x)
    }
}
impl From<ApplicationClose> for Close {
    fn from(x: ApplicationClose) -> Self {
        Self::Application(x)
    }
}

/// Reason given by the transport for closing the connection
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ConnectionClose {
    /// Class of error as encoded in the specification
    pub error_code: TransportErrorCode,
    /// Type of frame that caused the close
    pub frame_type: Option<FrameType>,
    /// Human-readable reason for the close
    pub reason: Bytes,
}

impl fmt::Display for ConnectionClose {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.error_code.fmt(f)?;
        if !self.reason.as_ref().is_empty() {
            f.write_str(": ")?;
            f.write_str(&String::from_utf8_lossy(&self.reason))?;
        }
        Ok(())
    }
}

impl From<TransportError> for ConnectionClose {
    fn from(x: TransportError) -> Self {
        Self {
            error_code: x.code,
            frame_type: x.frame,
            reason: x.reason.into(),
        }
    }
}

impl FrameStruct for ConnectionClose {
    const SIZE_BOUND: usize = 1 + 8 + 8 + 8;
}

impl ConnectionClose {
    pub(crate) fn encode<W: BufMut>(&self, out: &mut W, max_len: usize) {
        out.write(FrameType::CONNECTION_CLOSE); // 1 byte
        out.write(self.error_code); // <= 8 bytes
        let ty = self.frame_type.map_or(0, |x| x.0);
        out.write_var(ty); // <= 8 bytes
        // Truncate the reason string if necessary to stay within `max_len`
        let max_len = max_len
            - 3
            - VarInt::from_u64(ty).unwrap().size()
            - VarInt::from_u64(self.reason.len() as u64).unwrap().size();
        let actual_len = self.reason.len().min(max_len);
        out.write_var(actual_len as u64); // <= 8 bytes
        out.put_slice(&self.reason[0..actual_len]); // whatever's left
    }
}

/// Reason given by an application for closing the connection
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ApplicationClose {
    /// Application-specific reason code
    pub error_code: VarInt,
    /// Human-readable reason for the close
    pub reason: Bytes,
}

impl fmt::Display for ApplicationClose {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if !self.reason.as_ref().is_empty() {
            f.write_str(&String::from_utf8_lossy(&self.reason))?;
            f.write_str(" (code ")?;
            self.error_code.fmt(f)?;
            f.write_str(")")?;
        } else {
            self.error_code.fmt(f)?;
        }
        Ok(())
    }
}

impl FrameStruct for ApplicationClose {
    const SIZE_BOUND: usize = 1 + 8 + 8;
}

impl ApplicationClose {
    pub(crate) fn encode<W: BufMut>(&self, out: &mut W, max_len: usize) {
        out.write(FrameType::APPLICATION_CLOSE); // 1 byte
        out.write(self.error_code); // <= 8 bytes
        let max_len = max_len - 3 - VarInt::from_u64(self.reason.len() as u64).unwrap().size();
        let actual_len = self.reason.len().min(max_len);
        out.write_var(actual_len as u64); // <= 8 bytes
        out.put_slice(&self.reason[0..actual_len]); // whatever's left
    }
}

#[derive(Clone, Eq, PartialEq)]
pub struct Ack {
    pub largest: u64,
    pub delay: u64,
    pub additional: Bytes,
    pub ecn: Option<EcnCounts>,
}

impl fmt::Debug for Ack {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut ranges = "[".to_string();
        let mut first = true;
        for range in self.iter() {
            if !first {
                ranges.push(',');
            }
            write!(ranges, "{range:?}").unwrap();
            first = false;
        }
        ranges.push(']');

        f.debug_struct("Ack")
            .field("largest", &self.largest)
            .field("delay", &self.delay)
            .field("ecn", &self.ecn)
            .field("ranges", &ranges)
            .finish()
    }
}

impl<'a> IntoIterator for &'a Ack {
    type Item = RangeInclusive<u64>;
    type IntoIter = AckIter<'a>;

    fn into_iter(self) -> AckIter<'a> {
        AckIter::new(self.largest, &self.additional[..])
    }
}

impl Ack {
    pub fn encode<W: BufMut>(
        delay: u64,
        ranges: &ArrayRangeSet,
        ecn: Option<&EcnCounts>,
        buf: &mut W,
    ) {
        // ACK ranges are encoded highest-first as (gap, length) pairs relative to the
        // previous range; see RFC 9000 §19.3.1.
        let mut rest = ranges.iter().rev();
        let first = rest.next().unwrap();
        let largest = first.end - 1;
        let first_size = first.end - first.start;
        buf.write(if ecn.is_some() {
            FrameType::ACK_ECN
        } else {
            FrameType::ACK
        });
        buf.write_var(largest);
        buf.write_var(delay);
        buf.write_var(ranges.len() as u64 - 1);
        buf.write_var(first_size - 1);
        let mut prev = first.start;
        for block in rest {
            let size = block.end - block.start;
            buf.write_var(prev - block.end - 1);
            buf.write_var(size - 1);
            prev = block.start;
        }
        if let Some(x) = ecn {
            x.encode(buf)
        }
    }

    pub fn iter(&self) -> AckIter<'_> {
        self.into_iter()
    }
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct EcnCounts {
    pub ect0: u64,
    pub ect1: u64,
    pub ce: u64,
}

impl std::ops::AddAssign<EcnCodepoint> for EcnCounts {
    fn add_assign(&mut self, rhs: EcnCodepoint) {
        match rhs {
            EcnCodepoint::Ect0 => {
                self.ect0 += 1;
            }
            EcnCodepoint::Ect1 => {
                self.ect1 += 1;
            }
            EcnCodepoint::Ce => {
                self.ce += 1;
            }
        }
    }
}

impl EcnCounts {
    pub const ZERO: Self = Self {
        ect0: 0,
        ect1: 0,
        ce: 0,
    };

    pub fn encode<W: BufMut>(&self, out: &mut W) {
        out.write_var(self.ect0);
        out.write_var(self.ect1);
        out.write_var(self.ce);
    }
}

#[derive(Debug, Clone)]
pub(crate) struct Stream {
    pub(crate) id: StreamId,
    pub(crate) offset: u64,
    pub(crate) fin: bool,
    pub(crate) data: Bytes,
}

impl FrameStruct for Stream {
    const SIZE_BOUND: usize = 1 + 8 + 8 + 8;
}

/// Metadata from a stream frame
#[derive(Debug, Clone)]
pub(crate) struct StreamMeta {
    pub(crate) id: StreamId,
    pub(crate) offsets: Range<u64>,
    pub(crate) fin: bool,
}

// This manual implementation exists because `Default` is not implemented for `StreamId`
impl Default for StreamMeta {
    fn default() -> Self {
        Self {
            id: StreamId(0),
            offsets: 0..0,
            fin: false,
        }
    }
}

impl StreamMeta {
    pub(crate) fn encode<W: BufMut>(&self, length: bool, out: &mut W) {
        let mut ty = *STREAM_TYS.start();
        if self.offsets.start != 0 {
            ty |= 0x04;
        }
        if length {
            ty |= 0x02;
        }
        if self.fin {
            ty |= 0x01;
        }
        out.write_var(ty); // 1 byte
        out.write(self.id); // <=8 bytes
        if self.offsets.start != 0 {
            out.write_var(self.offsets.start); // <=8 bytes
        }
        if length {
            out.write_var(self.offsets.end - self.offsets.start); // <=8 bytes
        }
    }
}

/// A vector of [`StreamMeta`] with optimization for the single element case
pub(crate) type StreamMetaVec = TinyVec<[StreamMeta; 1]>;

#[derive(Debug, Clone)]
pub(crate) struct Crypto {
    pub(crate) offset: u64,
    pub(crate) data: Bytes,
}
+impl Crypto { + pub(crate) const SIZE_BOUND: usize = 17; + + pub(crate) fn encode(&self, out: &mut W) { + out.write(FrameType::CRYPTO); + out.write_var(self.offset); + out.write_var(self.data.len() as u64); + out.put_slice(&self.data); + } +} + +#[derive(Debug, Clone)] +pub(crate) struct NewToken { + pub(crate) token: Bytes, +} + +impl NewToken { + pub(crate) fn encode(&self, out: &mut W) { + out.write(FrameType::NEW_TOKEN); + out.write_var(self.token.len() as u64); + out.put_slice(&self.token); + } + + pub(crate) fn size(&self) -> usize { + 1 + VarInt::from_u64(self.token.len() as u64).unwrap().size() + self.token.len() + } +} + +pub(crate) struct Iter { + bytes: Bytes, + last_ty: Option, +} + +impl Iter { + pub(crate) fn new(payload: Bytes) -> Result { + if payload.is_empty() { + // "An endpoint MUST treat receipt of a packet containing no frames as a + // connection error of type PROTOCOL_VIOLATION." + // https://www.rfc-editor.org/rfc/rfc9000.html#name-frames-and-frame-types + return Err(TransportError::PROTOCOL_VIOLATION( + "packet payload is empty", + )); + } + + Ok(Self { + bytes: payload, + last_ty: None, + }) + } + + fn take_len(&mut self) -> Result { + let len = self.bytes.get_var()?; + if len > self.bytes.remaining() as u64 { + return Err(UnexpectedEnd); + } + Ok(self.bytes.split_to(len as usize)) + } + + fn try_next(&mut self) -> Result { + let ty = self.bytes.get::()?; + self.last_ty = Some(ty); + Ok(match ty { + FrameType::PADDING => Frame::Padding, + FrameType::RESET_STREAM => Frame::ResetStream(ResetStream { + id: self.bytes.get()?, + error_code: self.bytes.get()?, + final_offset: self.bytes.get()?, + }), + FrameType::CONNECTION_CLOSE => Frame::Close(Close::Connection(ConnectionClose { + error_code: self.bytes.get()?, + frame_type: { + let x = self.bytes.get_var()?; + if x == 0 { None } else { Some(FrameType(x)) } + }, + reason: self.take_len()?, + })), + FrameType::APPLICATION_CLOSE => Frame::Close(Close::Application(ApplicationClose { + 
error_code: self.bytes.get()?, + reason: self.take_len()?, + })), + FrameType::MAX_DATA => Frame::MaxData(self.bytes.get()?), + FrameType::MAX_STREAM_DATA => Frame::MaxStreamData { + id: self.bytes.get()?, + offset: self.bytes.get_var()?, + }, + FrameType::MAX_STREAMS_BIDI => Frame::MaxStreams { + dir: Dir::Bi, + count: self.bytes.get_var()?, + }, + FrameType::MAX_STREAMS_UNI => Frame::MaxStreams { + dir: Dir::Uni, + count: self.bytes.get_var()?, + }, + FrameType::PING => Frame::Ping, + FrameType::DATA_BLOCKED => Frame::DataBlocked { + offset: self.bytes.get_var()?, + }, + FrameType::STREAM_DATA_BLOCKED => Frame::StreamDataBlocked { + id: self.bytes.get()?, + offset: self.bytes.get_var()?, + }, + FrameType::STREAMS_BLOCKED_BIDI => Frame::StreamsBlocked { + dir: Dir::Bi, + limit: self.bytes.get_var()?, + }, + FrameType::STREAMS_BLOCKED_UNI => Frame::StreamsBlocked { + dir: Dir::Uni, + limit: self.bytes.get_var()?, + }, + FrameType::STOP_SENDING => Frame::StopSending(StopSending { + id: self.bytes.get()?, + error_code: self.bytes.get()?, + }), + FrameType::RETIRE_CONNECTION_ID => Frame::RetireConnectionId { + sequence: self.bytes.get_var()?, + }, + FrameType::ACK | FrameType::ACK_ECN => { + let largest = self.bytes.get_var()?; + let delay = self.bytes.get_var()?; + let extra_blocks = self.bytes.get_var()? 
as usize; + let n = scan_ack_blocks(&self.bytes, largest, extra_blocks)?; + Frame::Ack(Ack { + delay, + largest, + additional: self.bytes.split_to(n), + ecn: if ty != FrameType::ACK_ECN { + None + } else { + Some(EcnCounts { + ect0: self.bytes.get_var()?, + ect1: self.bytes.get_var()?, + ce: self.bytes.get_var()?, + }) + }, + }) + } + FrameType::PATH_CHALLENGE => Frame::PathChallenge(self.bytes.get()?), + FrameType::PATH_RESPONSE => Frame::PathResponse(self.bytes.get()?), + FrameType::NEW_CONNECTION_ID => { + let sequence = self.bytes.get_var()?; + let retire_prior_to = self.bytes.get_var()?; + if retire_prior_to > sequence { + return Err(IterErr::Malformed); + } + let length = self.bytes.get::()? as usize; + if length > MAX_CID_SIZE || length == 0 { + return Err(IterErr::Malformed); + } + if length > self.bytes.remaining() { + return Err(IterErr::UnexpectedEnd); + } + let mut stage = [0; MAX_CID_SIZE]; + self.bytes.copy_to_slice(&mut stage[0..length]); + let id = ConnectionId::new(&stage[..length]); + if self.bytes.remaining() < 16 { + return Err(IterErr::UnexpectedEnd); + } + let mut reset_token = [0; RESET_TOKEN_SIZE]; + self.bytes.copy_to_slice(&mut reset_token); + Frame::NewConnectionId(NewConnectionId { + sequence, + retire_prior_to, + id, + reset_token: reset_token.into(), + }) + } + FrameType::CRYPTO => Frame::Crypto(Crypto { + offset: self.bytes.get_var()?, + data: self.take_len()?, + }), + FrameType::NEW_TOKEN => Frame::NewToken(NewToken { + token: self.take_len()?, + }), + FrameType::HANDSHAKE_DONE => Frame::HandshakeDone, + FrameType::ACK_FREQUENCY => Frame::AckFrequency(AckFrequency { + sequence: self.bytes.get()?, + ack_eliciting_threshold: self.bytes.get()?, + request_max_ack_delay: self.bytes.get()?, + reordering_threshold: self.bytes.get()?, + }), + FrameType::IMMEDIATE_ACK => Frame::ImmediateAck, + _ => { + if let Some(s) = ty.stream() { + Frame::Stream(Stream { + id: self.bytes.get()?, + offset: if s.off() { self.bytes.get_var()? 
} else { 0 }, + fin: s.fin(), + data: if s.len() { + self.take_len()? + } else { + self.take_remaining() + }, + }) + } else if let Some(d) = ty.datagram() { + Frame::Datagram(Datagram { + data: if d.len() { + self.take_len()? + } else { + self.take_remaining() + }, + }) + } else { + return Err(IterErr::InvalidFrameId); + } + } + }) + } + + fn take_remaining(&mut self) -> Bytes { + mem::take(&mut self.bytes) + } +} + +impl Iterator for Iter { + type Item = Result; + fn next(&mut self) -> Option { + if !self.bytes.has_remaining() { + return None; + } + match self.try_next() { + Ok(x) => Some(Ok(x)), + Err(e) => { + // Corrupt frame, skip it and everything that follows + self.bytes.clear(); + Some(Err(InvalidFrame { + ty: self.last_ty, + reason: e.reason(), + })) + } + } + } +} + +#[derive(Debug)] +pub(crate) struct InvalidFrame { + pub(crate) ty: Option, + pub(crate) reason: &'static str, +} + +impl From for TransportError { + fn from(err: InvalidFrame) -> Self { + let mut te = Self::FRAME_ENCODING_ERROR(err.reason); + te.frame = err.ty; + te + } +} + +/// Validate exactly `n` ACK ranges in `buf` and return the number of bytes they cover +fn scan_ack_blocks(mut buf: &[u8], largest: u64, n: usize) -> Result { + let total_len = buf.remaining(); + let first_block = buf.get_var()?; + let mut smallest = largest.checked_sub(first_block).ok_or(IterErr::Malformed)?; + for _ in 0..n { + let gap = buf.get_var()?; + smallest = smallest.checked_sub(gap + 2).ok_or(IterErr::Malformed)?; + let block = buf.get_var()?; + smallest = smallest.checked_sub(block).ok_or(IterErr::Malformed)?; + } + Ok(total_len - buf.remaining()) +} + +enum IterErr { + UnexpectedEnd, + InvalidFrameId, + Malformed, +} + +impl IterErr { + fn reason(&self) -> &'static str { + use IterErr::*; + match *self { + UnexpectedEnd => "unexpected end", + InvalidFrameId => "invalid frame ID", + Malformed => "malformed", + } + } +} + +impl From for IterErr { + fn from(_: UnexpectedEnd) -> Self { + Self::UnexpectedEnd + 
} +} + +#[derive(Debug, Clone)] +pub struct AckIter<'a> { + largest: u64, + data: &'a [u8], +} + +impl<'a> AckIter<'a> { + fn new(largest: u64, data: &'a [u8]) -> Self { + Self { largest, data } + } +} + +impl Iterator for AckIter<'_> { + type Item = RangeInclusive; + fn next(&mut self) -> Option> { + if !self.data.has_remaining() { + return None; + } + let block = self.data.get_var().unwrap(); + let largest = self.largest; + if let Ok(gap) = self.data.get_var() { + self.largest -= block + gap + 2; + } + Some(largest - block..=largest) + } +} + +#[allow(unreachable_pub)] // fuzzing only +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +#[derive(Debug, Copy, Clone)] +pub struct ResetStream { + pub(crate) id: StreamId, + pub(crate) error_code: VarInt, + pub(crate) final_offset: VarInt, +} + +impl FrameStruct for ResetStream { + const SIZE_BOUND: usize = 1 + 8 + 8 + 8; +} + +impl ResetStream { + pub(crate) fn encode(&self, out: &mut W) { + out.write(FrameType::RESET_STREAM); // 1 byte + out.write(self.id); // <= 8 bytes + out.write(self.error_code); // <= 8 bytes + out.write(self.final_offset); // <= 8 bytes + } +} + +#[derive(Debug, Copy, Clone)] +pub(crate) struct StopSending { + pub(crate) id: StreamId, + pub(crate) error_code: VarInt, +} + +impl FrameStruct for StopSending { + const SIZE_BOUND: usize = 1 + 8 + 8; +} + +impl StopSending { + pub(crate) fn encode(&self, out: &mut W) { + out.write(FrameType::STOP_SENDING); // 1 byte + out.write(self.id); // <= 8 bytes + out.write(self.error_code) // <= 8 bytes + } +} + +#[derive(Debug, Copy, Clone)] +pub(crate) struct NewConnectionId { + pub(crate) sequence: u64, + pub(crate) retire_prior_to: u64, + pub(crate) id: ConnectionId, + pub(crate) reset_token: ResetToken, +} + +impl NewConnectionId { + pub(crate) fn encode(&self, out: &mut W) { + out.write(FrameType::NEW_CONNECTION_ID); + out.write_var(self.sequence); + out.write_var(self.retire_prior_to); + out.write(self.id.len() as u8); + out.put_slice(&self.id); + 
out.put_slice(&self.reset_token); + } +} + +impl FrameStruct for NewConnectionId { + const SIZE_BOUND: usize = 1 + 8 + 8 + 1 + MAX_CID_SIZE + RESET_TOKEN_SIZE; +} + +/// Smallest number of bytes this type of frame is guaranteed to fit within. +pub(crate) const RETIRE_CONNECTION_ID_SIZE_BOUND: usize = 9; + +/// An unreliable datagram +#[derive(Debug, Clone)] +pub struct Datagram { + /// Payload + pub data: Bytes, +} + +impl FrameStruct for Datagram { + const SIZE_BOUND: usize = 1 + 8; +} + +impl Datagram { + pub(crate) fn encode(&self, length: bool, out: &mut Vec) { + out.write(FrameType(*DATAGRAM_TYS.start() | u64::from(length))); // 1 byte + if length { + // Safe to unwrap because we check length sanity before queueing datagrams + out.write(VarInt::from_u64(self.data.len() as u64).unwrap()); // <= 8 bytes + } + out.extend_from_slice(&self.data); + } + + pub(crate) fn size(&self, length: bool) -> usize { + 1 + if length { + VarInt::from_u64(self.data.len() as u64).unwrap().size() + } else { + 0 + } + self.data.len() + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) struct AckFrequency { + pub(crate) sequence: VarInt, + pub(crate) ack_eliciting_threshold: VarInt, + pub(crate) request_max_ack_delay: VarInt, + pub(crate) reordering_threshold: VarInt, +} + +impl AckFrequency { + pub(crate) fn encode(&self, buf: &mut W) { + buf.write(FrameType::ACK_FREQUENCY); + buf.write(self.sequence); + buf.write(self.ack_eliciting_threshold); + buf.write(self.request_max_ack_delay); + buf.write(self.reordering_threshold); + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::coding::Codec; + use assert_matches::assert_matches; + + fn frames(buf: Vec) -> Vec { + Iter::new(Bytes::from(buf)) + .unwrap() + .collect::, _>>() + .unwrap() + } + + #[test] + fn ack_coding() { + const PACKETS: &[u64] = &[1, 2, 3, 5, 10, 11, 14]; + let mut ranges = ArrayRangeSet::new(); + for &packet in PACKETS { + ranges.insert(packet..packet + 1); + } + let mut buf = Vec::new(); + 
const ECN: EcnCounts = EcnCounts { + ect0: 42, + ect1: 24, + ce: 12, + }; + Ack::encode(42, &ranges, Some(&ECN), &mut buf); + let frames = frames(buf); + assert_eq!(frames.len(), 1); + match frames[0] { + Frame::Ack(ref ack) => { + let mut packets = ack.iter().flatten().collect::>(); + packets.sort_unstable(); + assert_eq!(&packets[..], PACKETS); + assert_eq!(ack.ecn, Some(ECN)); + } + ref x => panic!("incorrect frame {x:?}"), + } + } + + #[test] + fn ack_frequency_coding() { + let mut buf = Vec::new(); + let original = AckFrequency { + sequence: VarInt(42), + ack_eliciting_threshold: VarInt(20), + request_max_ack_delay: VarInt(50_000), + reordering_threshold: VarInt(1), + }; + original.encode(&mut buf); + let frames = frames(buf); + assert_eq!(frames.len(), 1); + match &frames[0] { + Frame::AckFrequency(decoded) => assert_eq!(decoded, &original), + x => panic!("incorrect frame {x:?}"), + } + } + + #[test] + fn immediate_ack_coding() { + let mut buf = Vec::new(); + FrameType::IMMEDIATE_ACK.encode(&mut buf); + let frames = frames(buf); + assert_eq!(frames.len(), 1); + assert_matches!(&frames[0], Frame::ImmediateAck); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..5982b69033db7e1d366e05074fdf32098509976a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/lib.rs @@ -0,0 +1,336 @@ +//! Low-level protocol logic for the QUIC protoocol +//! +//! quinn-proto contains a fully deterministic implementation of QUIC protocol logic. It contains +//! no networking code and does not get any relevant timestamps from the operating system. Most +//! users may want to use the futures-based quinn API instead. +//! +//! The quinn-proto API might be of interest if you want to use it from a C or C++ project +//! 
through C bindings or if you want to use a different event loop than the one tokio provides. +//! +//! The most important types are `Endpoint`, which conceptually represents the protocol state for +//! a single socket and mostly manages configuration and dispatches incoming datagrams to the +//! related `Connection`. `Connection` types contain the bulk of the protocol logic related to +//! managing a single connection and all the related state (such as streams). + +#![cfg_attr(not(fuzzing), warn(missing_docs))] +#![cfg_attr(test, allow(dead_code))] +// Fixes welcome: +#![warn(unreachable_pub)] +#![allow(clippy::cognitive_complexity)] +#![allow(clippy::too_many_arguments)] +#![warn(clippy::use_self)] + +use std::{ + fmt, + net::{IpAddr, SocketAddr}, + ops, +}; + +mod cid_queue; +pub mod coding; +mod constant_time; +mod range_set; +#[cfg(all(test, any(feature = "rustls-aws-lc-rs", feature = "rustls-ring")))] +mod tests; +pub mod transport_parameters; +mod varint; + +pub use varint::{VarInt, VarIntBoundsExceeded}; + +#[cfg(feature = "bloom")] +mod bloom_token_log; +#[cfg(feature = "bloom")] +pub use bloom_token_log::BloomTokenLog; + +mod connection; +pub use crate::connection::{ + Chunk, Chunks, ClosedStream, Connection, ConnectionError, ConnectionStats, Datagrams, Event, + FinishError, FrameStats, PathStats, ReadError, ReadableError, RecvStream, RttEstimator, + SendDatagramError, SendStream, ShouldTransmit, StreamEvent, Streams, UdpStats, WriteError, + Written, +}; +#[cfg(feature = "qlog")] +pub use connection::qlog::QlogStream; + +#[cfg(feature = "rustls")] +pub use rustls; + +mod config; +#[cfg(feature = "qlog")] +pub use config::QlogConfig; +pub use config::{ + AckFrequencyConfig, ClientConfig, ConfigError, EndpointConfig, IdleTimeout, MtuDiscoveryConfig, + ServerConfig, StdSystemTime, TimeSource, TransportConfig, ValidationTokenConfig, +}; + +pub mod crypto; + +mod frame; +use crate::frame::Frame; +pub use crate::frame::{ApplicationClose, ConnectionClose, 
Datagram, FrameType}; + +mod endpoint; +pub use crate::endpoint::{ + AcceptError, ConnectError, ConnectionHandle, DatagramEvent, Endpoint, Incoming, RetryError, +}; + +mod packet; +pub use packet::{ + ConnectionIdParser, FixedLengthConnectionIdParser, LongType, PacketDecodeError, PartialDecode, + ProtectedHeader, ProtectedInitialHeader, +}; + +mod shared; +pub use crate::shared::{ConnectionEvent, ConnectionId, EcnCodepoint, EndpointEvent}; + +mod transport_error; +pub use crate::transport_error::{Code as TransportErrorCode, Error as TransportError}; + +pub mod congestion; + +mod cid_generator; +pub use crate::cid_generator::{ + ConnectionIdGenerator, HashedConnectionIdGenerator, InvalidCid, RandomConnectionIdGenerator, +}; + +mod token; +use token::ResetToken; +pub use token::{NoneTokenLog, NoneTokenStore, TokenLog, TokenReuseError, TokenStore}; + +mod token_memory_cache; +pub use token_memory_cache::TokenMemoryCache; + +#[cfg(feature = "arbitrary")] +use arbitrary::Arbitrary; + +// Deal with time +#[cfg(not(all(target_family = "wasm", target_os = "unknown")))] +pub(crate) use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +#[cfg(all(target_family = "wasm", target_os = "unknown"))] +pub(crate) use web_time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +#[cfg(fuzzing)] +pub mod fuzzing { + pub use crate::connection::{Retransmits, State as ConnectionState, StreamsState}; + pub use crate::frame::ResetStream; + pub use crate::packet::PartialDecode; + pub use crate::transport_parameters::TransportParameters; + pub use bytes::{BufMut, BytesMut}; + + #[cfg(feature = "arbitrary")] + use arbitrary::{Arbitrary, Result, Unstructured}; + + #[cfg(feature = "arbitrary")] + impl<'arbitrary> Arbitrary<'arbitrary> for TransportParameters { + fn arbitrary(u: &mut Unstructured<'arbitrary>) -> Result { + Ok(Self { + initial_max_streams_bidi: u.arbitrary()?, + initial_max_streams_uni: u.arbitrary()?, + ack_delay_exponent: u.arbitrary()?, + max_udp_payload_size: 
u.arbitrary()?, + ..Self::default() + }) + } + } + + #[derive(Debug)] + pub struct PacketParams { + pub local_cid_len: usize, + pub buf: BytesMut, + pub grease_quic_bit: bool, + } + + #[cfg(feature = "arbitrary")] + impl<'arbitrary> Arbitrary<'arbitrary> for PacketParams { + fn arbitrary(u: &mut Unstructured<'arbitrary>) -> Result { + let local_cid_len: usize = u.int_in_range(0..=crate::MAX_CID_SIZE)?; + let bytes: Vec = Vec::arbitrary(u)?; + let mut buf = BytesMut::new(); + buf.put_slice(&bytes[..]); + Ok(Self { + local_cid_len, + buf, + grease_quic_bit: bool::arbitrary(u)?, + }) + } + } +} + +/// The QUIC protocol version implemented. +pub const DEFAULT_SUPPORTED_VERSIONS: &[u32] = &[ + 0x00000001, + 0xff00_001d, + 0xff00_001e, + 0xff00_001f, + 0xff00_0020, + 0xff00_0021, + 0xff00_0022, +]; + +/// Whether an endpoint was the initiator of a connection +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum Side { + /// The initiator of a connection + Client = 0, + /// The acceptor of a connection + Server = 1, +} + +impl Side { + #[inline] + /// Shorthand for `self == Side::Client` + pub fn is_client(self) -> bool { + self == Self::Client + } + + #[inline] + /// Shorthand for `self == Side::Server` + pub fn is_server(self) -> bool { + self == Self::Server + } +} + +impl ops::Not for Side { + type Output = Self; + fn not(self) -> Self { + match self { + Self::Client => Self::Server, + Self::Server => Self::Client, + } + } +} + +/// Whether a stream communicates data in both directions or only from the initiator +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum Dir { + /// Data flows in both directions + Bi = 0, + /// Data flows only from the stream's initiator + Uni = 1, +} + +impl Dir { + fn iter() -> impl Iterator { + [Self::Bi, Self::Uni].iter().cloned() + } +} + +impl fmt::Display for Dir { + fn 
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use Dir::*; + f.pad(match *self { + Bi => "bidirectional", + Uni => "unidirectional", + }) + } +} + +/// Identifier for a stream within a particular connection +#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct StreamId(u64); + +impl fmt::Display for StreamId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let initiator = match self.initiator() { + Side::Client => "client", + Side::Server => "server", + }; + let dir = match self.dir() { + Dir::Uni => "uni", + Dir::Bi => "bi", + }; + write!( + f, + "{} {}directional stream {}", + initiator, + dir, + self.index() + ) + } +} + +impl StreamId { + /// Create a new StreamId + pub fn new(initiator: Side, dir: Dir, index: u64) -> Self { + Self((index << 2) | ((dir as u64) << 1) | initiator as u64) + } + /// Which side of a connection initiated the stream + pub fn initiator(self) -> Side { + if self.0 & 0x1 == 0 { + Side::Client + } else { + Side::Server + } + } + /// Which directions data flows in + pub fn dir(self) -> Dir { + if self.0 & 0x2 == 0 { Dir::Bi } else { Dir::Uni } + } + /// Distinguishes streams of the same initiator and directionality + pub fn index(self) -> u64 { + self.0 >> 2 + } +} + +impl From for VarInt { + fn from(x: StreamId) -> Self { + unsafe { Self::from_u64_unchecked(x.0) } + } +} + +impl From for StreamId { + fn from(v: VarInt) -> Self { + Self(v.0) + } +} + +impl From for u64 { + fn from(x: StreamId) -> Self { + x.0 + } +} + +impl coding::Codec for StreamId { + fn decode(buf: &mut B) -> coding::Result { + VarInt::decode(buf).map(|x| Self(x.into_inner())) + } + fn encode(&self, buf: &mut B) { + VarInt::from_u64(self.0).unwrap().encode(buf); + } +} + +/// An outgoing packet +#[derive(Debug)] +#[must_use] +pub struct Transmit { + /// The socket this datagram should be sent to + pub destination: SocketAddr, + /// Explicit congestion 
notification bits to set on the packet + pub ecn: Option, + /// Amount of data written to the caller-supplied buffer + pub size: usize, + /// The segment size if this transmission contains multiple datagrams. + /// This is `None` if the transmit only contains a single datagram + pub segment_size: Option, + /// Optional source IP address for the datagram + pub src_ip: Option, +} + +// +// Useful internal constants +// + +/// The maximum number of CIDs we bother to issue per connection +const LOC_CID_COUNT: u64 = 8; +const RESET_TOKEN_SIZE: usize = 16; +const MAX_CID_SIZE: usize = 20; +const MIN_INITIAL_SIZE: u16 = 1200; +/// +const INITIAL_MTU: u16 = 1200; +const MAX_UDP_PAYLOAD: u16 = 65527; +const TIMER_GRANULARITY: Duration = Duration::from_millis(1); +/// Maximum number of streams that can be uniquely identified by a stream ID +const MAX_STREAM_COUNT: u64 = 1 << 60; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/packet.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/packet.rs new file mode 100644 index 0000000000000000000000000000000000000000..b5ef0c4026c52f0c5b4b34583de7871038dd3de3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/packet.rs @@ -0,0 +1,1014 @@ +use std::{cmp::Ordering, io, ops::Range, str}; + +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use thiserror::Error; + +use crate::{ + ConnectionId, + coding::{self, BufExt, BufMutExt}, + crypto, +}; + +/// Decodes a QUIC packet's invariant header +/// +/// Due to packet number encryption, it is impossible to fully decode a header +/// (which includes a variable-length packet number) without crypto context. +/// The crypto context (represented by the `Crypto` type in Quinn) is usually +/// part of the `Connection`, or can be derived from the destination CID for +/// Initial packets. 
+/// +/// To cope with this, we decode the invariant header (which should be stable +/// across QUIC versions), which gives us the destination CID and allows us +/// to inspect the version and packet type (which depends on the version). +/// This information allows us to fully decode and decrypt the packet. +#[cfg_attr(test, derive(Clone))] +#[derive(Debug)] +pub struct PartialDecode { + plain_header: ProtectedHeader, + buf: io::Cursor, +} + +#[allow(clippy::len_without_is_empty)] +impl PartialDecode { + /// Begin decoding a QUIC packet from `bytes`, returning any trailing data not part of that packet + pub fn new( + bytes: BytesMut, + cid_parser: &(impl ConnectionIdParser + ?Sized), + supported_versions: &[u32], + grease_quic_bit: bool, + ) -> Result<(Self, Option), PacketDecodeError> { + let mut buf = io::Cursor::new(bytes); + let plain_header = + ProtectedHeader::decode(&mut buf, cid_parser, supported_versions, grease_quic_bit)?; + let dgram_len = buf.get_ref().len(); + let packet_len = plain_header + .payload_len() + .map(|len| (buf.position() + len) as usize) + .unwrap_or(dgram_len); + match dgram_len.cmp(&packet_len) { + Ordering::Equal => Ok((Self { plain_header, buf }, None)), + Ordering::Less => Err(PacketDecodeError::InvalidHeader( + "packet too short to contain payload length", + )), + Ordering::Greater => { + let rest = Some(buf.get_mut().split_off(packet_len)); + Ok((Self { plain_header, buf }, rest)) + } + } + } + + /// The underlying partially-decoded packet data + pub(crate) fn data(&self) -> &[u8] { + self.buf.get_ref() + } + + pub(crate) fn initial_header(&self) -> Option<&ProtectedInitialHeader> { + self.plain_header.as_initial() + } + + pub(crate) fn has_long_header(&self) -> bool { + !matches!(self.plain_header, ProtectedHeader::Short { .. 
}) + } + + pub(crate) fn is_initial(&self) -> bool { + self.space() == Some(SpaceId::Initial) + } + + pub(crate) fn space(&self) -> Option { + use ProtectedHeader::*; + match self.plain_header { + Initial { .. } => Some(SpaceId::Initial), + Long { + ty: LongType::Handshake, + .. + } => Some(SpaceId::Handshake), + Long { + ty: LongType::ZeroRtt, + .. + } => Some(SpaceId::Data), + Short { .. } => Some(SpaceId::Data), + _ => None, + } + } + + pub(crate) fn is_0rtt(&self) -> bool { + match self.plain_header { + ProtectedHeader::Long { ty, .. } => ty == LongType::ZeroRtt, + _ => false, + } + } + + /// The destination connection ID of the packet + pub fn dst_cid(&self) -> &ConnectionId { + self.plain_header.dst_cid() + } + + /// Length of QUIC packet being decoded + #[allow(unreachable_pub)] // fuzzing only + pub fn len(&self) -> usize { + self.buf.get_ref().len() + } + + pub(crate) fn finish( + self, + header_crypto: Option<&dyn crypto::HeaderKey>, + ) -> Result { + use ProtectedHeader::*; + let Self { + plain_header, + mut buf, + } = self; + + if let Initial(ProtectedInitialHeader { + dst_cid, + src_cid, + token_pos, + version, + .. + }) = plain_header + { + let number = Self::decrypt_header(&mut buf, header_crypto.unwrap())?; + let header_len = buf.position() as usize; + let mut bytes = buf.into_inner(); + + let header_data = bytes.split_to(header_len).freeze(); + let token = header_data.slice(token_pos.start..token_pos.end); + return Ok(Packet { + header: Header::Initial(InitialHeader { + dst_cid, + src_cid, + token, + number, + version, + }), + header_data, + payload: bytes, + }); + } + + let header = match plain_header { + Long { + ty, + dst_cid, + src_cid, + version, + .. + } => Header::Long { + ty, + dst_cid, + src_cid, + number: Self::decrypt_header(&mut buf, header_crypto.unwrap())?, + version, + }, + Retry { + dst_cid, + src_cid, + version, + } => Header::Retry { + dst_cid, + src_cid, + version, + }, + Short { spin, dst_cid, .. 
} => { + let number = Self::decrypt_header(&mut buf, header_crypto.unwrap())?; + let key_phase = buf.get_ref()[0] & KEY_PHASE_BIT != 0; + Header::Short { + spin, + key_phase, + dst_cid, + number, + } + } + VersionNegotiate { + random, + dst_cid, + src_cid, + } => Header::VersionNegotiate { + random, + dst_cid, + src_cid, + }, + Initial { .. } => unreachable!(), + }; + + let header_len = buf.position() as usize; + let mut bytes = buf.into_inner(); + Ok(Packet { + header, + header_data: bytes.split_to(header_len).freeze(), + payload: bytes, + }) + } + + fn decrypt_header( + buf: &mut io::Cursor, + header_crypto: &dyn crypto::HeaderKey, + ) -> Result { + let packet_length = buf.get_ref().len(); + let pn_offset = buf.position() as usize; + if packet_length < pn_offset + 4 + header_crypto.sample_size() { + return Err(PacketDecodeError::InvalidHeader( + "packet too short to extract header protection sample", + )); + } + + header_crypto.decrypt(pn_offset, buf.get_mut()); + + let len = PacketNumber::decode_len(buf.get_ref()[0]); + PacketNumber::decode(len, buf) + } +} + +pub(crate) struct Packet { + pub(crate) header: Header, + pub(crate) header_data: Bytes, + pub(crate) payload: BytesMut, +} + +impl Packet { + pub(crate) fn reserved_bits_valid(&self) -> bool { + let mask = match self.header { + Header::Short { .. 
} => SHORT_RESERVED_BITS, + _ => LONG_RESERVED_BITS, + }; + self.header_data[0] & mask == 0 + } +} + +pub(crate) struct InitialPacket { + pub(crate) header: InitialHeader, + pub(crate) header_data: Bytes, + pub(crate) payload: BytesMut, +} + +impl From for Packet { + fn from(x: InitialPacket) -> Self { + Self { + header: Header::Initial(x.header), + header_data: x.header_data, + payload: x.payload, + } + } +} + +#[cfg_attr(test, derive(Clone))] +#[derive(Debug)] +pub(crate) enum Header { + Initial(InitialHeader), + Long { + ty: LongType, + dst_cid: ConnectionId, + src_cid: ConnectionId, + number: PacketNumber, + version: u32, + }, + Retry { + dst_cid: ConnectionId, + src_cid: ConnectionId, + version: u32, + }, + Short { + spin: bool, + key_phase: bool, + dst_cid: ConnectionId, + number: PacketNumber, + }, + VersionNegotiate { + random: u8, + src_cid: ConnectionId, + dst_cid: ConnectionId, + }, +} + +impl Header { + pub(crate) fn encode(&self, w: &mut Vec) -> PartialEncode { + use Header::*; + let start = w.len(); + match *self { + Initial(InitialHeader { + ref dst_cid, + ref src_cid, + ref token, + number, + version, + }) => { + w.write(u8::from(LongHeaderType::Initial) | number.tag()); + w.write(version); + dst_cid.encode_long(w); + src_cid.encode_long(w); + w.write_var(token.len() as u64); + w.put_slice(token); + w.write::(0); // Placeholder for payload length; see `set_payload_length` + number.encode(w); + PartialEncode { + start, + header_len: w.len() - start, + pn: Some((number.len(), true)), + } + } + Long { + ty, + ref dst_cid, + ref src_cid, + number, + version, + } => { + w.write(u8::from(LongHeaderType::Standard(ty)) | number.tag()); + w.write(version); + dst_cid.encode_long(w); + src_cid.encode_long(w); + w.write::(0); // Placeholder for payload length; see `set_payload_length` + number.encode(w); + PartialEncode { + start, + header_len: w.len() - start, + pn: Some((number.len(), true)), + } + } + Retry { + ref dst_cid, + ref src_cid, + version, + } => { 
+ w.write(u8::from(LongHeaderType::Retry)); + w.write(version); + dst_cid.encode_long(w); + src_cid.encode_long(w); + PartialEncode { + start, + header_len: w.len() - start, + pn: None, + } + } + Short { + spin, + key_phase, + ref dst_cid, + number, + } => { + w.write( + FIXED_BIT + | if key_phase { KEY_PHASE_BIT } else { 0 } + | if spin { SPIN_BIT } else { 0 } + | number.tag(), + ); + w.put_slice(dst_cid); + number.encode(w); + PartialEncode { + start, + header_len: w.len() - start, + pn: Some((number.len(), false)), + } + } + VersionNegotiate { + ref random, + ref dst_cid, + ref src_cid, + } => { + w.write(0x80u8 | random); + w.write::(0); + dst_cid.encode_long(w); + src_cid.encode_long(w); + PartialEncode { + start, + header_len: w.len() - start, + pn: None, + } + } + } + } + + /// Whether the packet is encrypted on the wire + pub(crate) fn is_protected(&self) -> bool { + !matches!(*self, Self::Retry { .. } | Self::VersionNegotiate { .. }) + } + + pub(crate) fn number(&self) -> Option { + use Header::*; + Some(match *self { + Initial(InitialHeader { number, .. }) => number, + Long { number, .. } => number, + Short { number, .. } => number, + _ => { + return None; + } + }) + } + + pub(crate) fn space(&self) -> SpaceId { + use Header::*; + match *self { + Short { .. } => SpaceId::Data, + Long { + ty: LongType::ZeroRtt, + .. + } => SpaceId::Data, + Long { + ty: LongType::Handshake, + .. + } => SpaceId::Handshake, + _ => SpaceId::Initial, + } + } + + pub(crate) fn key_phase(&self) -> bool { + match *self { + Self::Short { key_phase, .. } => key_phase, + _ => false, + } + } + + pub(crate) fn is_short(&self) -> bool { + matches!(*self, Self::Short { .. }) + } + + pub(crate) fn is_1rtt(&self) -> bool { + self.is_short() + } + + pub(crate) fn is_0rtt(&self) -> bool { + matches!( + *self, + Self::Long { + ty: LongType::ZeroRtt, + .. + } + ) + } + + pub(crate) fn dst_cid(&self) -> ConnectionId { + use Header::*; + match *self { + Initial(InitialHeader { dst_cid, .. 
}) => dst_cid, + Long { dst_cid, .. } => dst_cid, + Retry { dst_cid, .. } => dst_cid, + Short { dst_cid, .. } => dst_cid, + VersionNegotiate { dst_cid, .. } => dst_cid, + } + } + + /// Whether the payload of this packet contains QUIC frames + pub(crate) fn has_frames(&self) -> bool { + use Header::*; + match *self { + Initial(_) => true, + Long { .. } => true, + Retry { .. } => false, + Short { .. } => true, + VersionNegotiate { .. } => false, + } + } +} + +pub(crate) struct PartialEncode { + pub(crate) start: usize, + pub(crate) header_len: usize, + // Packet number length, payload length needed + pn: Option<(usize, bool)>, +} + +impl PartialEncode { + pub(crate) fn finish( + self, + buf: &mut [u8], + header_crypto: &dyn crypto::HeaderKey, + crypto: Option<(u64, &dyn crypto::PacketKey)>, + ) { + let Self { header_len, pn, .. } = self; + let (pn_len, write_len) = match pn { + Some((pn_len, write_len)) => (pn_len, write_len), + None => return, + }; + + let pn_pos = header_len - pn_len; + if write_len { + let len = buf.len() - header_len + pn_len; + assert!(len < 2usize.pow(14)); // Fits in reserved space + let mut slice = &mut buf[pn_pos - 2..pn_pos]; + slice.put_u16(len as u16 | (0b01 << 14)); + } + + if let Some((number, crypto)) = crypto { + crypto.encrypt(number, buf, header_len); + } + + debug_assert!( + pn_pos + 4 + header_crypto.sample_size() <= buf.len(), + "packet must be padded to at least {} bytes for header protection sampling", + pn_pos + 4 + header_crypto.sample_size() + ); + header_crypto.encrypt(pn_pos, buf); + } +} + +/// Plain packet header +#[derive(Clone, Debug)] +pub enum ProtectedHeader { + /// An Initial packet header + Initial(ProtectedInitialHeader), + /// A Long packet header, as used during the handshake + Long { + /// Type of the Long header packet + ty: LongType, + /// Destination Connection ID + dst_cid: ConnectionId, + /// Source Connection ID + src_cid: ConnectionId, + /// Length of the packet payload + len: u64, + /// QUIC version + 
version: u32, + }, + /// A Retry packet header + Retry { + /// Destination Connection ID + dst_cid: ConnectionId, + /// Source Connection ID + src_cid: ConnectionId, + /// QUIC version + version: u32, + }, + /// A short packet header, as used during the data phase + Short { + /// Spin bit + spin: bool, + /// Destination Connection ID + dst_cid: ConnectionId, + }, + /// A Version Negotiation packet header + VersionNegotiate { + /// Random value + random: u8, + /// Destination Connection ID + dst_cid: ConnectionId, + /// Source Connection ID + src_cid: ConnectionId, + }, +} + +impl ProtectedHeader { + fn as_initial(&self) -> Option<&ProtectedInitialHeader> { + match self { + Self::Initial(x) => Some(x), + _ => None, + } + } + + /// The destination Connection ID of the packet + pub fn dst_cid(&self) -> &ConnectionId { + use ProtectedHeader::*; + match self { + Initial(header) => &header.dst_cid, + Long { dst_cid, .. } => dst_cid, + Retry { dst_cid, .. } => dst_cid, + Short { dst_cid, .. } => dst_cid, + VersionNegotiate { dst_cid, .. } => dst_cid, + } + } + + fn payload_len(&self) -> Option { + use ProtectedHeader::*; + match self { + Initial(ProtectedInitialHeader { len, .. }) | Long { len, .. } => Some(*len), + _ => None, + } + } + + /// Decode a plain header from given buffer, with given [`ConnectionIdParser`]. 
+ pub fn decode( + buf: &mut io::Cursor, + cid_parser: &(impl ConnectionIdParser + ?Sized), + supported_versions: &[u32], + grease_quic_bit: bool, + ) -> Result { + let first = buf.get::()?; + if !grease_quic_bit && first & FIXED_BIT == 0 { + return Err(PacketDecodeError::InvalidHeader("fixed bit unset")); + } + if first & LONG_HEADER_FORM == 0 { + let spin = first & SPIN_BIT != 0; + + Ok(Self::Short { + spin, + dst_cid: cid_parser.parse(buf)?, + }) + } else { + let version = buf.get::()?; + + let dst_cid = ConnectionId::decode_long(buf) + .ok_or(PacketDecodeError::InvalidHeader("malformed cid"))?; + let src_cid = ConnectionId::decode_long(buf) + .ok_or(PacketDecodeError::InvalidHeader("malformed cid"))?; + + // TODO: Support long CIDs for compatibility with future QUIC versions + if version == 0 { + let random = first & !LONG_HEADER_FORM; + return Ok(Self::VersionNegotiate { + random, + dst_cid, + src_cid, + }); + } + + if !supported_versions.contains(&version) { + return Err(PacketDecodeError::UnsupportedVersion { + src_cid, + dst_cid, + version, + }); + } + + match LongHeaderType::from_byte(first)? { + LongHeaderType::Initial => { + let token_len = buf.get_var()? 
as usize; + let token_start = buf.position() as usize; + if token_len > buf.remaining() { + return Err(PacketDecodeError::InvalidHeader("token out of bounds")); + } + buf.advance(token_len); + + let len = buf.get_var()?; + Ok(Self::Initial(ProtectedInitialHeader { + dst_cid, + src_cid, + token_pos: token_start..token_start + token_len, + len, + version, + })) + } + LongHeaderType::Retry => Ok(Self::Retry { + dst_cid, + src_cid, + version, + }), + LongHeaderType::Standard(ty) => Ok(Self::Long { + ty, + dst_cid, + src_cid, + len: buf.get_var()?, + version, + }), + } + } + } +} + +/// Header of an Initial packet, before decryption +#[derive(Clone, Debug)] +pub struct ProtectedInitialHeader { + /// Destination Connection ID + pub dst_cid: ConnectionId, + /// Source Connection ID + pub src_cid: ConnectionId, + /// The position of a token in the packet buffer + pub token_pos: Range, + /// Length of the packet payload + pub len: u64, + /// QUIC version + pub version: u32, +} + +#[derive(Clone, Debug)] +pub(crate) struct InitialHeader { + pub(crate) dst_cid: ConnectionId, + pub(crate) src_cid: ConnectionId, + pub(crate) token: Bytes, + pub(crate) number: PacketNumber, + pub(crate) version: u32, +} + +// An encoded packet number +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub(crate) enum PacketNumber { + U8(u8), + U16(u16), + U24(u32), + U32(u32), +} + +impl PacketNumber { + pub(crate) fn new(n: u64, largest_acked: u64) -> Self { + let range = (n - largest_acked) * 2; + if range < 1 << 8 { + Self::U8(n as u8) + } else if range < 1 << 16 { + Self::U16(n as u16) + } else if range < 1 << 24 { + Self::U24(n as u32) + } else if range < 1 << 32 { + Self::U32(n as u32) + } else { + panic!("packet number too large to encode") + } + } + + pub(crate) fn len(self) -> usize { + use PacketNumber::*; + match self { + U8(_) => 1, + U16(_) => 2, + U24(_) => 3, + U32(_) => 4, + } + } + + pub(crate) fn encode(self, w: &mut W) { + use PacketNumber::*; + match self { + U8(x) => w.write(x), 
+ U16(x) => w.write(x), + U24(x) => w.put_uint(u64::from(x), 3), + U32(x) => w.write(x), + } + } + + pub(crate) fn decode(len: usize, r: &mut R) -> Result { + use PacketNumber::*; + let pn = match len { + 1 => U8(r.get()?), + 2 => U16(r.get()?), + 3 => U24(r.get_uint(3) as u32), + 4 => U32(r.get()?), + _ => unreachable!(), + }; + Ok(pn) + } + + pub(crate) fn decode_len(tag: u8) -> usize { + 1 + (tag & 0x03) as usize + } + + fn tag(self) -> u8 { + use PacketNumber::*; + match self { + U8(_) => 0b00, + U16(_) => 0b01, + U24(_) => 0b10, + U32(_) => 0b11, + } + } + + pub(crate) fn expand(self, expected: u64) -> u64 { + // From Appendix A + use PacketNumber::*; + let truncated = match self { + U8(x) => u64::from(x), + U16(x) => u64::from(x), + U24(x) => u64::from(x), + U32(x) => u64::from(x), + }; + let nbits = self.len() * 8; + let win = 1 << nbits; + let hwin = win / 2; + let mask = win - 1; + // The incoming packet number should be greater than expected - hwin and less than or equal + // to expected + hwin + // + // This means we can't just strip the trailing bits from expected and add the truncated + // because that might yield a value outside the window. + // + // The following code calculates a candidate value and makes sure it's within the packet + // number window. 
+ let candidate = (expected & !mask) | truncated; + if expected.checked_sub(hwin).is_some_and(|x| candidate <= x) { + candidate + win + } else if candidate > expected + hwin && candidate > win { + candidate - win + } else { + candidate + } + } +} + +/// A [`ConnectionIdParser`] implementation that assumes the connection ID is of fixed length +pub struct FixedLengthConnectionIdParser { + expected_len: usize, +} + +impl FixedLengthConnectionIdParser { + /// Create a new instance of `FixedLengthConnectionIdParser` + pub fn new(expected_len: usize) -> Self { + Self { expected_len } + } +} + +impl ConnectionIdParser for FixedLengthConnectionIdParser { + fn parse(&self, buffer: &mut dyn Buf) -> Result { + (buffer.remaining() >= self.expected_len) + .then(|| ConnectionId::from_buf(buffer, self.expected_len)) + .ok_or(PacketDecodeError::InvalidHeader("packet too small")) + } +} + +/// Parse connection id in short header packet +pub trait ConnectionIdParser { + /// Parse a connection id from given buffer + fn parse(&self, buf: &mut dyn Buf) -> Result; +} + +/// Long packet type including non-uniform cases +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub(crate) enum LongHeaderType { + Initial, + Retry, + Standard(LongType), +} + +impl LongHeaderType { + fn from_byte(b: u8) -> Result { + use {LongHeaderType::*, LongType::*}; + debug_assert!(b & LONG_HEADER_FORM != 0, "not a long packet"); + Ok(match (b & 0x30) >> 4 { + 0x0 => Initial, + 0x1 => Standard(ZeroRtt), + 0x2 => Standard(Handshake), + 0x3 => Retry, + _ => unreachable!(), + }) + } +} + +impl From for u8 { + fn from(ty: LongHeaderType) -> Self { + use {LongHeaderType::*, LongType::*}; + match ty { + Initial => LONG_HEADER_FORM | FIXED_BIT, + Standard(ZeroRtt) => LONG_HEADER_FORM | FIXED_BIT | (0x1 << 4), + Standard(Handshake) => LONG_HEADER_FORM | FIXED_BIT | (0x2 << 4), + Retry => LONG_HEADER_FORM | FIXED_BIT | (0x3 << 4), + } + } +} + +/// Long packet types with uniform header structure +#[derive(Clone, Copy, 
Debug, Eq, PartialEq)] +pub enum LongType { + /// Handshake packet + Handshake, + /// 0-RTT packet + ZeroRtt, +} + +/// Packet decode error +#[derive(Debug, Error, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum PacketDecodeError { + /// Packet uses a QUIC version that is not supported + #[error("unsupported version {version:x}")] + UnsupportedVersion { + /// Source Connection ID + src_cid: ConnectionId, + /// Destination Connection ID + dst_cid: ConnectionId, + /// The version that was unsupported + version: u32, + }, + /// The packet header is invalid + #[error("invalid header: {0}")] + InvalidHeader(&'static str), +} + +impl From for PacketDecodeError { + fn from(_: coding::UnexpectedEnd) -> Self { + Self::InvalidHeader("unexpected end of packet") + } +} + +pub(crate) const LONG_HEADER_FORM: u8 = 0x80; +pub(crate) const FIXED_BIT: u8 = 0x40; +pub(crate) const SPIN_BIT: u8 = 0x20; +const SHORT_RESERVED_BITS: u8 = 0x18; +const LONG_RESERVED_BITS: u8 = 0x0c; +const KEY_PHASE_BIT: u8 = 0x04; + +/// Packet number space identifiers +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] +pub enum SpaceId { + /// Unprotected packets, used to bootstrap the handshake + Initial = 0, + Handshake = 1, + /// Application data space, used for 0-RTT and post-handshake/1-RTT packets + Data = 2, +} + +impl SpaceId { + pub fn iter() -> impl Iterator { + [Self::Initial, Self::Handshake, Self::Data].iter().cloned() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + use std::io; + + fn check_pn(typed: PacketNumber, encoded: &[u8]) { + let mut buf = Vec::new(); + typed.encode(&mut buf); + assert_eq!(&buf[..], encoded); + let decoded = PacketNumber::decode(typed.len(), &mut io::Cursor::new(&buf)).unwrap(); + assert_eq!(typed, decoded); + } + + #[test] + fn roundtrip_packet_numbers() { + check_pn(PacketNumber::U8(0x7f), &hex!("7f")); + check_pn(PacketNumber::U16(0x80), &hex!("0080")); + check_pn(PacketNumber::U16(0x3fff), &hex!("3fff")); + 
check_pn(PacketNumber::U32(0x0000_4000), &hex!("0000 4000")); + check_pn(PacketNumber::U32(0xffff_ffff), &hex!("ffff ffff")); + } + + #[test] + fn pn_encode() { + check_pn(PacketNumber::new(0x10, 0), &hex!("10")); + check_pn(PacketNumber::new(0x100, 0), &hex!("0100")); + check_pn(PacketNumber::new(0x10000, 0), &hex!("010000")); + } + + #[test] + fn pn_expand_roundtrip() { + for expected in 0..1024 { + for actual in expected..1024 { + assert_eq!(actual, PacketNumber::new(actual, expected).expand(expected)); + } + } + } + + #[cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] + #[test] + fn header_encoding() { + use crate::Side; + use crate::crypto::rustls::{initial_keys, initial_suite_from_provider}; + #[cfg(all(feature = "rustls-aws-lc-rs", not(feature = "rustls-ring")))] + use rustls::crypto::aws_lc_rs::default_provider; + #[cfg(feature = "rustls-ring")] + use rustls::crypto::ring::default_provider; + use rustls::quic::Version; + + let dcid = ConnectionId::new(&hex!("06b858ec6f80452b")); + let provider = default_provider(); + + let suite = initial_suite_from_provider(&std::sync::Arc::new(provider)).unwrap(); + let client = initial_keys(Version::V1, dcid, Side::Client, &suite); + let mut buf = Vec::new(); + let header = Header::Initial(InitialHeader { + number: PacketNumber::U8(0), + src_cid: ConnectionId::new(&[]), + dst_cid: dcid, + token: Bytes::new(), + version: crate::DEFAULT_SUPPORTED_VERSIONS[0], + }); + let encode = header.encode(&mut buf); + let header_len = buf.len(); + buf.resize(header_len + 16 + client.packet.local.tag_len(), 0); + encode.finish( + &mut buf, + &*client.header.local, + Some((0, &*client.packet.local)), + ); + + for byte in &buf { + print!("{byte:02x}"); + } + println!(); + assert_eq!( + buf[..], + hex!( + "c8000000010806b858ec6f80452b00004021be + 3ef50807b84191a196f760a6dad1e9d1c430c48952cba0148250c21c0a6a70e1" + )[..] 
+ ); + + let server = initial_keys(Version::V1, dcid, Side::Server, &suite); + let supported_versions = crate::DEFAULT_SUPPORTED_VERSIONS.to_vec(); + let decode = PartialDecode::new( + buf.as_slice().into(), + &FixedLengthConnectionIdParser::new(0), + &supported_versions, + false, + ) + .unwrap() + .0; + let mut packet = decode.finish(Some(&*server.header.remote)).unwrap(); + assert_eq!( + packet.header_data[..], + hex!("c0000000010806b858ec6f80452b0000402100")[..] + ); + server + .packet + .remote + .decrypt(0, &packet.header_data, &mut packet.payload) + .unwrap(); + assert_eq!(packet.payload[..], [0; 16]); + match packet.header { + Header::Initial(InitialHeader { + number: PacketNumber::U8(0), + .. + }) => {} + _ => { + panic!("unexpected header {:?}", packet.header); + } + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/array_range_set.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/array_range_set.rs new file mode 100644 index 0000000000000000000000000000000000000000..f273391a9fba98e9308ea986a42c433cf36ecad1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/array_range_set.rs @@ -0,0 +1,209 @@ +use std::ops::Range; + +use tinyvec::TinyVec; + +/// A set of u64 values optimized for long runs and random insert/delete/contains +/// +/// `ArrayRangeSet` uses an array representation, where each array entry represents +/// a range. +/// +/// The array-based RangeSet provides 2 benefits: +/// - There exists an inline representation, which avoids the need of heap +/// allocating ACK ranges for SentFrames for small ranges. +/// - Iterating over ranges should usually be faster since there is only +/// a single cache-friendly contiguous range. 
+/// +/// `ArrayRangeSet` is especially useful for tracking ACK ranges where the amount +/// of ranges is usually very low (since ACK numbers are in consecutive fashion +/// unless reordering or packet loss occur). +#[derive(Debug, Default)] +pub struct ArrayRangeSet(TinyVec<[Range; ARRAY_RANGE_SET_INLINE_CAPACITY]>); + +/// The capacity of elements directly stored in [`ArrayRangeSet`] +/// +/// An inline capacity of 2 is chosen to keep `SentFrame` below 128 bytes. +const ARRAY_RANGE_SET_INLINE_CAPACITY: usize = 2; + +impl Clone for ArrayRangeSet { + fn clone(&self) -> Self { + // tinyvec keeps the heap representation after clones. + // We rather prefer the inline representation for clones if possible, + // since clones (e.g. for storage in `SentFrames`) are rarely mutated + if self.0.is_inline() || self.0.len() > ARRAY_RANGE_SET_INLINE_CAPACITY { + return Self(self.0.clone()); + } + + let mut vec = TinyVec::new(); + vec.extend_from_slice(self.0.as_slice()); + Self(vec) + } +} + +impl ArrayRangeSet { + pub fn new() -> Self { + Default::default() + } + + pub fn iter(&self) -> impl DoubleEndedIterator> + '_ { + self.0.iter().cloned() + } + + pub fn elts(&self) -> impl Iterator + '_ { + self.iter().flatten() + } + + pub fn len(&self) -> usize { + self.0.len() + } + + pub fn contains(&self, x: u64) -> bool { + for range in self.0.iter() { + if range.start > x { + // We only get here if there was no prior range that contained x + return false; + } else if range.contains(&x) { + return true; + } + } + false + } + + pub fn subtract(&mut self, other: &Self) { + // TODO: This can potentially be made more efficient, since the we know + // individual ranges are not overlapping, and the next range must start + // after the last one finished + for range in &other.0 { + self.remove(range.clone()); + } + } + + pub fn insert_one(&mut self, x: u64) -> bool { + self.insert(x..x + 1) + } + + pub fn insert(&mut self, x: Range) -> bool { + let mut result = false; + + if x.is_empty() { 
+ // Don't try to deal with ranges where x.end <= x.start + return false; + } + + let mut idx = 0; + while idx != self.0.len() { + let range = &mut self.0[idx]; + + if range.start > x.end { + // The range is fully before this range and therefore not extensible. + // Add a new range to the left + self.0.insert(idx, x); + return true; + } else if range.start > x.start { + // The new range starts before this range but overlaps. + // Extend the current range to the left + // Note that we don't have to merge a potential left range, since + // this case would have been captured by merging the right range + // in the previous loop iteration + result = true; + range.start = x.start; + } + + // At this point we have handled all parts of the new range which + // are in front of the current range. Now we handle everything from + // the start of the current range + + if x.end <= range.end { + // Fully contained + return result; + } else if x.start <= range.end { + // Extend the current range to the end of the new range. 
+ // Since it's not contained it must be bigger + range.end = x.end; + + // Merge all follow-up ranges which overlap + while idx != self.0.len() - 1 { + let curr = self.0[idx].clone(); + let next = self.0[idx + 1].clone(); + if curr.end >= next.start { + self.0[idx].end = next.end.max(curr.end); + self.0.remove(idx + 1); + } else { + break; + } + } + + return true; + } + + idx += 1; + } + + // Insert a range at the end + self.0.push(x); + true + } + + pub fn remove(&mut self, x: Range) -> bool { + let mut result = false; + + if x.is_empty() { + // Don't try to deal with ranges where x.end <= x.start + return false; + } + + let mut idx = 0; + while idx != self.0.len() && x.start != x.end { + let range = self.0[idx].clone(); + + if x.end <= range.start { + // The range is fully before this range + return result; + } else if x.start >= range.end { + // The range is fully after this range + idx += 1; + continue; + } + + // The range overlaps with this range + result = true; + + let left = range.start..x.start; + let right = x.end..range.end; + if left.is_empty() && right.is_empty() { + self.0.remove(idx); + } else if left.is_empty() { + self.0[idx] = right; + idx += 1; + } else if right.is_empty() { + self.0[idx] = left; + idx += 1; + } else { + self.0[idx] = right; + self.0.insert(idx, left); + idx += 2; + } + } + + result + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub fn pop_min(&mut self) -> Option> { + if !self.0.is_empty() { + Some(self.0.remove(0)) + } else { + None + } + } + + pub fn min(&self) -> Option { + self.iter().next().map(|x| x.start) + } + + pub fn max(&self) -> Option { + self.iter().next_back().map(|x| x.end - 1) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/btree_range_set.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/btree_range_set.rs new file mode 100644 index 
0000000000000000000000000000000000000000..9121bd9cb057850696ed148bea382b61be06498b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/btree_range_set.rs @@ -0,0 +1,381 @@ +use std::{ + cmp, + cmp::Ordering, + collections::{BTreeMap, btree_map}, + ops::{ + Bound::{Excluded, Included}, + Range, + }, +}; + +/// A set of u64 values optimized for long runs and random insert/delete/contains +#[derive(Debug, Default, Clone)] +pub struct RangeSet(BTreeMap); + +impl RangeSet { + pub fn new() -> Self { + Default::default() + } + + pub fn contains(&self, x: u64) -> bool { + self.pred(x).is_some_and(|(_, end)| end > x) + } + + pub fn insert_one(&mut self, x: u64) -> bool { + if let Some((start, end)) = self.pred(x) { + match end.cmp(&x) { + // Wholly contained + Ordering::Greater => { + return false; + } + Ordering::Equal => { + // Extend existing + self.0.remove(&start); + let mut new_end = x + 1; + if let Some((next_start, next_end)) = self.succ(x) { + if next_start == new_end { + self.0.remove(&next_start); + new_end = next_end; + } + } + self.0.insert(start, new_end); + return true; + } + _ => {} + } + } + let mut new_end = x + 1; + if let Some((next_start, next_end)) = self.succ(x) { + if next_start == new_end { + self.0.remove(&next_start); + new_end = next_end; + } + } + self.0.insert(x, new_end); + true + } + + pub fn insert(&mut self, mut x: Range) -> bool { + if x.is_empty() { + return false; + } + if let Some((start, end)) = self.pred(x.start) { + if end >= x.end { + // Wholly contained + return false; + } else if end >= x.start { + // Extend overlapping predecessor + self.0.remove(&start); + x.start = start; + } + } + while let Some((next_start, next_end)) = self.succ(x.start) { + if next_start > x.end { + break; + } + // Overlaps with successor + self.0.remove(&next_start); + x.end = cmp::max(next_end, x.end); + } + self.0.insert(x.start, x.end); + true + } + + /// Find closest range to `x` that begins at or 
before it + fn pred(&self, x: u64) -> Option<(u64, u64)> { + self.0 + .range((Included(0), Included(x))) + .next_back() + .map(|(&x, &y)| (x, y)) + } + + /// Find the closest range to `x` that begins after it + fn succ(&self, x: u64) -> Option<(u64, u64)> { + self.0 + .range((Excluded(x), Included(u64::MAX))) + .next() + .map(|(&x, &y)| (x, y)) + } + + pub fn remove(&mut self, x: Range) -> bool { + if x.is_empty() { + return false; + } + + let before = match self.pred(x.start) { + Some((start, end)) if end > x.start => { + self.0.remove(&start); + if start < x.start { + self.0.insert(start, x.start); + } + if end > x.end { + self.0.insert(x.end, end); + } + // Short-circuit if we cannot possibly overlap with another range + if end >= x.end { + return true; + } + true + } + Some(_) | None => false, + }; + let mut after = false; + while let Some((start, end)) = self.succ(x.start) { + if start >= x.end { + break; + } + after = true; + self.0.remove(&start); + if end > x.end { + self.0.insert(x.end, end); + break; + } + } + before || after + } + + /// Add a range to the set, returning the intersection of current ranges with the new one + pub fn replace(&mut self, mut range: Range) -> Replace<'_> { + let pred = if let Some((prev_start, prev_end)) = self + .pred(range.start) + .filter(|&(_, end)| end >= range.start) + { + self.0.remove(&prev_start); + let replaced_start = range.start; + range.start = range.start.min(prev_start); + let replaced_end = range.end.min(prev_end); + range.end = range.end.max(prev_end); + if replaced_start != replaced_end { + Some(replaced_start..replaced_end) + } else { + None + } + } else { + None + }; + Replace { + set: self, + range, + pred, + } + } + + pub fn add(&mut self, other: &Self) { + for (&start, &end) in &other.0 { + self.insert(start..end); + } + } + + pub fn subtract(&mut self, other: &Self) { + for (&start, &end) in &other.0 { + self.remove(start..end); + } + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub 
fn min(&self) -> Option { + self.0.first_key_value().map(|(&start, _)| start) + } + + pub fn max(&self) -> Option { + self.0.last_key_value().map(|(_, &end)| end - 1) + } + + pub fn len(&self) -> usize { + self.0.len() + } + pub fn iter(&self) -> Iter<'_> { + Iter(self.0.iter()) + } + pub fn elts(&self) -> EltIter<'_> { + EltIter { + inner: self.0.iter(), + next: 0, + end: 0, + } + } + + pub fn peek_min(&self) -> Option> { + let (&start, &end) = self.0.iter().next()?; + Some(start..end) + } + + pub fn pop_min(&mut self) -> Option> { + let result = self.peek_min()?; + self.0.remove(&result.start); + Some(result) + } +} + +pub struct Iter<'a>(btree_map::Iter<'a, u64, u64>); + +impl Iterator for Iter<'_> { + type Item = Range; + fn next(&mut self) -> Option> { + let (&start, &end) = self.0.next()?; + Some(start..end) + } +} + +impl DoubleEndedIterator for Iter<'_> { + fn next_back(&mut self) -> Option> { + let (&start, &end) = self.0.next_back()?; + Some(start..end) + } +} + +impl<'a> IntoIterator for &'a RangeSet { + type Item = Range; + type IntoIter = Iter<'a>; + fn into_iter(self) -> Iter<'a> { + self.iter() + } +} + +pub struct EltIter<'a> { + inner: btree_map::Iter<'a, u64, u64>, + next: u64, + end: u64, +} + +impl Iterator for EltIter<'_> { + type Item = u64; + fn next(&mut self) -> Option { + if self.next == self.end { + let (&start, &end) = self.inner.next()?; + self.next = start; + self.end = end; + } + let x = self.next; + self.next += 1; + Some(x) + } +} + +impl DoubleEndedIterator for EltIter<'_> { + fn next_back(&mut self) -> Option { + if self.next == self.end { + let (&start, &end) = self.inner.next_back()?; + self.next = start; + self.end = end; + } + self.end -= 1; + Some(self.end) + } +} + +/// Iterator returned by `RangeSet::replace` +pub struct Replace<'a> { + set: &'a mut RangeSet, + /// Portion of the intersection arising from a range beginning at or before the newly inserted + /// range + pred: Option>, + /// Union of the input range and all 
ranges that have been visited by the iterator so far + range: Range, +} + +impl Iterator for Replace<'_> { + type Item = Range; + fn next(&mut self) -> Option> { + if let Some(pred) = self.pred.take() { + // If a range starting before the inserted range overlapped with it, return the + // corresponding overlap first + return Some(pred); + } + + let (next_start, next_end) = self.set.succ(self.range.start)?; + if next_start > self.range.end { + // If the next successor range starts after the current range ends, there can be no more + // overlaps. This is sound even when `self.range.end` is increased because `RangeSet` is + // guaranteed not to contain pairs of ranges that could be simplified. + return None; + } + // Remove the redundant range... + self.set.0.remove(&next_start); + // ...and handle the case where the redundant range ends later than the new range. + let replaced_end = self.range.end.min(next_end); + self.range.end = self.range.end.max(next_end); + if next_start == replaced_end { + // If the redundant range started exactly where the new range ended, there was no + // overlap with it or any later range. 
+ None + } else { + Some(next_start..replaced_end) + } + } +} + +impl Drop for Replace<'_> { + fn drop(&mut self) { + // Ensure we drain all remaining overlapping ranges + for _ in &mut *self {} + // Insert the final aggregate range + self.set.0.insert(self.range.start, self.range.end); + } +} + +/// This module contains tests which only apply for this `RangeSet` implementation +/// +/// Tests which apply for all implementations can be found in the `tests.rs` module +#[cfg(test)] +mod tests { + #![allow(clippy::single_range_in_vec_init)] // https://github.com/rust-lang/rust-clippy/issues/11086 + use super::*; + + #[test] + fn replace_contained() { + let mut set = RangeSet::new(); + set.insert(2..4); + assert_eq!(set.replace(1..5).collect::>(), &[2..4]); + assert_eq!(set.len(), 1); + assert_eq!(set.peek_min().unwrap(), 1..5); + } + + #[test] + fn replace_contains() { + let mut set = RangeSet::new(); + set.insert(1..5); + assert_eq!(set.replace(2..4).collect::>(), &[2..4]); + assert_eq!(set.len(), 1); + assert_eq!(set.peek_min().unwrap(), 1..5); + } + + #[test] + fn replace_pred() { + let mut set = RangeSet::new(); + set.insert(2..4); + assert_eq!(set.replace(3..5).collect::>(), &[3..4]); + assert_eq!(set.len(), 1); + assert_eq!(set.peek_min().unwrap(), 2..5); + } + + #[test] + fn replace_succ() { + let mut set = RangeSet::new(); + set.insert(2..4); + assert_eq!(set.replace(1..3).collect::>(), &[2..3]); + assert_eq!(set.len(), 1); + assert_eq!(set.peek_min().unwrap(), 1..4); + } + + #[test] + fn replace_exact_pred() { + let mut set = RangeSet::new(); + set.insert(2..4); + assert_eq!(set.replace(4..6).collect::>(), &[]); + assert_eq!(set.len(), 1); + assert_eq!(set.peek_min().unwrap(), 2..6); + } + + #[test] + fn replace_exact_succ() { + let mut set = RangeSet::new(); + set.insert(2..4); + assert_eq!(set.replace(0..2).collect::>(), &[]); + assert_eq!(set.len(), 1); + assert_eq!(set.peek_min().unwrap(), 0..4); + } +} diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..9f16e7e86ce3c22aa804e13908d118e757535fa5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/mod.rs @@ -0,0 +1,7 @@ +mod array_range_set; +mod btree_range_set; +#[cfg(test)] +mod tests; + +pub(crate) use array_range_set::ArrayRangeSet; +pub(crate) use btree_range_set::RangeSet; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/tests.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..1e75da4dcf95e908a1d9fd2ba2c6f2adedac2c77 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/range_set/tests.rs @@ -0,0 +1,263 @@ +use std::ops::Range; + +use super::*; + +macro_rules! 
common_set_tests { + ($set_name:ident, $set_type:ident) => { + mod $set_name { + use super::*; + + #[test] + fn merge_and_split() { + let mut set = $set_type::new(); + assert!(set.insert(0..2)); + assert!(set.insert(2..4)); + assert!(!set.insert(1..3)); + assert_eq!(set.len(), 1); + assert_eq!(&set.elts().collect::>()[..], [0, 1, 2, 3]); + assert!(!set.contains(4)); + assert!(set.remove(2..3)); + assert_eq!(set.len(), 2); + assert!(!set.contains(2)); + assert_eq!(&set.elts().collect::>()[..], [0, 1, 3]); + } + + #[test] + fn double_merge_exact() { + let mut set = $set_type::new(); + assert!(set.insert(0..2)); + assert!(set.insert(4..6)); + assert_eq!(set.len(), 2); + assert!(set.insert(2..4)); + assert_eq!(set.len(), 1); + assert_eq!(&set.elts().collect::>()[..], [0, 1, 2, 3, 4, 5]); + } + + #[test] + fn single_merge_low() { + let mut set = $set_type::new(); + assert!(set.insert(0..2)); + assert!(set.insert(4..6)); + assert_eq!(set.len(), 2); + assert!(set.insert(2..3)); + assert_eq!(set.len(), 2); + assert_eq!(&set.elts().collect::>()[..], [0, 1, 2, 4, 5]); + } + + #[test] + fn single_merge_high() { + let mut set = $set_type::new(); + assert!(set.insert(0..2)); + assert!(set.insert(4..6)); + assert_eq!(set.len(), 2); + assert!(set.insert(3..4)); + assert_eq!(set.len(), 2); + assert_eq!(&set.elts().collect::>()[..], [0, 1, 3, 4, 5]); + } + + #[test] + fn double_merge_wide() { + let mut set = $set_type::new(); + assert!(set.insert(0..2)); + assert!(set.insert(4..6)); + assert_eq!(set.len(), 2); + assert!(set.insert(1..5)); + assert_eq!(set.len(), 1); + assert_eq!(&set.elts().collect::>()[..], [0, 1, 2, 3, 4, 5]); + } + + #[test] + fn double_remove() { + let mut set = $set_type::new(); + assert!(set.insert(0..2)); + assert!(set.insert(4..6)); + assert!(set.remove(1..5)); + assert_eq!(set.len(), 2); + assert_eq!(&set.elts().collect::>()[..], [0, 5]); + } + + #[test] + fn insert_multiple() { + let mut set = $set_type::new(); + assert!(set.insert(0..1)); + 
assert!(set.insert(2..3)); + assert!(set.insert(4..5)); + assert!(set.insert(0..5)); + assert_eq!(set.len(), 1); + } + + #[test] + fn remove_multiple() { + let mut set = $set_type::new(); + assert!(set.insert(0..1)); + assert!(set.insert(2..3)); + assert!(set.insert(4..5)); + assert!(set.remove(0..5)); + assert!(set.is_empty()); + } + + #[test] + fn double_insert() { + let mut set = $set_type::new(); + assert!(set.insert(0..2)); + assert!(!set.insert(0..2)); + assert!(set.insert(2..4)); + assert!(!set.insert(2..4)); + assert!(!set.insert(0..4)); + assert!(!set.insert(1..2)); + assert!(!set.insert(1..3)); + assert!(!set.insert(1..4)); + assert_eq!(set.len(), 1); + } + + #[test] + fn skip_empty_ranges() { + let mut set = $set_type::new(); + assert!(!set.insert(2..2)); + assert_eq!(set.len(), 0); + assert!(!set.insert(4..4)); + assert_eq!(set.len(), 0); + assert!(!set.insert(0..0)); + assert_eq!(set.len(), 0); + } + + #[test] + fn compare_insert_to_reference() { + const MAX_RANGE: u64 = 50; + + for start in 0..=MAX_RANGE { + for end in 0..=MAX_RANGE { + println!("insert({}..{})", start, end); + let (mut set, mut reference) = create_initial_sets(MAX_RANGE); + assert_eq!(set.insert(start..end), reference.insert(start..end)); + assert_sets_equal(&set, &reference); + } + } + } + + #[test] + fn compare_remove_to_reference() { + const MAX_RANGE: u64 = 50; + + for start in 0..=MAX_RANGE { + for end in 0..=MAX_RANGE { + println!("remove({}..{})", start, end); + let (mut set, mut reference) = create_initial_sets(MAX_RANGE); + assert_eq!(set.remove(start..end), reference.remove(start..end)); + assert_sets_equal(&set, &reference); + } + } + } + + #[test] + fn min_max() { + let mut set = $set_type::new(); + set.insert(1..3); + set.insert(4..5); + set.insert(6..10); + assert_eq!(set.min(), Some(1)); + assert_eq!(set.max(), Some(9)); + } + + fn create_initial_sets(max_range: u64) -> ($set_type, RefRangeSet) { + let mut set = $set_type::new(); + let mut reference = 
RefRangeSet::new(max_range as usize); + assert_sets_equal(&set, &reference); + + assert_eq!(set.insert(2..6), reference.insert(2..6)); + assert_eq!(set.insert(10..14), reference.insert(10..14)); + assert_eq!(set.insert(14..14), reference.insert(14..14)); + assert_eq!(set.insert(18..19), reference.insert(18..19)); + assert_eq!(set.insert(20..21), reference.insert(20..21)); + assert_eq!(set.insert(22..24), reference.insert(22..24)); + assert_eq!(set.insert(26..30), reference.insert(26..30)); + assert_eq!(set.insert(34..38), reference.insert(34..38)); + assert_eq!(set.insert(42..44), reference.insert(42..44)); + + assert_sets_equal(&set, &reference); + + (set, reference) + } + + fn assert_sets_equal(set: &$set_type, reference: &RefRangeSet) { + assert_eq!(set.len(), reference.len()); + assert_eq!(set.is_empty(), reference.is_empty()); + assert_eq!(set.elts().collect::>()[..], reference.elts()[..]); + } + } + }; +} + +common_set_tests!(range_set, RangeSet); +common_set_tests!(array_range_set, ArrayRangeSet); + +/// A very simple reference implementation of a RangeSet +struct RefRangeSet { + data: Vec, +} + +impl RefRangeSet { + fn new(capacity: usize) -> Self { + Self { + data: vec![false; capacity], + } + } + + fn len(&self) -> usize { + let mut last = false; + let mut count = 0; + + for v in self.data.iter() { + if !last && *v { + count += 1; + } + last = *v; + } + + count + } + + fn is_empty(&self) -> bool { + self.len() == 0 + } + + fn insert(&mut self, x: Range) -> bool { + let mut result = false; + + assert!(x.end <= self.data.len() as u64); + + for i in x { + let i = i as usize; + if !self.data[i] { + result = true; + self.data[i] = true; + } + } + + result + } + + fn remove(&mut self, x: Range) -> bool { + let mut result = false; + + assert!(x.end <= self.data.len() as u64); + + for i in x { + let i = i as usize; + if self.data[i] { + result = true; + self.data[i] = false; + } + } + + result + } + + fn elts(&self) -> Vec { + self.data + .iter() + .enumerate() + 
.filter_map(|(i, e)| if *e { Some(i as u64) } else { None }) + .collect() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/shared.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/shared.rs new file mode 100644 index 0000000000000000000000000000000000000000..f2d0ad5d2ac1e548a1549a3e1d556f2bb0df3a2c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/shared.rs @@ -0,0 +1,180 @@ +use std::{fmt, net::SocketAddr}; + +use bytes::{Buf, BufMut, BytesMut}; + +use crate::{Instant, MAX_CID_SIZE, ResetToken, coding::BufExt, packet::PartialDecode}; + +/// Events sent from an Endpoint to a Connection +#[derive(Debug)] +pub struct ConnectionEvent(pub(crate) ConnectionEventInner); + +#[derive(Debug)] +pub(crate) enum ConnectionEventInner { + /// A datagram has been received for the Connection + Datagram(DatagramConnectionEvent), + /// New connection identifiers have been issued for the Connection + NewIdentifiers(Vec, Instant), +} + +/// Variant of [`ConnectionEventInner`]. +#[derive(Debug)] +pub(crate) struct DatagramConnectionEvent { + pub(crate) now: Instant, + pub(crate) remote: SocketAddr, + pub(crate) ecn: Option, + pub(crate) first_decode: PartialDecode, + pub(crate) remaining: Option, +} + +/// Events sent from a Connection to an Endpoint +#[derive(Debug)] +pub struct EndpointEvent(pub(crate) EndpointEventInner); + +impl EndpointEvent { + /// Construct an event that indicating that a `Connection` will no longer emit events + /// + /// Useful for notifying an `Endpoint` that a `Connection` has been destroyed outside of the + /// usual state machine flow, e.g. when being dropped by the user. + pub fn drained() -> Self { + Self(EndpointEventInner::Drained) + } + + /// Determine whether this is the last event a `Connection` will emit + /// + /// Useful for determining when connection-related event loop state can be freed. 
+ pub fn is_drained(&self) -> bool { + self.0 == EndpointEventInner::Drained + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub(crate) enum EndpointEventInner { + /// The connection has been drained + Drained, + /// The reset token and/or address eligible for generating resets has been updated + ResetToken(SocketAddr, ResetToken), + /// The connection needs connection identifiers + NeedIdentifiers(Instant, u64), + /// Stop routing connection ID for this sequence number to the connection + /// When `bool == true`, a new connection ID will be issued to peer + RetireConnectionId(Instant, u64, bool), +} + +/// Protocol-level identifier for a connection. +/// +/// Mainly useful for identifying this connection's packets on the wire with tools like Wireshark. +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct ConnectionId { + /// length of CID + len: u8, + /// CID in byte array + bytes: [u8; MAX_CID_SIZE], +} + +impl ConnectionId { + /// Construct cid from byte array + pub fn new(bytes: &[u8]) -> Self { + debug_assert!(bytes.len() <= MAX_CID_SIZE); + let mut res = Self { + len: bytes.len() as u8, + bytes: [0; MAX_CID_SIZE], + }; + res.bytes[..bytes.len()].copy_from_slice(bytes); + res + } + + /// Constructs cid by reading `len` bytes from a `Buf` + /// + /// Callers need to assure that `buf.remaining() >= len` + pub fn from_buf(buf: &mut (impl Buf + ?Sized), len: usize) -> Self { + debug_assert!(len <= MAX_CID_SIZE); + let mut res = Self { + len: len as u8, + bytes: [0; MAX_CID_SIZE], + }; + buf.copy_to_slice(&mut res[..len]); + res + } + + /// Decode from long header format + pub(crate) fn decode_long(buf: &mut impl Buf) -> Option { + let len = buf.get::().ok()? 
as usize; + match len > MAX_CID_SIZE || buf.remaining() < len { + false => Some(Self::from_buf(buf, len)), + true => None, + } + } + + /// Encode in long header format + pub(crate) fn encode_long(&self, buf: &mut impl BufMut) { + buf.put_u8(self.len() as u8); + buf.put_slice(self); + } +} + +impl ::std::ops::Deref for ConnectionId { + type Target = [u8]; + fn deref(&self) -> &[u8] { + &self.bytes[0..self.len as usize] + } +} + +impl ::std::ops::DerefMut for ConnectionId { + fn deref_mut(&mut self) -> &mut [u8] { + &mut self.bytes[0..self.len as usize] + } +} + +impl fmt::Debug for ConnectionId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.bytes[0..self.len as usize].fmt(f) + } +} + +impl fmt::Display for ConnectionId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for byte in self.iter() { + write!(f, "{byte:02x}")?; + } + Ok(()) + } +} + +/// Explicit congestion notification codepoint +#[repr(u8)] +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum EcnCodepoint { + /// The ECT(0) codepoint, indicating that an endpoint is ECN-capable + Ect0 = 0b10, + /// The ECT(1) codepoint, indicating that an endpoint is ECN-capable + Ect1 = 0b01, + /// The CE codepoint, signalling that congestion was experienced + Ce = 0b11, +} + +impl EcnCodepoint { + /// Create new object from the given bits + pub fn from_bits(x: u8) -> Option { + use EcnCodepoint::*; + Some(match x & 0b11 { + 0b10 => Ect0, + 0b01 => Ect1, + 0b11 => Ce, + _ => { + return None; + } + }) + } + + /// Returns whether the codepoint is a CE, signalling that congestion was experienced + pub fn is_ce(self) -> bool { + matches!(self, Self::Ce) + } +} + +#[derive(Debug, Copy, Clone)] +pub(crate) struct IssuedCid { + pub(crate) sequence: u64, + pub(crate) id: ConnectionId, + pub(crate) reset_token: ResetToken, +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/tests/mod.rs 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/tests/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..1ecb398127f454e0b509306358477240373556a8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/tests/mod.rs @@ -0,0 +1,3376 @@ +use std::{ + convert::TryInto, + mem, + net::{Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::{Arc, Mutex}, +}; + +use assert_matches::assert_matches; +#[cfg(all(feature = "aws-lc-rs", not(feature = "ring")))] +use aws_lc_rs::hmac; +use bytes::{Bytes, BytesMut}; +use hex_literal::hex; +use rand::RngCore; +#[cfg(feature = "ring")] +use ring::hmac; +#[cfg(all(feature = "rustls-aws-lc-rs", not(feature = "rustls-ring")))] +use rustls::crypto::aws_lc_rs::default_provider; +#[cfg(feature = "rustls-ring")] +use rustls::crypto::ring::default_provider; +use rustls::{ + AlertDescription, RootCertStore, + pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer}, + server::WebPkiClientVerifier, +}; +use tracing::info; + +use super::*; +use crate::{ + Duration, Instant, + cid_generator::{ConnectionIdGenerator, RandomConnectionIdGenerator}, + crypto::rustls::QuicServerConfig, + frame::FrameStruct, + transport_parameters::TransportParameters, +}; +mod util; +use util::*; + +mod token; + +#[cfg(all(target_family = "wasm", target_os = "unknown"))] +use wasm_bindgen_test::wasm_bindgen_test as test; + +// Enable this if you want to run these tests in the browser. +// Unfortunately it's either-or: Enable this and you can run in the browser, disable to run in nodejs. 
+// #[cfg(all(target_family = "wasm", target_os = "unknown"))] +// wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + +#[test] +fn version_negotiate_server() { + let _guard = subscribe(); + let client_addr = "[::2]:7890".parse().unwrap(); + let mut server = Endpoint::new( + Default::default(), + Some(Arc::new(server_config())), + true, + None, + ); + let now = Instant::now(); + let mut buf = Vec::with_capacity(server.config().get_max_udp_payload_size() as usize); + let event = server.handle( + now, + client_addr, + None, + None, + // Long-header packet with reserved version number + hex!("80 0a1a2a3a 04 00000000 04 00000000 00")[..].into(), + &mut buf, + ); + let Some(DatagramEvent::Response(Transmit { .. })) = event else { + panic!("expected a response"); + }; + + assert_ne!(buf[0] & 0x80, 0); + assert_eq!(&buf[1..15], hex!("00000000 04 00000000 04 00000000")); + assert!(buf[15..].chunks(4).any(|x| { + DEFAULT_SUPPORTED_VERSIONS.contains(&u32::from_be_bytes(x.try_into().unwrap())) + })); +} + +#[test] +fn version_negotiate_client() { + let _guard = subscribe(); + let server_addr = "[::2]:7890".parse().unwrap(); + // Configure client to use empty CIDs so we can easily hardcode a server version negotiation + // packet + let cid_generator_factory: fn() -> Box = + || Box::new(RandomConnectionIdGenerator::new(0)); + let mut client = Endpoint::new( + Arc::new(EndpointConfig { + connection_id_generator_factory: Arc::new(cid_generator_factory), + ..Default::default() + }), + None, + true, + None, + ); + let (_, mut client_ch) = client + .connect(Instant::now(), client_config(), server_addr, "localhost") + .unwrap(); + let now = Instant::now(); + let mut buf = Vec::with_capacity(client.config().get_max_udp_payload_size() as usize); + let opt_event = client.handle( + now, + server_addr, + None, + None, + // Version negotiation packet for reserved version, with empty DCID + hex!( + "80 00000000 00 04 00000000 + 0a1a2a3a" + )[..] 
+ .into(), + &mut buf, + ); + if let Some(DatagramEvent::ConnectionEvent(_, event)) = opt_event { + client_ch.handle_event(event); + } + assert_matches!( + client_ch.poll(), + Some(Event::ConnectionLost { + reason: ConnectionError::VersionMismatch, + }) + ); +} + +#[test] +fn lifecycle() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + assert!(pair.client_conn_mut(client_ch).using_ecn()); + assert!(pair.server_conn_mut(server_ch).using_ecn()); + + const REASON: &[u8] = b"whee"; + info!("closing"); + pair.client.connections.get_mut(&client_ch).unwrap().close( + pair.time, + VarInt(42), + REASON.into(), + ); + pair.drive(); + assert_matches!(pair.server_conn_mut(server_ch).poll(), + Some(Event::ConnectionLost { reason: ConnectionError::ApplicationClosed( + ApplicationClose { error_code: VarInt(42), ref reason } + )}) if reason == REASON); + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn draft_version_compat() { + let _guard = subscribe(); + + let mut client_config = client_config(); + client_config.version(0xff00_0020); + + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect_with(client_config); + + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + assert!(pair.client_conn_mut(client_ch).using_ecn()); + assert!(pair.server_conn_mut(server_ch).using_ecn()); + + const REASON: &[u8] = b"whee"; + info!("closing"); + pair.client.connections.get_mut(&client_ch).unwrap().close( + pair.time, + VarInt(42), + REASON.into(), + ); + pair.drive(); + assert_matches!(pair.server_conn_mut(server_ch).poll(), + Some(Event::ConnectionLost { reason: ConnectionError::ApplicationClosed( + ApplicationClose { 
error_code: VarInt(42), ref reason } + )}) if reason == REASON); + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn server_stateless_reset() { + let _guard = subscribe(); + let mut key_material = vec![0; 64]; + let mut rng = rand::rng(); + rng.fill_bytes(&mut key_material); + let reset_key = hmac::Key::new(hmac::HMAC_SHA256, &key_material); + rng.fill_bytes(&mut key_material); + + let mut endpoint_config = EndpointConfig::new(Arc::new(reset_key)); + endpoint_config.cid_generator(move || Box::new(HashedConnectionIdGenerator::from_key(0))); + let endpoint_config = Arc::new(endpoint_config); + + let mut pair = Pair::new(endpoint_config.clone(), server_config()); + let (client_ch, _) = pair.connect(); + pair.drive(); // Flush any post-handshake frames + pair.server.endpoint = + Endpoint::new(endpoint_config, Some(Arc::new(server_config())), true, None); + // Force the server to generate the smallest possible stateless reset + pair.client.connections.get_mut(&client_ch).unwrap().ping(); + info!("resetting"); + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::ConnectionLost { + reason: ConnectionError::Reset + }) + ); +} + +#[test] +fn client_stateless_reset() { + let _guard = subscribe(); + let mut key_material = vec![0; 64]; + let mut rng = rand::rng(); + rng.fill_bytes(&mut key_material); + let reset_key = hmac::Key::new(hmac::HMAC_SHA256, &key_material); + rng.fill_bytes(&mut key_material); + + let mut endpoint_config = EndpointConfig::new(Arc::new(reset_key)); + endpoint_config.cid_generator(move || Box::new(HashedConnectionIdGenerator::from_key(0))); + let endpoint_config = Arc::new(endpoint_config); + + let mut pair = Pair::new(endpoint_config.clone(), server_config()); + let (_, server_ch) = pair.connect(); + 
pair.client.endpoint = + Endpoint::new(endpoint_config, Some(Arc::new(server_config())), true, None); + // Send something big enough to allow room for a smaller stateless reset. + pair.server.connections.get_mut(&server_ch).unwrap().close( + pair.time, + VarInt(42), + (&[0xab; 128][..]).into(), + ); + info!("resetting"); + pair.drive(); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::ConnectionLost { + reason: ConnectionError::Reset + }) + ); +} + +/// Verify that stateless resets are rate-limited +#[test] +fn stateless_reset_limit() { + let _guard = subscribe(); + let remote = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 42); + let mut endpoint_config = EndpointConfig::default(); + endpoint_config.cid_generator(move || Box::new(RandomConnectionIdGenerator::new(8))); + let endpoint_config = Arc::new(endpoint_config); + let mut endpoint = Endpoint::new( + endpoint_config.clone(), + Some(Arc::new(server_config())), + true, + None, + ); + let time = Instant::now(); + let mut buf = Vec::new(); + let event = endpoint.handle(time, remote, None, None, [0u8; 1024][..].into(), &mut buf); + assert!(matches!(event, Some(DatagramEvent::Response(_)))); + let event = endpoint.handle(time, remote, None, None, [0u8; 1024][..].into(), &mut buf); + assert!(event.is_none()); + let event = endpoint.handle( + time + endpoint_config.min_reset_interval - Duration::from_nanos(1), + remote, + None, + None, + [0u8; 1024][..].into(), + &mut buf, + ); + assert!(event.is_none()); + let event = endpoint.handle( + time + endpoint_config.min_reset_interval, + remote, + None, + None, + [0u8; 1024][..].into(), + &mut buf, + ); + assert!(matches!(event, Some(DatagramEvent::Response(_)))); +} + +#[test] +fn export_keying_material() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + const LABEL: &[u8] = b"test_label"; + const CONTEXT: &[u8] = b"test_context"; + + // client keying material + let mut 
client_buf = [0u8; 64]; + pair.client_conn_mut(client_ch) + .crypto_session() + .export_keying_material(&mut client_buf, LABEL, CONTEXT) + .unwrap(); + + // server keying material + let mut server_buf = [0u8; 64]; + pair.server_conn_mut(server_ch) + .crypto_session() + .export_keying_material(&mut server_buf, LABEL, CONTEXT) + .unwrap(); + + assert_eq!(&client_buf[..], &server_buf[..]); +} + +#[test] +fn finish_stream_simple() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + + const MSG: &[u8] = b"hello"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + assert_eq!(pair.client_streams(client_ch).send_streams(), 1); + pair.client_send(client_ch, s).finish().unwrap(); + pair.drive(); + + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Stream(StreamEvent::Finished { id })) if id == s + ); + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + assert_eq!(pair.client_streams(client_ch).send_streams(), 0); + assert_eq!(pair.server_conn_mut(client_ch).streams().send_streams(), 0); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Uni })) + ); + // Receive-only streams do not get `StreamFinished` events + assert_eq!(pair.server_conn_mut(client_ch).streams().send_streams(), 0); + assert_matches!(pair.server_streams(server_ch).accept(Dir::Uni), Some(stream) if stream == s); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); + + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!( + chunks.next(usize::MAX), + Ok(Some(chunk)) if chunk.offset == 0 && chunk.bytes == MSG + ); + assert_matches!(chunks.next(usize::MAX), Ok(None)); + let _ = chunks.finalize(); +} + +#[test] +fn reset_stream() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, 
server_ch) = pair.connect(); + + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + + const MSG: &[u8] = b"hello"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + pair.drive(); + + info!("resetting stream"); + const ERROR: VarInt = VarInt(42); + pair.client_send(client_ch, s).reset(ERROR).unwrap(); + pair.drive(); + + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Uni })) + ); + assert_matches!(pair.server_streams(server_ch).accept(Dir::Uni), Some(stream) if stream == s); + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!(chunks.next(usize::MAX), Err(ReadError::Reset(ERROR))); + let _ = chunks.finalize(); + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); +} + +#[test] +fn stop_stream() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + const MSG: &[u8] = b"hello"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + pair.drive(); + + info!("stopping stream"); + const ERROR: VarInt = VarInt(42); + pair.server_recv(server_ch, s).stop(ERROR).unwrap(); + pair.drive(); + + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Uni })) + ); + assert_matches!(pair.server_streams(server_ch).accept(Dir::Uni), Some(stream) if stream == s); + + assert_matches!( + pair.client_send(client_ch, s).write(b"foo"), + Err(WriteError::Stopped(ERROR)) + ); + assert_matches!( + pair.client_send(client_ch, s).finish(), + Err(FinishError::Stopped(ERROR)) + ); +} + +#[test] +fn reject_self_signed_server_cert() { + let _guard = subscribe(); + let mut pair = Pair::default(); + info!("connecting"); + + // Create a self-signed certificate with a different distinguished name than the default one, + // such that path building cannot confuse the 
default root the server is using and the one + // the client is trusting (in which case we'd get a different error). + let mut cert = rcgen::CertificateParams::new(["localhost".into()]).unwrap(); + let mut issuer = rcgen::DistinguishedName::new(); + issuer.push( + rcgen::DnType::OrganizationName, + "Crazy Quinn's House of Certificates", + ); + cert.distinguished_name = issuer; + let cert = cert + .self_signed(&rcgen::KeyPair::generate().unwrap()) + .unwrap(); + let client_ch = pair.begin_connect(client_config_with_certs(vec![cert.into()])); + + pair.drive(); + + assert_matches!(pair.client_conn_mut(client_ch).poll(), + Some(Event::ConnectionLost { reason: ConnectionError::TransportError(ref error)}) + if error.code == TransportErrorCode::crypto(AlertDescription::UnknownCA.into())); +} + +#[test] +fn reject_missing_client_cert() { + let _guard = subscribe(); + + let mut store = RootCertStore::empty(); + // `WebPkiClientVerifier` requires a non-empty store, so we stick our own certificate into it + // because it's convenient. 
+ store.add(CERTIFIED_KEY.cert.der().clone()).unwrap(); + + let key = PrivatePkcs8KeyDer::from(CERTIFIED_KEY.signing_key.serialize_der()); + let cert = CERTIFIED_KEY.cert.der().clone(); + + let provider = Arc::new(default_provider()); + let config = rustls::ServerConfig::builder_with_provider(provider.clone()) + .with_protocol_versions(&[&rustls::version::TLS13]) + .unwrap() + .with_client_cert_verifier( + WebPkiClientVerifier::builder_with_provider(Arc::new(store), provider) + .build() + .unwrap(), + ) + .with_single_cert(vec![cert], PrivateKeyDer::from(key)) + .unwrap(); + let config = QuicServerConfig::try_from(config).unwrap(); + + let mut pair = Pair::new( + Default::default(), + ServerConfig::with_crypto(Arc::new(config)), + ); + + info!("connecting"); + let client_ch = pair.begin_connect(client_config()); + pair.drive(); + + // The client completes the connection, but finds it immediately closed + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Connected) + ); + assert_matches!(pair.client_conn_mut(client_ch).poll(), + Some(Event::ConnectionLost { reason: ConnectionError::ConnectionClosed(ref close)}) + if close.error_code == TransportErrorCode::crypto(AlertDescription::CertificateRequired.into())); + + // The server never completes the connection + let server_ch = pair.server.assert_accept(); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!(pair.server_conn_mut(server_ch).poll(), + Some(Event::ConnectionLost { reason: ConnectionError::TransportError(ref error)}) + if error.code == TransportErrorCode::crypto(AlertDescription::CertificateRequired.into())); +} + +#[test] +fn congestion() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, _) = pair.connect(); + + const TARGET: u64 = 2048; + 
assert!(pair.client_conn_mut(client_ch).congestion_window() > TARGET); + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + // Send data without receiving ACKs until the congestion state falls below target + while pair.client_conn_mut(client_ch).congestion_window() > TARGET { + let n = pair.client_send(client_ch, s).write(&[42; 1024]).unwrap(); + assert_eq!(n, 1024); + pair.drive_client(); + } + // Ensure that the congestion state recovers after receiving the ACKs + pair.drive(); + assert!(pair.client_conn_mut(client_ch).congestion_window() >= TARGET); + pair.client_send(client_ch, s).write(&[42; 1024]).unwrap(); +} + +#[test] +fn high_latency_handshake() { + let _guard = subscribe(); + let mut pair = Pair::default(); + pair.latency = Duration::from_micros(200 * 1000); + let (client_ch, server_ch) = pair.connect(); + assert_eq!(pair.client_conn_mut(client_ch).bytes_in_flight(), 0); + assert_eq!(pair.server_conn_mut(server_ch).bytes_in_flight(), 0); + assert!(pair.client_conn_mut(client_ch).using_ecn()); + assert!(pair.server_conn_mut(server_ch).using_ecn()); +} + +#[test] +fn zero_rtt_happypath() { + let _guard = subscribe(); + let mut pair = Pair::default(); + pair.server.handle_incoming = Box::new(validate_incoming); + let config = client_config(); + + // Establish normal connection + let client_ch = pair.begin_connect(config.clone()); + pair.drive(); + pair.server.assert_accept(); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(0), [][..].into()); + pair.drive(); + + pair.client.addr = SocketAddr::new( + Ipv6Addr::LOCALHOST.into(), + CLIENT_PORTS.lock().unwrap().next().unwrap(), + ); + info!("resuming session"); + let client_ch = pair.begin_connect(config); + assert!(pair.client_conn_mut(client_ch).has_0rtt()); + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + const MSG: &[u8] = b"Hello, 0-RTT!"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + pair.drive(); + + assert_matches!( + 
pair.client_conn_mut(client_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Connected) + ); + + assert!(pair.client_conn_mut(client_ch).accepted_0rtt()); + let server_ch = pair.server.assert_accept(); + + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::HandshakeDataReady) + ); + // We don't currently preserve stream event order wrt. connection events + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Connected) + ); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Uni })) + ); + + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!( + chunks.next(usize::MAX), + Ok(Some(chunk)) if chunk.offset == 0 && chunk.bytes == MSG + ); + let _ = chunks.finalize(); + assert_eq!(pair.client_conn_mut(client_ch).stats().path.lost_packets, 0); +} + +#[test] +fn zero_rtt_rejection() { + let _guard = subscribe(); + let server_config = ServerConfig::with_crypto(Arc::new(server_crypto_with_alpn(vec![ + "foo".into(), + "bar".into(), + ]))); + let mut pair = Pair::new(Arc::new(EndpointConfig::default()), server_config); + let mut client_crypto = Arc::new(client_crypto_with_alpn(vec!["foo".into()])); + let client_config = ClientConfig::new(client_crypto.clone()); + + // Establish normal connection + let client_ch = pair.begin_connect(client_config); + pair.drive(); + let server_ch = pair.server.assert_accept(); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Connected) + ); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(0), [][..].into()); + pair.drive(); + assert_matches!( + 
pair.server_conn_mut(server_ch).poll(), + Some(Event::ConnectionLost { .. }) + ); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); + pair.client.connections.clear(); + pair.server.connections.clear(); + + // We want to have a TLS client config with the existing session cache (so resumption could + // happen), but with different ALPN protocols (so that the server must reject it). Reuse + // the existing `ClientConfig` and change the ALPN protocols to make that happen. + let this = Arc::get_mut(&mut client_crypto).expect("QuicClientConfig is shared"); + let inner = Arc::get_mut(&mut this.inner).expect("QuicClientConfig.inner is shared"); + inner.alpn_protocols = vec!["bar".into()]; + + // Changing protocols invalidates 0-RTT + let client_config = ClientConfig::new(client_crypto); + info!("resuming session"); + let client_ch = pair.begin_connect(client_config); + assert!(pair.client_conn_mut(client_ch).has_0rtt()); + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + const MSG: &[u8] = b"Hello, 0-RTT!"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + pair.drive(); + assert!(!pair.client_conn_mut(client_ch).accepted_0rtt()); + let server_ch = pair.server.assert_accept(); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Connected) + ); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); + let s2 = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + assert_eq!(s, s2); + + let mut recv = pair.server_recv(server_ch, s2); + let mut chunks = recv.read(false).unwrap(); + assert_eq!(chunks.next(usize::MAX), Err(ReadError::Blocked)); + let _ = chunks.finalize(); + assert_eq!(pair.client_conn_mut(client_ch).stats().path.lost_packets, 0); +} + +fn test_zero_rtt_incoming_limit(configure_server: F) { + // caller sets the server limit to 4000 bytes + // the client writes 8000 bytes + const 
CLIENT_WRITES: usize = 8000; + // this gets split across 8 packets + // the first packet is stored in the Incoming + // the next three are incoming-buffered, bringing the incoming buffer size to 3600 bytes + // the last four are dropped due to the buffering limit and must be retransmitted + const EXPECTED_DROPPED: u64 = 4; + + let _guard = subscribe(); + let mut server_config = server_config(); + configure_server(&mut server_config); + let mut pair = Pair::new(Arc::new(EndpointConfig::default()), server_config); + let config = client_config(); + + // Establish normal connection + let client_ch = pair.begin_connect(config.clone()); + pair.drive(); + pair.server.assert_accept(); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(0), [][..].into()); + pair.drive(); + + pair.client.addr = SocketAddr::new( + Ipv6Addr::LOCALHOST.into(), + CLIENT_PORTS.lock().unwrap().next().unwrap(), + ); + info!("resuming session"); + pair.server.handle_incoming = Box::new(|_| IncomingConnectionBehavior::Wait); + let client_ch = pair.begin_connect(config); + assert!(pair.client_conn_mut(client_ch).has_0rtt()); + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + pair.client_send(client_ch, s) + .write(&vec![0; CLIENT_WRITES]) + .unwrap(); + pair.drive(); + let incoming = pair.server.waiting_incoming.pop().unwrap(); + assert!(pair.server.waiting_incoming.is_empty()); + let _ = pair.server.try_accept(incoming, pair.time); + pair.drive(); + + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Connected) + ); + + assert!(pair.client_conn_mut(client_ch).accepted_0rtt()); + let server_ch = pair.server.assert_accept(); + + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::HandshakeDataReady) + ); + // We don't currently preserve stream event order wrt. 
connection events + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Connected) + ); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Uni })) + ); + + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + let mut offset = 0; + loop { + match chunks.next(usize::MAX) { + Ok(Some(chunk)) => { + assert_eq!(chunk.offset as usize, offset); + offset += chunk.bytes.len(); + } + Err(ReadError::Blocked) => break, + Ok(None) => panic!("unexpected stream end"), + Err(e) => panic!("{}", e), + } + } + assert_eq!(offset, CLIENT_WRITES); + let _ = chunks.finalize(); + assert_eq!( + pair.client_conn_mut(client_ch).stats().path.lost_packets, + EXPECTED_DROPPED + ); +} + +#[test] +fn zero_rtt_incoming_buffer_size() { + test_zero_rtt_incoming_limit(|config| { + config.incoming_buffer_size(4000); + }); +} + +#[test] +fn zero_rtt_incoming_buffer_size_total() { + test_zero_rtt_incoming_limit(|config| { + config.incoming_buffer_size_total(4000); + }); +} + +#[test] +fn alpn_success() { + let _guard = subscribe(); + let server_config = ServerConfig::with_crypto(Arc::new(server_crypto_with_alpn(vec![ + "foo".into(), + "bar".into(), + "baz".into(), + ]))); + + let mut pair = Pair::new(Arc::new(EndpointConfig::default()), server_config); + let client_config = ClientConfig::new(Arc::new(client_crypto_with_alpn(vec![ + "bar".into(), + "quux".into(), + "corge".into(), + ]))); + + // Establish normal connection + let client_ch = pair.begin_connect(client_config); + pair.drive(); + let server_ch = pair.server.assert_accept(); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Connected) + ); + + let hd = pair + .client_conn_mut(client_ch) + .crypto_session() + .handshake_data() + .unwrap() + .downcast::() + .unwrap(); + 
assert_eq!(hd.protocol.unwrap(), &b"bar"[..]); +} + +#[test] +fn server_alpn_unset() { + let _guard = subscribe(); + let mut pair = Pair::new(Arc::new(EndpointConfig::default()), server_config()); + let client_config = ClientConfig::new(Arc::new(client_crypto_with_alpn(vec!["foo".into()]))); + + let client_ch = pair.begin_connect(client_config); + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::ConnectionLost { reason: ConnectionError::ConnectionClosed(err) }) if err.error_code == TransportErrorCode::crypto(0x78) + ); +} + +#[test] +fn client_alpn_unset() { + let _guard = subscribe(); + let server_config = ServerConfig::with_crypto(Arc::new(server_crypto_with_alpn(vec![ + "foo".into(), + "bar".into(), + "baz".into(), + ]))); + + let mut pair = Pair::new(Arc::new(EndpointConfig::default()), server_config); + let client_ch = pair.begin_connect(client_config()); + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::ConnectionLost { reason: ConnectionError::ConnectionClosed(err) }) if err.error_code == TransportErrorCode::crypto(0x78) + ); +} + +#[test] +fn alpn_mismatch() { + let _guard = subscribe(); + let server_config = ServerConfig::with_crypto(Arc::new(server_crypto_with_alpn(vec![ + "foo".into(), + "bar".into(), + "baz".into(), + ]))); + + let mut pair = Pair::new(Arc::new(EndpointConfig::default()), server_config); + let client_ch = pair.begin_connect(ClientConfig::new(Arc::new(client_crypto_with_alpn(vec![ + "quux".into(), + "corge".into(), + ])))); + + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::ConnectionLost { reason: ConnectionError::ConnectionClosed(err) }) if err.error_code == TransportErrorCode::crypto(0x78) + ); +} + +#[test] +fn stream_id_limit() { + let _guard = subscribe(); + let server = ServerConfig { + transport: Arc::new(TransportConfig { + max_concurrent_uni_streams: 1u32.into(), + ..TransportConfig::default() + }), + 
..server_config() + }; + let mut pair = Pair::new(Default::default(), server); + let (client_ch, server_ch) = pair.connect(); + + let s = pair + .client + .connections + .get_mut(&client_ch) + .unwrap() + .streams() + .open(Dir::Uni) + .expect("couldn't open first stream"); + assert_eq!( + pair.client_streams(client_ch).open(Dir::Uni), + None, + "only one stream is permitted at a time" + ); + // Generate some activity to allow the server to see the stream + const MSG: &[u8] = b"hello"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + pair.client_send(client_ch, s).finish().unwrap(); + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Stream(StreamEvent::Finished { id })) if id == s + ); + assert_eq!( + pair.client_streams(client_ch).open(Dir::Uni), + None, + "server does not immediately grant additional credit" + ); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Uni })) + ); + assert_matches!(pair.server_streams(server_ch).accept(Dir::Uni), Some(stream) if stream == s); + + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!( + chunks.next(usize::MAX), + Ok(Some(chunk)) if chunk.offset == 0 && chunk.bytes == MSG + ); + assert_eq!(chunks.next(usize::MAX), Ok(None)); + let _ = chunks.finalize(); + + // Server will only send MAX_STREAM_ID now that the application's been notified + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Stream(StreamEvent::Available { dir: Dir::Uni })) + ); + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + + // Try opening the second stream again, now that we've made room + let s = pair + .client + .connections + .get_mut(&client_ch) + .unwrap() + .streams() + .open(Dir::Uni) + .expect("didn't get stream id budget"); + pair.client_send(client_ch, s).finish().unwrap(); + pair.drive(); + // Make sure the server actually 
processes data on the newly-available stream + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Uni })) + ); + assert_matches!(pair.server_streams(server_ch).accept(Dir::Uni), Some(stream) if stream == s); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); + + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!(chunks.next(usize::MAX), Ok(None)); + let _ = chunks.finalize(); +} + +#[test] +fn key_update_simple() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + let s = pair + .client + .connections + .get_mut(&client_ch) + .unwrap() + .streams() + .open(Dir::Bi) + .expect("couldn't open first stream"); + + const MSG1: &[u8] = b"hello1"; + pair.client_send(client_ch, s).write(MSG1).unwrap(); + pair.drive(); + + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Bi })) + ); + assert_matches!(pair.server_streams(server_ch).accept(Dir::Bi), Some(stream) if stream == s); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!( + chunks.next(usize::MAX), + Ok(Some(chunk)) if chunk.offset == 0 && chunk.bytes == MSG1 + ); + let _ = chunks.finalize(); + + info!("initiating key update"); + pair.client_conn_mut(client_ch).force_key_update(); + + const MSG2: &[u8] = b"hello2"; + pair.client_send(client_ch, s).write(MSG2).unwrap(); + pair.drive(); + + assert_matches!(pair.server_conn_mut(server_ch).poll(), Some(Event::Stream(StreamEvent::Readable { id })) if id == s); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!( + chunks.next(usize::MAX), + Ok(Some(chunk)) if chunk.offset 
== 6 && chunk.bytes == MSG2 + ); + let _ = chunks.finalize(); + + assert_eq!(pair.client_conn_mut(client_ch).stats().path.lost_packets, 0); + assert_eq!(pair.server_conn_mut(server_ch).stats().path.lost_packets, 0); +} + +#[test] +fn key_update_reordered() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + let s = pair + .client + .connections + .get_mut(&client_ch) + .unwrap() + .streams() + .open(Dir::Bi) + .expect("couldn't open first stream"); + + const MSG1: &[u8] = b"1"; + pair.client_send(client_ch, s).write(MSG1).unwrap(); + pair.client.drive(pair.time, pair.server.addr); + assert!(!pair.client.outbound.is_empty()); + pair.client.delay_outbound(); + + pair.client_conn_mut(client_ch).force_key_update(); + info!("updated keys"); + + const MSG2: &[u8] = b"two"; + pair.client_send(client_ch, s).write(MSG2).unwrap(); + pair.client.drive(pair.time, pair.server.addr); + pair.client.finish_delay(); + pair.drive(); + + assert_eq!(pair.client_conn_mut(client_ch).stats().path.lost_packets, 0); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Bi })) + ); + assert_matches!(pair.server_streams(server_ch).accept(Dir::Bi), Some(stream) if stream == s); + + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(true).unwrap(); + let buf1 = chunks.next(usize::MAX).unwrap().unwrap(); + assert_matches!(&*buf1.bytes, MSG1); + let buf2 = chunks.next(usize::MAX).unwrap().unwrap(); + assert_eq!(buf2.bytes, MSG2); + let _ = chunks.finalize(); + + assert_eq!(pair.client_conn_mut(client_ch).stats().path.lost_packets, 0); + assert_eq!(pair.server_conn_mut(server_ch).stats().path.lost_packets, 0); +} + +#[test] +fn initial_retransmit() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let client_ch = pair.begin_connect(client_config()); + pair.client.drive(pair.time, pair.server.addr); + pair.client.outbound.clear(); // Drop 
initial + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Connected) + ); +} + +#[test] +fn instant_close_1() { + let _guard = subscribe(); + let mut pair = Pair::default(); + info!("connecting"); + let client_ch = pair.begin_connect(client_config()); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(0), Bytes::new()); + pair.drive(); + let server_ch = pair.server.assert_accept(); + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::ConnectionLost { + reason: ConnectionError::ConnectionClosed(ConnectionClose { + error_code: TransportErrorCode::APPLICATION_ERROR, + .. + }), + }) + ); +} + +#[test] +fn instant_close_2() { + let _guard = subscribe(); + let mut pair = Pair::default(); + info!("connecting"); + let client_ch = pair.begin_connect(client_config()); + // Unlike `instant_close`, the server sees a valid Initial packet first. + pair.drive_client(); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + let server_ch = pair.server.assert_accept(); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::ConnectionLost { + reason: ConnectionError::ConnectionClosed(ConnectionClose { + error_code: TransportErrorCode::APPLICATION_ERROR, + .. 
+ }), + }) + ); +} + +#[test] +fn instant_server_close() { + let _guard = subscribe(); + let mut pair = Pair::default(); + info!("connecting"); + pair.begin_connect(client_config()); + pair.drive_client(); + pair.server.drive_incoming(pair.time, pair.client.addr); + let server_ch = pair.server.assert_accept(); + info!("closing"); + pair.server + .connections + .get_mut(&server_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_matches!( + pair.client_conn_mut(server_ch).poll(), + Some(Event::ConnectionLost { + reason: ConnectionError::ConnectionClosed(ConnectionClose { + error_code: TransportErrorCode::APPLICATION_ERROR, + .. + }), + }) + ); +} + +#[test] +fn idle_timeout() { + let _guard = subscribe(); + const IDLE_TIMEOUT: u64 = 100; + let server = ServerConfig { + transport: Arc::new(TransportConfig { + max_idle_timeout: Some(VarInt(IDLE_TIMEOUT)), + ..TransportConfig::default() + }), + ..server_config() + }; + let mut pair = Pair::new(Default::default(), server); + let (client_ch, server_ch) = pair.connect(); + pair.client_conn_mut(client_ch).ping(); + let start = pair.time; + + while !pair.client_conn_mut(client_ch).is_closed() + || !pair.server_conn_mut(server_ch).is_closed() + { + if !pair.step() { + if let Some(t) = min_opt(pair.client.next_wakeup(), pair.server.next_wakeup()) { + pair.time = t; + } + } + pair.client.inbound.clear(); // Simulate total S->C packet loss + } + + assert!(pair.time - start < Duration::from_millis(2 * IDLE_TIMEOUT)); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::ConnectionLost { + reason: ConnectionError::TimedOut, + }) + ); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::ConnectionLost { + reason: ConnectionError::TimedOut, + }) + ); +} + +#[test] +fn connection_close_sends_acks() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, _server_ch) = pair.connect(); + + let client_acks = 
pair.client_conn_mut(client_ch).stats().frame_rx.acks; + + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + let time = pair.time; + pair.server_conn_mut(client_ch) + .close(time, VarInt(42), Bytes::new()); + + pair.drive(); + + let client_acks_2 = pair.client_conn_mut(client_ch).stats().frame_rx.acks; + assert!( + client_acks_2 > client_acks, + "Connection close should send pending ACKs" + ); +} + +#[test] +fn server_hs_retransmit() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let client_ch = pair.begin_connect(client_config()); + pair.step(); + assert!(!pair.client.inbound.is_empty()); // Initial + Handshakes + pair.client.inbound.clear(); + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Connected) + ); +} + +#[test] +fn migration() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + pair.drive(); + + let client_stats_after_connect = pair.client_conn_mut(client_ch).stats(); + + pair.client.addr = SocketAddr::new( + Ipv4Addr::new(127, 0, 0, 1).into(), + CLIENT_PORTS.lock().unwrap().next().unwrap(), + ); + pair.client_conn_mut(client_ch).ping(); + + // Assert that just receiving the ping message is accounted into the servers + // anti-amplification budget + pair.drive_client(); + pair.drive_server(); + assert_ne!(pair.server_conn_mut(server_ch).total_recvd(), 0); + + pair.drive(); + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + assert_eq!( + pair.server_conn_mut(server_ch).remote_address(), + pair.client.addr + ); + + // Assert that the client's response to the PATH_CHALLENGE was an IMMEDIATE_ACK, instead of a + // second ping + let client_stats_after_migrate = pair.client_conn_mut(client_ch).stats(); + assert_eq!( + client_stats_after_migrate.frame_tx.ping - client_stats_after_connect.frame_tx.ping, + 1 + ); 
+ assert_eq!( + client_stats_after_migrate.frame_tx.immediate_ack + - client_stats_after_connect.frame_tx.immediate_ack, + 1 + ); +} + +fn test_flow_control(config: TransportConfig, window_size: usize) { + let _guard = subscribe(); + let mut pair = Pair::new( + Default::default(), + ServerConfig { + transport: Arc::new(config), + ..server_config() + }, + ); + let (client_ch, server_ch) = pair.connect(); + let msg = vec![0xAB; window_size + 10]; + + // Stream reset before read + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + info!("writing"); + assert_eq!(pair.client_send(client_ch, s).write(&msg), Ok(window_size)); + assert_eq!( + pair.client_send(client_ch, s).write(&msg[window_size..]), + Err(WriteError::Blocked) + ); + pair.drive(); + info!("resetting"); + pair.client_send(client_ch, s).reset(VarInt(42)).unwrap(); + pair.drive(); + + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(true).unwrap(); + assert_eq!( + chunks.next(usize::MAX).err(), + Some(ReadError::Reset(VarInt(42))) + ); + let _ = chunks.finalize(); + + // Happy path + info!("writing"); + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + assert_eq!(pair.client_send(client_ch, s).write(&msg), Ok(window_size)); + assert_eq!( + pair.client_send(client_ch, s).write(&msg[window_size..]), + Err(WriteError::Blocked) + ); + + pair.drive(); + let mut cursor = 0; + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(true).unwrap(); + loop { + match chunks.next(usize::MAX) { + Ok(Some(chunk)) => { + cursor += chunk.bytes.len(); + } + Ok(None) => { + panic!("end of stream"); + } + Err(ReadError::Blocked) => { + break; + } + Err(e) => { + panic!("{}", e); + } + } + } + let _ = chunks.finalize(); + + info!("finished reading"); + assert_eq!(cursor, window_size); + pair.drive(); + info!("writing"); + assert_eq!(pair.client_send(client_ch, s).write(&msg), Ok(window_size)); + assert_eq!( + pair.client_send(client_ch, 
s).write(&msg[window_size..]), + Err(WriteError::Blocked) + ); + + pair.drive(); + let mut cursor = 0; + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(true).unwrap(); + loop { + match chunks.next(usize::MAX) { + Ok(Some(chunk)) => { + cursor += chunk.bytes.len(); + } + Ok(None) => { + panic!("end of stream"); + } + Err(ReadError::Blocked) => { + break; + } + Err(e) => { + panic!("{}", e); + } + } + } + assert_eq!(cursor, window_size); + let _ = chunks.finalize(); + info!("finished reading"); +} + +#[test] +fn stream_flow_control() { + test_flow_control( + TransportConfig { + stream_receive_window: 2000u32.into(), + ..TransportConfig::default() + }, + 2000, + ); +} + +#[test] +fn conn_flow_control() { + test_flow_control( + TransportConfig { + receive_window: 2000u32.into(), + ..TransportConfig::default() + }, + 2000, + ); +} + +#[test] +fn stop_opens_bidi() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + assert_eq!(pair.client_streams(client_ch).send_streams(), 0); + let s = pair.client_streams(client_ch).open(Dir::Bi).unwrap(); + assert_eq!(pair.client_streams(client_ch).send_streams(), 1); + const ERROR: VarInt = VarInt(42); + pair.client + .connections + .get_mut(&server_ch) + .unwrap() + .recv_stream(s) + .stop(ERROR) + .unwrap(); + pair.drive(); + + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Bi })) + ); + assert_eq!(pair.server_conn_mut(client_ch).streams().send_streams(), 0); + assert_matches!(pair.server_streams(server_ch).accept(Dir::Bi), Some(stream) if stream == s); + assert_eq!(pair.server_conn_mut(client_ch).streams().send_streams(), 1); + + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!(chunks.next(usize::MAX), Err(ReadError::Blocked)); + let _ = chunks.finalize(); + + assert_matches!( + pair.server_send(server_ch, 
s).write(b"foo"), + Err(WriteError::Stopped(ERROR)) + ); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Stopped { + id: _, + error_code: ERROR + })) + ); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); +} + +#[test] +fn implicit_open() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + let s1 = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + let s2 = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + pair.client_send(client_ch, s2).write(b"hello").unwrap(); + pair.drive(); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Uni })) + ); + assert_eq!(pair.server_streams(server_ch).accept(Dir::Uni), Some(s1)); + assert_eq!(pair.server_streams(server_ch).accept(Dir::Uni), Some(s2)); + assert_eq!(pair.server_streams(server_ch).accept(Dir::Uni), None); +} + +#[test] +fn zero_length_cid() { + let _guard = subscribe(); + let cid_generator_factory: fn() -> Box = + || Box::new(RandomConnectionIdGenerator::new(0)); + let mut pair = Pair::new( + Arc::new(EndpointConfig { + connection_id_generator_factory: Arc::new(cid_generator_factory), + ..EndpointConfig::default() + }), + server_config(), + ); + let (client_ch, server_ch) = pair.connect(); + // Ensure we can reconnect after a previous connection is cleaned up + info!("closing"); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + pair.server + .connections + .get_mut(&server_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.connect(); +} + +#[test] +fn keep_alive() { + let _guard = subscribe(); + const IDLE_TIMEOUT: u64 = 10; + let server = ServerConfig { + transport: Arc::new(TransportConfig { + keep_alive_interval: Some(Duration::from_millis(IDLE_TIMEOUT / 2)), + max_idle_timeout: Some(VarInt(IDLE_TIMEOUT)), + 
..TransportConfig::default() + }), + ..server_config() + }; + let mut pair = Pair::new(Default::default(), server); + let (client_ch, server_ch) = pair.connect(); + // Run a good while longer than the idle timeout + let end = pair.time + Duration::from_millis(20 * IDLE_TIMEOUT); + while pair.time < end { + if !pair.step() { + if let Some(time) = min_opt(pair.client.next_wakeup(), pair.server.next_wakeup()) { + pair.time = time; + } + } + assert!(!pair.client_conn_mut(client_ch).is_closed()); + assert!(!pair.server_conn_mut(server_ch).is_closed()); + } +} + +#[test] +fn cid_rotation() { + let _guard = subscribe(); + const CID_TIMEOUT: Duration = Duration::from_secs(2); + + let cid_generator_factory: fn() -> Box = + || Box::new(*RandomConnectionIdGenerator::new(8).set_lifetime(CID_TIMEOUT)); + + // Only test cid rotation on server side to have a clear output trace + let server = Endpoint::new( + Arc::new(EndpointConfig { + connection_id_generator_factory: Arc::new(cid_generator_factory), + ..EndpointConfig::default() + }), + Some(Arc::new(server_config())), + true, + None, + ); + let client = Endpoint::new(Arc::new(EndpointConfig::default()), None, true, None); + + let mut pair = Pair::new_from_endpoint(client, server); + let (_, server_ch) = pair.connect(); + + let mut round: u64 = 1; + let mut stop = pair.time; + let end = pair.time + 5 * CID_TIMEOUT; + + use crate::LOC_CID_COUNT; + use crate::cid_queue::CidQueue; + let mut active_cid_num = CidQueue::LEN as u64 + 1; + active_cid_num = active_cid_num.min(LOC_CID_COUNT); + let mut left_bound = 0; + let mut right_bound = active_cid_num - 1; + + while pair.time < end { + stop += CID_TIMEOUT; + // Run a while until PushNewCID timer fires + while pair.time < stop { + if !pair.step() { + if let Some(time) = min_opt(pair.client.next_wakeup(), pair.server.next_wakeup()) { + pair.time = time; + } + } + } + info!( + "Checking active cid sequence range before {:?} seconds", + round * CID_TIMEOUT.as_secs() + ); + let _bound = 
(left_bound, right_bound); + assert_matches!( + pair.server_conn_mut(server_ch).active_local_cid_seq(), + _bound + ); + round += 1; + left_bound += active_cid_num; + right_bound += active_cid_num; + pair.drive_server(); + } +} + +#[test] +fn cid_retirement() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + // Server retires current active remote CIDs + pair.server_conn_mut(server_ch) + .rotate_local_cid(1, Instant::now()); + pair.drive(); + // Any unexpected behavior may trigger TransportError::CONNECTION_ID_LIMIT_ERROR + assert!(!pair.client_conn_mut(client_ch).is_closed()); + assert!(!pair.server_conn_mut(server_ch).is_closed()); + assert_matches!(pair.client_conn_mut(client_ch).active_rem_cid_seq(), 1); + + use crate::LOC_CID_COUNT; + use crate::cid_queue::CidQueue; + let mut active_cid_num = CidQueue::LEN as u64; + active_cid_num = active_cid_num.min(LOC_CID_COUNT); + + let next_retire_prior_to = active_cid_num + 1; + pair.client_conn_mut(client_ch).ping(); + // Server retires all valid remote CIDs + pair.server_conn_mut(server_ch) + .rotate_local_cid(next_retire_prior_to, Instant::now()); + pair.drive(); + assert!(!pair.client_conn_mut(client_ch).is_closed()); + assert!(!pair.server_conn_mut(server_ch).is_closed()); + + assert_eq!( + pair.client_conn_mut(client_ch).active_rem_cid_seq(), + next_retire_prior_to, + ); +} + +#[test] +fn finish_stream_flow_control_reordered() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + + const MSG: &[u8] = b"hello"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + pair.drive_client(); // Send stream data + pair.server.drive(pair.time, pair.client.addr); // Receive + + // Issue flow control credit + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!( + 
chunks.next(usize::MAX), + Ok(Some(chunk)) if chunk.offset == 0 && chunk.bytes == MSG + ); + let _ = chunks.finalize(); + + pair.server.drive(pair.time, pair.client.addr); + pair.server.delay_outbound(); // Delay it + + pair.client_send(client_ch, s).finish().unwrap(); + pair.drive_client(); // Send FIN + pair.server.drive(pair.time, pair.client.addr); // Acknowledge + pair.server.finish_delay(); // Add flow control packets after + pair.drive(); + + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Stream(StreamEvent::Finished { id })) if id == s + ); + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Uni })) + ); + assert_matches!(pair.server_streams(server_ch).accept(Dir::Uni), Some(stream) if stream == s); + + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!(chunks.next(usize::MAX), Ok(None)); + let _ = chunks.finalize(); +} + +#[test] +fn handshake_1rtt_handling() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let client_ch = pair.begin_connect(client_config()); + pair.drive_client(); + pair.drive_server(); + let server_ch = pair.server.assert_accept(); + // Server now has 1-RTT keys, but remains in Handshake state until the TLS CFIN has + // authenticated the client. Delay the final client handshake flight so that doesn't happen yet. + pair.client.drive(pair.time, pair.server.addr); + pair.client.delay_outbound(); + + // Send some 1-RTT data which will be received first. + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + const MSG: &[u8] = b"hello"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + pair.client_send(client_ch, s).finish().unwrap(); + pair.client.drive(pair.time, pair.server.addr); + + // Add the handshake flight back on. 
+ pair.client.finish_delay(); + + pair.drive(); + + assert!(pair.client_conn_mut(client_ch).stats().path.lost_packets != 0); + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!( + chunks.next(usize::MAX), + Ok(Some(chunk)) if chunk.offset == 0 && chunk.bytes == MSG + ); + let _ = chunks.finalize(); +} + +#[test] +fn stop_before_finish() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + const MSG: &[u8] = b"hello"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + pair.drive(); + + info!("stopping stream"); + const ERROR: VarInt = VarInt(42); + pair.server_recv(server_ch, s).stop(ERROR).unwrap(); + pair.drive(); + + assert_matches!( + pair.client_send(client_ch, s).finish(), + Err(FinishError::Stopped(ERROR)) + ); +} + +#[test] +fn stop_during_finish() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + const MSG: &[u8] = b"hello"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + pair.drive(); + + assert_matches!(pair.server_streams(server_ch).accept(Dir::Uni), Some(stream) if stream == s); + info!("stopping and finishing stream"); + const ERROR: VarInt = VarInt(42); + pair.server_recv(server_ch, s).stop(ERROR).unwrap(); + pair.drive_server(); + pair.client_send(client_ch, s).finish().unwrap(); + pair.drive_client(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Stream(StreamEvent::Stopped { id, error_code: ERROR })) if id == s + ); +} + +// Ensure we can recover from loss of tail packets when the congestion window is full +#[test] +fn congested_tail_loss() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, _) = pair.connect(); + + const TARGET: u64 = 2048; + 
assert!(pair.client_conn_mut(client_ch).congestion_window() > TARGET); + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + // Send data without receiving ACKs until the congestion state falls below target + while pair.client_conn_mut(client_ch).congestion_window() > TARGET { + let n = pair.client_send(client_ch, s).write(&[42; 1024]).unwrap(); + assert_eq!(n, 1024); + pair.drive_client(); + } + assert!(!pair.server.inbound.is_empty()); + pair.server.inbound.clear(); + // Ensure that the congestion state recovers after retransmits occur and are ACKed + info!("recovering"); + pair.drive(); + assert!(pair.client_conn_mut(client_ch).congestion_window() > TARGET); + pair.client_send(client_ch, s).write(&[42; 1024]).unwrap(); +} + +// Send a tail-loss probe when GSO segment_size is less than INITIAL_MTU +#[test] +fn tail_loss_small_segment_size() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + // No datagrams frames received in the handshake. + let server_stats = pair.server_conn_mut(server_ch).stats(); + assert_eq!(server_stats.frame_rx.datagram, 0); + + const DGRAM_LEN: usize = 1000; // Below INITIAL_MTU after packet overhead. + const DGRAM_NUM: u64 = 5; // Enough to build a GSO batch. + + info!("Sending an ack-eliciting datagram"); + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + // Drop these packets on the server side. + assert!(!pair.server.inbound.is_empty()); + pair.server.inbound.clear(); + + // Doing one step makes the client advance time to the PTO fire time. + info!("stepping forward to PTO"); + pair.step(); + + // Still no datagrams frames received by the server. + let server_stats = pair.server_conn_mut(server_ch).stats(); + assert_eq!(server_stats.frame_rx.datagram, 0); + + // Now we can send another batch of datagrams, so the PTO can send them instead of + // sending a ping. These are small enough that the segment_size is less than the + // INITIAL_MTU. 
+ info!("Sending datagram batch"); + for _ in 0..DGRAM_NUM { + pair.client_datagrams(client_ch) + .send(vec![0; DGRAM_LEN].into(), false) + .unwrap(); + } + + // If this succeeds the datagrams are received by the server and the client did not + // crash. + pair.drive(); + + // Finally the server should have received some datagrams. + let server_stats = pair.server_conn_mut(server_ch).stats(); + assert_eq!(server_stats.frame_rx.datagram, DGRAM_NUM); +} + +// Respect max_datagrams when TLP happens +#[test] +fn tail_loss_respect_max_datagrams() { + let _guard = subscribe(); + let client_config = { + let mut c_config = client_config(); + let mut t_config = TransportConfig::default(); + //Disabling GSO, so only a single segment should be sent per iops + t_config.enable_segmentation_offload(false); + c_config.transport_config(t_config.into()); + c_config + }; + let mut pair = Pair::default(); + let (client_ch, _) = pair.connect_with(client_config); + + const DGRAM_LEN: usize = 1000; // High enough so GSO batch could be built + const DGRAM_NUM: u64 = 5; // Enough to build a GSO batch. + + info!("Sending an ack-eliciting datagram"); + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + // Drop these packets on the server side. + assert!(!pair.server.inbound.is_empty()); + pair.server.inbound.clear(); + + // Doing one step makes the client advance time to the PTO fire time. 
+ info!("stepping forward to PTO"); + pair.step(); + + // start sending datagram batches but the first should be a TLP + info!("Sending datagram batch"); + for _ in 0..DGRAM_NUM { + pair.client_datagrams(client_ch) + .send(vec![0; DGRAM_LEN].into(), false) + .unwrap(); + } + + pair.drive(); + + // Finally checking the number of sent udp datagrams match the number of iops + let client_stats = pair.client_conn_mut(client_ch).stats(); + assert_eq!(client_stats.udp_tx.ios, client_stats.udp_tx.datagrams); +} + +#[test] +fn datagram_send_recv() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); + assert_matches!(pair.client_datagrams(client_ch).max_size(), Some(x) if x > 0); + + const DATA: &[u8] = b"whee"; + pair.client_datagrams(client_ch) + .send(DATA.into(), true) + .unwrap(); + pair.drive(); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::DatagramReceived) + ); + assert_eq!(pair.server_datagrams(server_ch).recv().unwrap(), DATA); + assert_matches!(pair.server_datagrams(server_ch).recv(), None); +} + +#[test] +fn datagram_recv_buffer_overflow() { + let _guard = subscribe(); + const WINDOW: usize = 100; + let server = ServerConfig { + transport: Arc::new(TransportConfig { + datagram_receive_buffer_size: Some(WINDOW), + ..TransportConfig::default() + }), + ..server_config() + }; + let mut pair = Pair::new(Default::default(), server); + let (client_ch, server_ch) = pair.connect(); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); + assert_eq!( + pair.client_conn_mut(client_ch).datagrams().max_size(), + Some(WINDOW - Datagram::SIZE_BOUND) + ); + + const DATA1: &[u8] = &[0xAB; (WINDOW / 3) + 1]; + const DATA2: &[u8] = &[0xBC; (WINDOW / 3) + 1]; + const DATA3: &[u8] = &[0xCD; (WINDOW / 3) + 1]; + pair.client_datagrams(client_ch) + .send(DATA1.into(), true) + .unwrap(); + pair.client_datagrams(client_ch) + 
.send(DATA2.into(), true) + .unwrap(); + pair.client_datagrams(client_ch) + .send(DATA3.into(), true) + .unwrap(); + pair.drive(); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::DatagramReceived) + ); + assert_eq!(pair.server_datagrams(server_ch).recv().unwrap(), DATA2); + assert_eq!(pair.server_datagrams(server_ch).recv().unwrap(), DATA3); + assert_matches!(pair.server_datagrams(server_ch).recv(), None); + + pair.client_datagrams(client_ch) + .send(DATA1.into(), true) + .unwrap(); + pair.drive(); + assert_eq!(pair.server_datagrams(server_ch).recv().unwrap(), DATA1); + assert_matches!(pair.server_datagrams(server_ch).recv(), None); +} + +#[test] +fn datagram_unsupported() { + let _guard = subscribe(); + let server = ServerConfig { + transport: Arc::new(TransportConfig { + datagram_receive_buffer_size: None, + ..TransportConfig::default() + }), + ..server_config() + }; + let mut pair = Pair::new(Default::default(), server); + let (client_ch, server_ch) = pair.connect(); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); + assert_matches!(pair.client_datagrams(client_ch).max_size(), None); + + match pair.client_datagrams(client_ch).send(Bytes::new(), true) { + Err(SendDatagramError::UnsupportedByPeer) => {} + Err(e) => panic!("unexpected error: {e}"), + Ok(_) => panic!("unexpected success"), + } +} + +#[test] +fn large_initial() { + let _guard = subscribe(); + let server_config = + ServerConfig::with_crypto(Arc::new(server_crypto_with_alpn(vec![vec![0, 0, 0, 42]]))); + + let mut pair = Pair::new(Arc::new(EndpointConfig::default()), server_config); + let client_crypto = + client_crypto_with_alpn((0..1000u32).map(|x| x.to_be_bytes().to_vec()).collect()); + let cfg = ClientConfig::new(Arc::new(client_crypto)); + let client_ch = pair.begin_connect(cfg); + pair.drive(); + let server_ch = pair.server.assert_accept(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::HandshakeDataReady) + ); + 
assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Connected) + ); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Connected) + ); +} + +#[test] +/// Ensure that we don't yield a finish event before the actual FIN is acked so the peer isn't left +/// hanging +fn finish_acked() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + + const MSG: &[u8] = b"hello"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + info!("client sends data to server"); + pair.drive_client(); // send data to server + info!("server acknowledges data"); + pair.drive_server(); // process data and send data ack + + // Receive data + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Uni })) + ); + assert_matches!(pair.server_conn_mut(server_ch).poll(), None); + + assert_matches!(pair.server_streams(server_ch).accept(Dir::Uni), Some(stream) if stream == s); + + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!( + chunks.next(usize::MAX), + Ok(Some(chunk)) if chunk.offset == 0 && chunk.bytes == MSG + ); + assert_matches!(chunks.next(usize::MAX), Err(ReadError::Blocked)); + let _ = chunks.finalize(); + + // Finish before receiving data ack + pair.client_send(client_ch, s).finish().unwrap(); + // Send FIN, receive data ack + info!("client receives ACK, sends FIN"); + pair.drive_client(); + // Check for premature finish from data ack + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + // Process FIN ack + info!("server ACKs FIN"); + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Stream(StreamEvent::Finished { id })) if id == s + ); + + let mut 
recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!(chunks.next(usize::MAX), Ok(None)); + let _ = chunks.finalize(); +} + +#[test] +/// Ensure that we don't yield a finish event while there's still unacknowledged data +fn finish_retransmit() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + + const MSG: &[u8] = b"hello"; + pair.client_send(client_ch, s).write(MSG).unwrap(); + pair.drive_client(); // send data to server + pair.server.inbound.clear(); // Lose it + + // Send FIN + pair.client_send(client_ch, s).finish().unwrap(); + pair.drive_client(); + // Process FIN + pair.drive_server(); + // Receive FIN ack, but no data ack + pair.drive_client(); + // Check for premature finish from FIN ack + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + // Recover + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Stream(StreamEvent::Finished { id })) if id == s + ); + + assert_matches!( + pair.server_conn_mut(server_ch).poll(), + Some(Event::Stream(StreamEvent::Opened { dir: Dir::Uni })) + ); + + assert_matches!(pair.server_streams(server_ch).accept(Dir::Uni), Some(stream) if stream == s); + + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!( + chunks.next(usize::MAX), + Ok(Some(chunk)) if chunk.offset == 0 && chunk.bytes == MSG + ); + assert_matches!(chunks.next(usize::MAX), Ok(None)); + let _ = chunks.finalize(); +} + +/// Ensures that exchanging data on a client-initiated bidirectional stream works past the initial +/// stream window. 
+#[test] +fn repeated_request_response() { + let _guard = subscribe(); + let server = ServerConfig { + transport: Arc::new(TransportConfig { + max_concurrent_bidi_streams: 1u32.into(), + ..TransportConfig::default() + }), + ..server_config() + }; + let mut pair = Pair::new(Default::default(), server); + let (client_ch, server_ch) = pair.connect(); + const REQUEST: &[u8] = b"hello"; + const RESPONSE: &[u8] = b"world"; + for _ in 0..3 { + let s = pair.client_streams(client_ch).open(Dir::Bi).unwrap(); + + pair.client_send(client_ch, s).write(REQUEST).unwrap(); + pair.client_send(client_ch, s).finish().unwrap(); + + pair.drive(); + + assert_eq!(pair.server_streams(server_ch).accept(Dir::Bi), Some(s)); + let mut recv = pair.server_recv(server_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!( + chunks.next(usize::MAX), + Ok(Some(chunk)) if chunk.offset == 0 && chunk.bytes == REQUEST + ); + + assert_matches!(chunks.next(usize::MAX), Ok(None)); + let _ = chunks.finalize(); + pair.server_send(server_ch, s).write(RESPONSE).unwrap(); + pair.server_send(server_ch, s).finish().unwrap(); + + pair.drive(); + + let mut recv = pair.client_recv(client_ch, s); + let mut chunks = recv.read(false).unwrap(); + assert_matches!( + chunks.next(usize::MAX), + Ok(Some(chunk)) if chunk.offset == 0 && chunk.bytes == RESPONSE + ); + assert_matches!(chunks.next(usize::MAX), Ok(None)); + let _ = chunks.finalize(); + } +} + +/// Ensures that the client sends an anti-deadlock probe after an incomplete server's first flight +#[test] +fn handshake_anti_deadlock_probe() { + let _guard = subscribe(); + + let (cert, key) = big_cert_and_key(); + let server = server_config_with_cert(cert.clone(), key); + let client = client_config_with_certs(vec![cert]); + let mut pair = Pair::new(Default::default(), server); + + let client_ch = pair.begin_connect(client); + // Client sends initial + pair.drive_client(); + // Server sends first flight, gets blocked on anti-amplification + 
pair.drive_server(); + // Client acks... + pair.drive_client(); + // ...but it's lost, so the server doesn't get anti-amplification credit from it + pair.server.inbound.clear(); + // Client sends an anti-deadlock probe, and the handshake completes as usual. + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Connected) + ); +} + +/// Ensures that the server can respond with 3 initial packets during the handshake +/// before the anti-amplification limit kicks in when MTUs are similar. +#[test] +fn server_can_send_3_inital_packets() { + let _guard = subscribe(); + + let (cert, key) = big_cert_and_key(); + let server = server_config_with_cert(cert.clone(), key); + let client = client_config_with_certs(vec![cert]); + let mut pair = Pair::new(Default::default(), server); + + let client_ch = pair.begin_connect(client); + // Client sends initial + pair.drive_client(); + // Server sends first flight, gets blocked on anti-amplification + pair.drive_server(); + // Server should have queued 3 packets at this time + assert_eq!(pair.client.inbound.len(), 3); + + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::Connected) + ); +} + +/// Generate a big fat certificate that can't fit inside the initial anti-amplification limit +fn big_cert_and_key() -> (CertificateDer<'static>, PrivateKeyDer<'static>) { + let cert = rcgen::generate_simple_self_signed( + Some("localhost".into()) + .into_iter() + .chain((0..1000).map(|x| format!("foo_{x}"))) + .collect::>(), + ) + .unwrap(); + + ( + cert.cert.into(), + PrivateKeyDer::Pkcs8(cert.signing_key.serialize_der().into()), + ) +} + +#[test] +fn malformed_token_len() { + let _guard = subscribe(); + let client_addr = "[::2]:7890".parse().unwrap(); + let mut server = 
Endpoint::new( + Default::default(), + Some(Arc::new(server_config())), + true, + None, + ); + let mut buf = Vec::with_capacity(server.config().get_max_udp_payload_size() as usize); + server.handle( + Instant::now(), + client_addr, + None, + None, + hex!("8900 0000 0101 0000 1b1b 841b 0000 0000 3f00")[..].into(), + &mut buf, + ); +} + +#[test] +fn loss_probe_requests_immediate_ack() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, _) = pair.connect(); + pair.drive(); + + let stats_after_connect = pair.client_conn_mut(client_ch).stats(); + + // Lose a ping + let default_mtu = mem::replace(&mut pair.mtu, 0); + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + pair.mtu = default_mtu; + + // Drive the connection further so a loss probe is sent + pair.drive(); + + // Assert that two IMMEDIATE_ACKs were sent (two loss probes) + let stats_after_recovery = pair.client_conn_mut(client_ch).stats(); + assert_eq!( + stats_after_recovery.frame_tx.immediate_ack - stats_after_connect.frame_tx.immediate_ack, + 2 + ); +} + +#[test] +/// This is mostly a sanity check to ensure our testing code is correctly dropping packets above the +/// pmtu +fn connect_too_low_mtu() { + let _guard = subscribe(); + let mut pair = Pair::default(); + + // The maximum payload size is lower than 1200, so no packages will get through! 
+ pair.mtu = 1000; + + pair.begin_connect(client_config()); + pair.drive(); + pair.server.assert_no_accept(); +} + +#[test] +fn connect_lost_mtu_probes_do_not_trigger_congestion_control() { + let _guard = subscribe(); + let mut pair = Pair::default(); + pair.mtu = 1200; + + let (client_ch, server_ch) = pair.connect(); + pair.drive(); + + let client_stats = pair.client_conn_mut(client_ch).stats(); + let server_stats = pair.server_conn_mut(server_ch).stats(); + + // Sanity check (all MTU probes should have been lost) + assert_eq!(client_stats.path.sent_plpmtud_probes, 9); + assert_eq!(client_stats.path.lost_plpmtud_probes, 9); + assert_eq!(server_stats.path.sent_plpmtud_probes, 9); + assert_eq!(server_stats.path.lost_plpmtud_probes, 9); + + // No congestion events + assert_eq!(client_stats.path.congestion_events, 0); + assert_eq!(server_stats.path.congestion_events, 0); +} + +#[test] +fn connect_detects_mtu() { + let _guard = subscribe(); + let max_udp_payload_and_expected_mtu = &[(1200, 1200), (1400, 1389), (1500, 1452)]; + + for &(pair_max_udp, expected_mtu) in max_udp_payload_and_expected_mtu { + let mut pair = Pair::default(); + pair.mtu = pair_max_udp; + let (client_ch, server_ch) = pair.connect(); + pair.drive(); + + assert_eq!(pair.client_conn_mut(client_ch).path_mtu(), expected_mtu); + assert_eq!(pair.server_conn_mut(server_ch).path_mtu(), expected_mtu); + } +} + +#[test] +fn migrate_detects_new_mtu_and_respects_original_peer_max_udp_payload_size() { + let _guard = subscribe(); + + let client_max_udp_payload_size: u16 = 1400; + + // Set up a client with a max payload size of 1400 (and use the defaults for the server) + let server_endpoint_config = EndpointConfig::default(); + let server = Endpoint::new( + Arc::new(server_endpoint_config), + Some(Arc::new(server_config())), + true, + None, + ); + let client_endpoint_config = EndpointConfig { + max_udp_payload_size: VarInt::from(client_max_udp_payload_size), + ..EndpointConfig::default() + }; + let client = 
Endpoint::new(Arc::new(client_endpoint_config), None, true, None); + let mut pair = Pair::new_from_endpoint(client, server); + pair.mtu = 1300; + + // Connect + let (client_ch, server_ch) = pair.connect(); + pair.drive(); + + // Sanity check: MTUD ran to completion (the numbers differ because binary search stops when + // changes are smaller than 20, otherwise both endpoints would converge at the same MTU of 1300) + assert_eq!(pair.client_conn_mut(client_ch).path_mtu(), 1293); + assert_eq!(pair.server_conn_mut(server_ch).path_mtu(), 1300); + + // Migrate client to a different port (and simulate a higher path MTU) + pair.mtu = 1500; + pair.client.addr = SocketAddr::new( + Ipv4Addr::new(127, 0, 0, 1).into(), + CLIENT_PORTS.lock().unwrap().next().unwrap(), + ); + pair.client_conn_mut(client_ch).ping(); + pair.drive(); + + // Sanity check: the server saw that the client address was updated + assert_eq!( + pair.server_conn_mut(server_ch).remote_address(), + pair.client.addr + ); + + // MTU detection has successfully run after migrating + assert_eq!( + pair.server_conn_mut(server_ch).path_mtu(), + client_max_udp_payload_size + ); + + // Sanity check: the client keeps the old MTU, because migration is triggered by incoming + // packets from a different address + assert_eq!(pair.client_conn_mut(client_ch).path_mtu(), 1293); +} + +#[test] +fn connect_runs_mtud_again_after_600_seconds() { + let _guard = subscribe(); + let mut server_config = server_config(); + let mut client_config = client_config(); + + // Note: we use an infinite idle timeout to ensure we can wait 600 seconds without the + // connection closing + Arc::get_mut(&mut server_config.transport) + .unwrap() + .max_idle_timeout(None); + Arc::get_mut(&mut client_config.transport) + .unwrap() + .max_idle_timeout(None); + + let mut pair = Pair::new(Default::default(), server_config); + pair.mtu = 1400; + let (client_ch, server_ch) = pair.connect_with(client_config); + pair.drive(); + + // Sanity check: the mtu has 
been discovered + let client_conn = pair.client_conn_mut(client_ch); + assert_eq!(client_conn.path_mtu(), 1389); + assert_eq!(client_conn.stats().path.sent_plpmtud_probes, 5); + assert_eq!(client_conn.stats().path.lost_plpmtud_probes, 3); + let server_conn = pair.server_conn_mut(server_ch); + assert_eq!(server_conn.path_mtu(), 1389); + assert_eq!(server_conn.stats().path.sent_plpmtud_probes, 5); + assert_eq!(server_conn.stats().path.lost_plpmtud_probes, 3); + + // Sanity check: the mtu does not change after the fact, even though the link now supports a + // higher udp payload size + pair.mtu = 1500; + pair.drive(); + assert_eq!(pair.client_conn_mut(client_ch).path_mtu(), 1389); + assert_eq!(pair.server_conn_mut(server_ch).path_mtu(), 1389); + + // The MTU changes after 600 seconds, because now MTUD runs for the second time + pair.time += Duration::from_secs(600); + pair.drive(); + assert!(!pair.client_conn_mut(client_ch).is_closed()); + assert!(!pair.server_conn_mut(client_ch).is_closed()); + assert_eq!(pair.client_conn_mut(client_ch).path_mtu(), 1452); + assert_eq!(pair.server_conn_mut(server_ch).path_mtu(), 1452); +} + +#[test] +fn blackhole_after_mtu_change_repairs_itself() { + let _guard = subscribe(); + let mut pair = Pair::default(); + pair.mtu = 1500; + let (client_ch, server_ch) = pair.connect(); + pair.drive(); + + // Sanity check + assert_eq!(pair.client_conn_mut(client_ch).path_mtu(), 1452); + assert_eq!(pair.server_conn_mut(server_ch).path_mtu(), 1452); + + // Back to the base MTU + pair.mtu = 1200; + + // The payload will be sent in a single packet, because the detected MTU was 1444, but it will + // be dropped because the link no longer supports that packet size! 
+ let payload = vec![42; 1300]; + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + pair.client_send(client_ch, s).write(&payload).unwrap(); + let out_of_bounds = pair.drive_bounded(); + + if out_of_bounds { + panic!("Connections never reached an idle state"); + } + + let recv = pair.server_recv(server_ch, s); + let buf = stream_chunks(recv); + + // The whole packet arrived in the end + assert_eq!(buf.len(), 1300); + + // Sanity checks (black hole detected after 3 lost packets) + let client_stats = pair.client_conn_mut(client_ch).stats(); + assert!(client_stats.path.lost_packets >= 3); + assert!(client_stats.path.congestion_events >= 3); + assert_eq!(client_stats.path.black_holes_detected, 1); +} + +#[test] +fn mtud_probes_include_immediate_ack() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, _) = pair.connect(); + pair.drive(); + + let stats = pair.client_conn_mut(client_ch).stats(); + assert_eq!(stats.path.sent_plpmtud_probes, 4); + + // Each probe contains a ping and an immediate ack + assert_eq!(stats.frame_tx.ping, 4); + assert_eq!(stats.frame_tx.immediate_ack, 4); +} + +#[test] +fn packet_splitting_with_default_mtu() { + let _guard = subscribe(); + + // The payload needs to be split in 2 in order to be sent, because it is higher than the max MTU + let payload = vec![42; 1300]; + + let mut pair = Pair::default(); + pair.mtu = 1200; + let (client_ch, _) = pair.connect(); + pair.drive(); + + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + + pair.client_send(client_ch, s).write(&payload).unwrap(); + pair.client.drive(pair.time, pair.server.addr); + assert_eq!(pair.client.outbound.len(), 2); + + pair.drive_client(); + assert_eq!(pair.server.inbound.len(), 2); +} + +#[test] +fn packet_splitting_not_necessary_after_higher_mtu_discovered() { + let _guard = subscribe(); + let payload = vec![42; 1300]; + + let mut pair = Pair::default(); + pair.mtu = 1500; + + let (client_ch, _) = pair.connect(); + 
pair.drive(); + + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + + pair.client_send(client_ch, s).write(&payload).unwrap(); + pair.client.drive(pair.time, pair.server.addr); + assert_eq!(pair.client.outbound.len(), 1); + + pair.drive_client(); + assert_eq!(pair.server.inbound.len(), 1); +} + +#[test] +fn single_ack_eliciting_packet_triggers_ack_after_delay() { + let _guard = subscribe(); + let mut pair = Pair::default_with_deterministic_pns(); + let (client_ch, _) = pair.connect_with(client_config_with_deterministic_pns()); + pair.drive(); + + let stats_after_connect = pair.client_conn_mut(client_ch).stats(); + + let start = pair.time; + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); // Send ping + pair.drive_server(); // Process ping + pair.drive_client(); // Give the client a chance to process an ack, so our assertion can fail + + // Sanity check: the time hasn't advanced in the meantime) + assert_eq!(pair.time, start); + + let stats_after_ping = pair.client_conn_mut(client_ch).stats(); + assert_eq!( + stats_after_ping.frame_tx.ping - stats_after_connect.frame_tx.ping, + 1 + ); + assert_eq!( + stats_after_ping.frame_rx.acks - stats_after_connect.frame_rx.acks, + 0 + ); + + pair.client.capture_inbound_packets = true; + pair.drive(); + let stats_after_drive = pair.client_conn_mut(client_ch).stats(); + assert_eq!( + stats_after_drive.frame_rx.acks - stats_after_ping.frame_rx.acks, + 1 + ); + + // The time is start + max_ack_delay + let default_max_ack_delay_ms = TransportParameters::default().max_ack_delay.into_inner(); + assert_eq!( + pair.time, + start + Duration::from_millis(default_max_ack_delay_ms) + ); + + // The ACK delay is properly calculated + assert_eq!(pair.client.captured_packets.len(), 1); + let mut frames = frame::Iter::new(pair.client.captured_packets.remove(0).into()) + .unwrap() + .collect::, _>>() + .unwrap(); + assert_eq!(frames.len(), 1); + if let Frame::Ack(ack) = frames.remove(0) { + let ack_delay_exp = 
TransportParameters::default().ack_delay_exponent; + let delay = ack.delay << ack_delay_exp.into_inner(); + assert_eq!(delay, default_max_ack_delay_ms * 1_000); + } else { + panic!("Expected ACK frame"); + } + + // Sanity check: no loss probe was sent, because the delayed ACK was received on time + assert_eq!( + stats_after_drive.frame_tx.ping - stats_after_connect.frame_tx.ping, + 1 + ); +} + +#[test] +fn immediate_ack_triggers_ack() { + let _guard = subscribe(); + let mut pair = Pair::default_with_deterministic_pns(); + let (client_ch, _) = pair.connect_with(client_config_with_deterministic_pns()); + pair.drive(); + + let acks_after_connect = pair.client_conn_mut(client_ch).stats().frame_rx.acks; + + pair.client_conn_mut(client_ch).immediate_ack(); + pair.drive_client(); // Send immediate ack + pair.drive_server(); // Process immediate ack + pair.drive_client(); // Give the client a chance to process the ack + + let acks_after_ping = pair.client_conn_mut(client_ch).stats().frame_rx.acks; + + assert_eq!(acks_after_ping - acks_after_connect, 1); +} + +#[test] +fn out_of_order_ack_eliciting_packet_triggers_ack() { + let _guard = subscribe(); + let mut pair = Pair::default_with_deterministic_pns(); + let (client_ch, server_ch) = pair.connect_with(client_config_with_deterministic_pns()); + pair.drive(); + + let default_mtu = pair.mtu; + + let client_stats_after_connect = pair.client_conn_mut(client_ch).stats(); + let server_stats_after_connect = pair.server_conn_mut(server_ch).stats(); + + // Send a packet that won't arrive right away (it will be dropped and be re-sent later) + pair.mtu = 0; + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + // Sanity check (ping sent, no ACK received) + let client_stats_after_first_ping = pair.client_conn_mut(client_ch).stats(); + assert_eq!( + client_stats_after_first_ping.frame_tx.ping - client_stats_after_connect.frame_tx.ping, + 1 + ); + assert_eq!( + client_stats_after_first_ping.frame_rx.acks - 
client_stats_after_connect.frame_rx.acks, + 0 + ); + + // Restore the default MTU and send another ping, which will arrive earlier than the dropped one + pair.mtu = default_mtu; + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + pair.drive_server(); + pair.drive_client(); + + // Client sanity check (ping sent, one ACK received) + let client_stats_after_second_ping = pair.client_conn_mut(client_ch).stats(); + assert_eq!( + client_stats_after_second_ping.frame_tx.ping - client_stats_after_connect.frame_tx.ping, + 2 + ); + assert_eq!( + client_stats_after_second_ping.frame_rx.acks - client_stats_after_connect.frame_rx.acks, + 1 + ); + + // Server checks (single ping received, ACK sent) + let server_stats_after_second_ping = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after_second_ping.frame_rx.ping - server_stats_after_connect.frame_rx.ping, + 1 + ); + assert_eq!( + server_stats_after_second_ping.frame_tx.acks - server_stats_after_connect.frame_tx.acks, + 1 + ); +} + +#[test] +fn single_ack_eliciting_packet_with_ce_bit_triggers_immediate_ack() { + let _guard = subscribe(); + let mut pair = Pair::default_with_deterministic_pns(); + let (client_ch, _) = pair.connect_with(client_config_with_deterministic_pns()); + pair.drive(); + + let stats_after_connect = pair.client_conn_mut(client_ch).stats(); + + let start = pair.time; + + pair.client_conn_mut(client_ch).ping(); + + pair.congestion_experienced = true; + pair.drive_client(); // Send ping + pair.congestion_experienced = false; + + pair.drive_server(); // Process ping, send ACK in response to congestion + pair.drive_client(); // Process ACK + + // Sanity check: the time hasn't advanced in the meantime) + assert_eq!(pair.time, start); + + let stats_after_ping = pair.client_conn_mut(client_ch).stats(); + assert_eq!( + stats_after_ping.frame_tx.ping - stats_after_connect.frame_tx.ping, + 1 + ); + assert_eq!( + stats_after_ping.frame_rx.acks - stats_after_connect.frame_rx.acks, + 1 
+ ); + assert_eq!( + stats_after_ping.path.congestion_events - stats_after_connect.path.congestion_events, + 1 + ); +} + +fn setup_ack_frequency_test(max_ack_delay: Duration) -> (Pair, ConnectionHandle, ConnectionHandle) { + let mut client_config = client_config_with_deterministic_pns(); + let mut ack_freq_config = AckFrequencyConfig::default(); + ack_freq_config + .ack_eliciting_threshold(10u32.into()) + .max_ack_delay(Some(max_ack_delay)); + Arc::get_mut(&mut client_config.transport) + .unwrap() + .ack_frequency_config(Some(ack_freq_config)) + .mtu_discovery_config(None); // To keep traffic cleaner + + let mut pair = Pair::default_with_deterministic_pns(); + pair.latency = Duration::from_millis(10); // Need latency to avoid an RTT = 0 + let (client_ch, server_ch) = pair.connect_with(client_config); + pair.drive(); + + assert_eq!( + pair.client_conn_mut(client_ch) + .stats() + .frame_tx + .ack_frequency, + 1 + ); + assert_eq!(pair.client_conn_mut(client_ch).stats().frame_tx.ping, 0); + (pair, client_ch, server_ch) +} + +/// Verify that max ACK delay is counted from the first ACK-eliciting packet +#[test] +fn ack_frequency_ack_delayed_from_first_of_flight() { + let _guard = subscribe(); + let (mut pair, client_ch, server_ch) = setup_ack_frequency_test(Duration::from_millis(30)); + + // The client sends the following frames: + // + // * 0 ms: ping + // * 5 ms: ping x2 + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + pair.time += Duration::from_millis(5); + for _ in 0..2 { + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + } + + pair.time += Duration::from_millis(5); + // Server: receive the first ping and send no ACK + let server_stats_before = pair.server_conn_mut(server_ch).stats(); + pair.drive_server(); + let server_stats_after = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after.frame_rx.ping - server_stats_before.frame_rx.ping, + 1 + ); + assert_eq!( + server_stats_after.frame_tx.acks - 
server_stats_before.frame_tx.acks, + 0 + ); + + // Server: receive the second and third pings and send no ACK + pair.time += Duration::from_millis(10); + let server_stats_before = pair.server_conn_mut(server_ch).stats(); + pair.drive_server(); + let server_stats_after = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after.frame_rx.ping - server_stats_before.frame_rx.ping, + 2 + ); + assert_eq!( + server_stats_after.frame_tx.acks - server_stats_before.frame_tx.acks, + 0 + ); + + // Server: Send an ACK after ACK delay expires + pair.time += Duration::from_millis(20); + let server_stats_before = pair.server_conn_mut(server_ch).stats(); + pair.drive_server(); + let server_stats_after = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after.frame_tx.acks - server_stats_before.frame_tx.acks, + 1 + ); +} + +#[test] +fn ack_frequency_ack_sent_after_max_ack_delay() { + let _guard = subscribe(); + let max_ack_delay = Duration::from_millis(30); + let (mut pair, client_ch, server_ch) = setup_ack_frequency_test(max_ack_delay); + + // Client sends a ping + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + // Server: receive the ping, send no ACK + pair.time += pair.latency; + let server_stats_before = pair.server_conn_mut(server_ch).stats(); + pair.drive_server(); + let server_stats_after = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after.frame_rx.ping - server_stats_before.frame_rx.ping, + 1 + ); + assert_eq!( + server_stats_after.frame_tx.acks - server_stats_before.frame_tx.acks, + 0 + ); + + // Server: send an ack after max_ack_delay has elapsed + pair.time += max_ack_delay; + let server_stats_before = pair.server_conn_mut(server_ch).stats(); + pair.drive_server(); + let server_stats_after = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after.frame_rx.ping - server_stats_before.frame_rx.ping, + 0 + ); + assert_eq!( + server_stats_after.frame_tx.acks - 
server_stats_before.frame_tx.acks, + 1 + ); +} + +#[test] +fn ack_frequency_ack_sent_after_packets_above_threshold() { + let _guard = subscribe(); + let max_ack_delay = Duration::from_millis(30); + let (mut pair, client_ch, server_ch) = setup_ack_frequency_test(max_ack_delay); + + // The client sends the following frames: + // + // * 0 ms: ping + // * 5 ms: ping (11x) + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + pair.time += Duration::from_millis(5); + for _ in 0..11 { + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + } + + // Server: receive the first ping, send no ACK + pair.time += Duration::from_millis(5); + let server_stats_before = pair.server_conn_mut(server_ch).stats(); + pair.drive_server(); + let server_stats_after = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after.frame_rx.ping - server_stats_before.frame_rx.ping, + 1 + ); + assert_eq!( + server_stats_after.frame_tx.acks - server_stats_before.frame_tx.acks, + 0 + ); + + // Server: receive the remaining pings, send ACK + pair.time += Duration::from_millis(5); + let server_stats_before = pair.server_conn_mut(server_ch).stats(); + pair.drive_server(); + let server_stats_after = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after.frame_rx.ping - server_stats_before.frame_rx.ping, + 11 + ); + assert_eq!( + server_stats_after.frame_tx.acks - server_stats_before.frame_tx.acks, + 1 + ); +} + +#[test] +fn ack_frequency_ack_sent_after_reordered_packets_below_threshold() { + let _guard = subscribe(); + let max_ack_delay = Duration::from_millis(30); + let (mut pair, client_ch, server_ch) = setup_ack_frequency_test(max_ack_delay); + + // The client sends the following frames: + // + // * 0 ms: ping + // * 5 ms: ping (lost) + // * 5 ms: ping + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + pair.time += Duration::from_millis(5); + + // Send and lose an ack-eliciting packet + pair.mtu = 0; + 
pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + // Restore the default MTU and send another ping, which will arrive earlier than the dropped one + pair.mtu = DEFAULT_MTU; + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + // Server: receive first ping, send no ACK + pair.time += Duration::from_millis(5); + let server_stats_before = pair.server_conn_mut(server_ch).stats(); + pair.drive_server(); + let server_stats_after = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after.frame_rx.ping - server_stats_before.frame_rx.ping, + 1 + ); + assert_eq!( + server_stats_after.frame_tx.acks - server_stats_before.frame_tx.acks, + 0 + ); + + // Server: receive second ping, send no ACK + pair.time += Duration::from_millis(5); + let server_stats_before = pair.server_conn_mut(server_ch).stats(); + pair.drive_server(); + let server_stats_after = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after.frame_rx.ping - server_stats_before.frame_rx.ping, + 1 + ); + assert_eq!( + server_stats_after.frame_tx.acks - server_stats_before.frame_tx.acks, + 0 + ); +} + +#[test] +fn ack_frequency_ack_sent_after_reordered_packets_above_threshold() { + let _guard = subscribe(); + let max_ack_delay = Duration::from_millis(30); + let (mut pair, client_ch, server_ch) = setup_ack_frequency_test(max_ack_delay); + + // Send a ping + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + // Send and lose two ack-eliciting packets + pair.time += Duration::from_millis(5); + pair.mtu = 0; + for _ in 0..2 { + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + } + + // Restore the default MTU and send another ping, which will arrive earlier than the dropped ones + pair.mtu = DEFAULT_MTU; + pair.client_conn_mut(client_ch).ping(); + pair.drive_client(); + + // Server: receive first ping, send no ACK + pair.time += Duration::from_millis(5); + let server_stats_before = 
pair.server_conn_mut(server_ch).stats(); + pair.drive_server(); + let server_stats_after = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after.frame_rx.ping - server_stats_before.frame_rx.ping, + 1 + ); + assert_eq!( + server_stats_after.frame_tx.acks - server_stats_before.frame_tx.acks, + 0 + ); + + // Server: receive remaining ping, send ACK + pair.time += Duration::from_millis(5); + let server_stats_before = pair.server_conn_mut(server_ch).stats(); + pair.drive_server(); + let server_stats_after = pair.server_conn_mut(server_ch).stats(); + assert_eq!( + server_stats_after.frame_rx.ping - server_stats_before.frame_rx.ping, + 1 + ); + assert_eq!( + server_stats_after.frame_tx.acks - server_stats_before.frame_tx.acks, + 1 + ); +} + +#[test] +fn ack_frequency_update_max_delay() { + let _guard = subscribe(); + let (mut pair, client_ch, server_ch) = setup_ack_frequency_test(Duration::from_millis(200)); + + // Ack frequency was sent initially + assert_eq!( + pair.server_conn_mut(server_ch) + .stats() + .frame_rx + .ack_frequency, + 1 + ); + + // Client sends a PING + info!("first ping"); + pair.client_conn_mut(client_ch).ping(); + pair.drive(); + + // No change in ACK frequency + assert_eq!( + pair.server_conn_mut(server_ch) + .stats() + .frame_rx + .ack_frequency, + 1 + ); + + // RTT jumps, client sends another ping + info!("delayed ping"); + pair.latency *= 10; + pair.client_conn_mut(client_ch).ping(); + pair.drive(); + + // ACK frequency updated + assert!( + pair.server_conn_mut(server_ch) + .stats() + .frame_rx + .ack_frequency + >= 2 + ); +} + +fn stream_chunks(mut recv: RecvStream) -> Vec { + let mut buf = Vec::new(); + + let mut chunks = recv.read(true).unwrap(); + while let Ok(Some(chunk)) = chunks.next(usize::MAX) { + buf.extend(chunk.bytes); + } + + let _ = chunks.finalize(); + + buf +} + +/// Verify that an endpoint which receives but does not send ACK-eliciting data still receives ACKs +/// occasionally. 
This is not required for conformance, but makes loss detection more responsive and +/// reduces receiver memory use. +#[test] +fn pure_sender_voluntarily_acks() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + let receiver_acks_initial = pair.server_conn_mut(server_ch).stats().frame_rx.acks; + + for _ in 0..100 { + const MSG: &[u8] = b"hello"; + pair.client_datagrams(client_ch) + .send(Bytes::from_static(MSG), true) + .unwrap(); + pair.drive(); + assert_eq!(pair.server_datagrams(server_ch).recv().unwrap(), MSG); + } + + let receiver_acks_final = pair.server_conn_mut(server_ch).stats().frame_rx.acks; + assert!(receiver_acks_final > receiver_acks_initial); +} + +#[test] +fn reject_manually() { + let _guard = subscribe(); + let mut pair = Pair::default(); + pair.server.handle_incoming = Box::new(|_| IncomingConnectionBehavior::Reject); + + // The server should now reject incoming connections. + let client_ch = pair.begin_connect(client_config()); + pair.drive(); + pair.server.assert_no_accept(); + let client = pair.client.connections.get_mut(&client_ch).unwrap(); + assert!(client.is_closed()); + assert!(matches!( + client.poll(), + Some(Event::ConnectionLost { + reason: ConnectionError::ConnectionClosed(close) + }) if close.error_code == TransportErrorCode::CONNECTION_REFUSED + )); +} + +#[test] +fn validate_then_reject_manually() { + let _guard = subscribe(); + let mut pair = Pair::default(); + pair.server.handle_incoming = Box::new({ + let mut i = 0; + move |incoming| { + if incoming.remote_address_validated() { + assert_eq!(i, 1); + i += 1; + IncomingConnectionBehavior::Reject + } else { + assert_eq!(i, 0); + i += 1; + IncomingConnectionBehavior::Retry + } + } + }); + + // The server should now retry and reject incoming connections. 
+ let client_ch = pair.begin_connect(client_config()); + pair.drive(); + pair.server.assert_no_accept(); + let client = pair.client.connections.get_mut(&client_ch).unwrap(); + assert!(client.is_closed()); + assert!(matches!( + client.poll(), + Some(Event::ConnectionLost { + reason: ConnectionError::ConnectionClosed(close) + }) if close.error_code == TransportErrorCode::CONNECTION_REFUSED + )); + pair.drive(); + assert_matches!(pair.client_conn_mut(client_ch).poll(), None); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn endpoint_and_connection_impl_send_sync() { + const fn is_send_sync() {} + is_send_sync::(); + is_send_sync::(); +} + +#[test] +fn stream_gso() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, _) = pair.connect(); + + let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); + + let initial_ios = pair.client_conn_mut(client_ch).stats().udp_tx.ios; + + // Send 20KiB of stream data, which comfortably fits inside two `tests::util::MAX_DATAGRAMS` + // datagram batches + info!("sending"); + for _ in 0..20 { + pair.client_send(client_ch, s).write(&[0; 1024]).unwrap(); + } + pair.client_send(client_ch, s).finish().unwrap(); + pair.drive(); + let final_ios = pair.client_conn_mut(client_ch).stats().udp_tx.ios; + assert_eq!(final_ios - initial_ios, 2); +} + +#[test] +fn datagram_gso() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, _) = pair.connect(); + + let initial_ios = pair.client_conn_mut(client_ch).stats().udp_tx.ios; + let initial_bytes = pair.client_conn_mut(client_ch).stats().udp_tx.bytes; + + // Send 10 datagrams above half the MTU, which fits inside a `tests::util::MAX_DATAGRAMS` + // datagram batch + info!("sending"); + const DATAGRAM_LEN: usize = 1024; + const DATAGRAMS: usize = 10; + for _ in 0..DATAGRAMS { + 
pair.client_datagrams(client_ch) + .send(Bytes::from_static(&[0; DATAGRAM_LEN]), false) + .unwrap(); + } + pair.drive(); + let final_ios = pair.client_conn_mut(client_ch).stats().udp_tx.ios; + let final_bytes = pair.client_conn_mut(client_ch).stats().udp_tx.bytes; + assert_eq!(final_ios - initial_ios, 1); + // Expected overhead: flags + CID + PN + tag + frame type + frame length = 1 + 8 + 1 + 16 + 1 + 2 = 29 + assert_eq!( + final_bytes - initial_bytes, + ((29 + DATAGRAM_LEN) * DATAGRAMS) as u64 + ); +} + +#[test] +fn gso_truncation() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + let initial_ios = pair.client_conn_mut(client_ch).stats().udp_tx.ios; + + // Send three application datagrams such that each is large to be combined with another in a + // single MTU, and the second datagram would require an unreasonably large amount of padding to + // produce a QUIC packet of the same length as the first. + info!("sending"); + const SIZES: [usize; 3] = [1024, 768, 768]; + for len in SIZES { + pair.client_datagrams(client_ch) + .send(vec![0; len].into(), false) + .unwrap(); + } + pair.drive(); + let final_ios = pair.client_conn_mut(client_ch).stats().udp_tx.ios; + assert_eq!(final_ios - initial_ios, 2); + for len in SIZES { + assert_eq!( + pair.server_datagrams(server_ch) + .recv() + .expect("datagram lost") + .len(), + len + ); + } +} + +/// Verify that UDP datagrams are padded to MTU if specified in the transport config. 
+#[test] +fn pad_to_mtu() { + let _guard = subscribe(); + const MTU: u16 = 1333; + let client_config = { + let mut c_config = client_config(); + let t_config = TransportConfig { + initial_mtu: MTU, + mtu_discovery_config: None, + pad_to_mtu: true, + ..TransportConfig::default() + }; + c_config.transport_config(t_config.into()); + c_config + }; + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect_with(client_config); + + let initial_ios = pair.client_conn_mut(client_ch).stats().udp_tx.ios; + pair.server.capture_inbound_packets = true; + + info!("sending"); + // Send two datagrams significantly smaller than MTU, but large enough to require two UDP datagrams. + const LEN_1: usize = 800; + const LEN_2: usize = 600; + pair.client_datagrams(client_ch) + .send(vec![0; LEN_1].into(), false) + .unwrap(); + pair.client_datagrams(client_ch) + .send(vec![0; LEN_2].into(), false) + .unwrap(); + pair.client.drive(pair.time, pair.server.addr); + + // Check padding + assert_eq!(pair.client.outbound.len(), 2); + assert_eq!(pair.client.outbound[0].0.size, usize::from(MTU)); + assert_eq!(pair.client.outbound[0].1.len(), usize::from(MTU)); + assert_eq!(pair.client.outbound[1].0.size, usize::from(MTU)); + assert_eq!(pair.client.outbound[1].1.len(), usize::from(MTU)); + pair.drive_client(); + assert_eq!(pair.server.inbound.len(), 2); + assert_eq!(pair.server.inbound[0].2.len(), usize::from(MTU)); + assert_eq!(pair.server.inbound[1].2.len(), usize::from(MTU)); + pair.drive(); + + // Check that both datagrams ended up in the same GSO batch + let final_ios = pair.client_conn_mut(client_ch).stats().udp_tx.ios; + assert_eq!(final_ios - initial_ios, 1); + + assert_eq!( + pair.server_datagrams(server_ch) + .recv() + .expect("datagram lost") + .len(), + LEN_1 + ); + assert_eq!( + pair.server_datagrams(server_ch) + .recv() + .expect("datagram lost") + .len(), + LEN_2 + ); +} + +/// Verify that a large application datagram is sent successfully when an ACK frame too large 
to fit +/// alongside it is also queued, in exactly 2 UDP datagrams. +#[test] +fn large_datagram_with_acks() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + // Force the client to generate a large ACK frame by dropping several packets + for _ in 0..10 { + pair.server_conn_mut(server_ch).ping(); + pair.drive_server(); + pair.client.inbound.pop_back(); + pair.server_conn_mut(server_ch).ping(); + pair.drive_server(); + } + + let max_size = pair.client_datagrams(client_ch).max_size().unwrap(); + let msg = Bytes::from(vec![0; max_size]); + pair.client_datagrams(client_ch) + .send(msg.clone(), true) + .unwrap(); + let initial_datagrams = pair.client_conn_mut(client_ch).stats().udp_tx.datagrams; + pair.drive(); + let final_datagrams = pair.client_conn_mut(client_ch).stats().udp_tx.datagrams; + assert_eq!(pair.server_datagrams(server_ch).recv().unwrap(), msg); + assert_eq!(final_datagrams - initial_datagrams, 2); +} + +/// Verify that an ACK prompted by receipt of many non-ACK-eliciting packets is sent alongside +/// outgoing application datagrams too large to coexist in the same packet with it. 
+#[test] +fn voluntary_ack_with_large_datagrams() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, _) = pair.connect(); + + // Prompt many large ACKs from the server + let initial_datagrams = pair.client_conn_mut(client_ch).stats().udp_tx.datagrams; + // Send enough packets that we're confident some packet numbers will be skipped, ensuring that + // larger ACKs occur + const COUNT: usize = 256; + for _ in 0..COUNT { + let max_size = pair.client_datagrams(client_ch).max_size().unwrap(); + pair.client_datagrams(client_ch) + .send(vec![0; max_size].into(), true) + .unwrap(); + pair.drive(); + } + let final_datagrams = pair.client_conn_mut(client_ch).stats().udp_tx.datagrams; + // Failure may indicate `max_size` is too small and ACKs are reliably being packed into the same + // datagram, which is reasonable behavior but makes this test ineffective. + assert_ne!( + final_datagrams - initial_datagrams, + COUNT as u64, + "client should have sent some ACK-only packets" + ); +} + +#[test] +fn reject_short_idcid() { + let _guard = subscribe(); + let client_addr = "[::2]:7890".parse().unwrap(); + let mut server = Endpoint::new( + Default::default(), + Some(Arc::new(server_config())), + true, + None, + ); + let now = Instant::now(); + let mut buf = Vec::with_capacity(server.config().get_max_udp_payload_size() as usize); + // Initial header that has an empty DCID but is otherwise well-formed + let mut initial = BytesMut::from(hex!("c4 00000001 00 00 00 3f").as_ref()); + initial.resize(MIN_INITIAL_SIZE.into(), 0); + let event = server.handle(now, client_addr, None, None, initial, &mut buf); + let Some(DatagramEvent::Response(Transmit { .. })) = event else { + panic!("expected an initial close"); + }; +} + +/// Ensure that a connection can be made when a preferred address is advertised by the server, +/// regardless of whether the address is actually used. 
+#[test] +fn preferred_address() { + let _guard = subscribe(); + let mut server_config = server_config(); + server_config.preferred_address_v6(Some("[::1]:65535".parse().unwrap())); + + let mut pair = Pair::new(Arc::new(EndpointConfig::default()), server_config); + pair.connect(); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/tests/token.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/tests/token.rs new file mode 100644 index 0000000000000000000000000000000000000000..ac466c6266f46d233902f46f61cb165cf062324a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/tests/token.rs @@ -0,0 +1,333 @@ +//! Tests specifically for tokens + +use super::*; + +#[cfg(all(target_family = "wasm", target_os = "unknown"))] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[test] +fn stateless_retry() { + let _guard = subscribe(); + let mut pair = Pair::default(); + pair.server.handle_incoming = Box::new(validate_incoming); + let (client_ch, _server_ch) = pair.connect(); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn retry_token_expired() { + let _guard = subscribe(); + + let fake_time = Arc::new(FakeTimeSource::new()); + let retry_token_lifetime = Duration::from_secs(1); + + let mut pair = Pair::default(); + pair.server.handle_incoming = Box::new(validate_incoming); + + let mut config = server_config(); + config + .time_source(Arc::clone(&fake_time) as _) + .retry_token_lifetime(retry_token_lifetime); + pair.server.set_server_config(Some(Arc::new(config))); + + let client_ch = pair.begin_connect(client_config()); + pair.drive_client(); + pair.drive_server(); + 
pair.drive_client(); + + // to expire retry token + fake_time.advance(retry_token_lifetime + Duration::from_millis(1)); + + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::ConnectionLost { reason: ConnectionError::ConnectionClosed(err) }) + if err.error_code == TransportErrorCode::INVALID_TOKEN + ); + + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn use_token() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let client_config = client_config(); + let (client_ch, _server_ch) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_2, _server_ch_2) = pair.connect_with(client_config); + pair.client + .connections + .get_mut(&client_ch_2) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn retry_then_use_token() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let client_config = client_config(); + pair.server.handle_incoming = Box::new(validate_incoming); + let (client_ch, _server_ch) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), 
Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_2, _server_ch_2) = pair.connect_with(client_config); + pair.client + .connections + .get_mut(&client_ch_2) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn use_token_then_retry() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let client_config = client_config(); + let (client_ch, _server_ch) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new({ + let mut i = 0; + move |incoming| { + if i == 0 { + assert!(incoming.remote_address_validated()); + assert!(incoming.may_retry()); + i += 1; + IncomingConnectionBehavior::Retry + } else if i == 1 { + assert!(incoming.remote_address_validated()); + assert!(!incoming.may_retry()); + i += 1; + IncomingConnectionBehavior::Accept + } else { + panic!("too many handle_incoming iterations") + } + } + }); + let (client_ch_2, _server_ch_2) = pair.connect_with(client_config); + pair.client + .connections + .get_mut(&client_ch_2) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + 
assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn use_same_token_twice() { + #[derive(Default)] + struct EvilTokenStore(Mutex); + + impl TokenStore for EvilTokenStore { + fn insert(&self, _server_name: &str, token: Bytes) { + let mut lock = self.0.lock().unwrap(); + if lock.is_empty() { + *lock = token; + } + } + + fn take(&self, _server_name: &str) -> Option { + let lock = self.0.lock().unwrap(); + if lock.is_empty() { + None + } else { + Some(lock.clone()) + } + } + } + + let _guard = subscribe(); + let mut pair = Pair::default(); + let mut client_config = client_config(); + client_config.token_store(Arc::new(EvilTokenStore::default())); + let (client_ch, _server_ch) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_2, _server_ch_2) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch_2) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(!incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_3, _server_ch_3) = 
pair.connect_with(client_config); + pair.client + .connections + .get_mut(&client_ch_3) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn use_token_expired() { + let _guard = subscribe(); + let fake_time = Arc::new(FakeTimeSource::new()); + let lifetime = Duration::from_secs(10000); + let mut server_config = server_config(); + server_config + .time_source(Arc::clone(&fake_time) as _) + .validation_token + .lifetime(lifetime); + let mut pair = Pair::new(Default::default(), server_config); + let client_config = client_config(); + let (client_ch, _server_ch) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_2, _server_ch_2) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch_2) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + fake_time.advance(lifetime + Duration::from_secs(1)); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(!incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_3, _server_ch_3) = 
pair.connect_with(client_config); + pair.client + .connections + .get_mut(&client_ch_3) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +pub(super) struct FakeTimeSource(Mutex); + +impl FakeTimeSource { + pub(super) fn new() -> Self { + Self(Mutex::new(SystemTime::now())) + } + + pub(super) fn advance(&self, dur: Duration) { + *self.0.lock().unwrap() += dur; + } +} + +impl TimeSource for FakeTimeSource { + fn now(&self) -> SystemTime { + *self.0.lock().unwrap() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/tests/util.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/tests/util.rs new file mode 100644 index 0000000000000000000000000000000000000000..fd61e6118f06b547aba1bd66e854e2b40f66cada --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/tests/util.rs @@ -0,0 +1,745 @@ +use std::{ + cmp, + collections::{HashMap, HashSet, VecDeque}, + env, + io::{self, Write}, + mem, + net::{Ipv6Addr, SocketAddr, UdpSocket}, + ops::RangeFrom, + str, + sync::{Arc, Mutex}, +}; + +use assert_matches::assert_matches; +use bytes::BytesMut; +use lazy_static::lazy_static; +use rustls::{ + KeyLogFile, + client::WebPkiServerVerifier, + pki_types::{CertificateDer, PrivateKeyDer}, +}; +use tracing::{info_span, trace}; + +use super::crypto::rustls::{QuicClientConfig, QuicServerConfig, configured_provider}; +use super::*; +use crate::{Duration, Instant}; + +pub(super) const DEFAULT_MTU: usize = 1452; + +pub(super) struct Pair { + pub(super) server: TestEndpoint, + pub(super) client: TestEndpoint, + /// Start time + epoch: Instant, + /// Current time + pub(super) time: Instant, + /// Simulates the maximum size allowed for UDP payloads by the link (packets 
exceeding this size will be dropped) + pub(super) mtu: usize, + /// Simulates explicit congestion notification + pub(super) congestion_experienced: bool, + // One-way + pub(super) latency: Duration, + /// Number of spin bit flips + pub(super) spins: u64, + last_spin: bool, +} + +impl Pair { + pub(super) fn default_with_deterministic_pns() -> Self { + let mut cfg = server_config(); + let mut transport = TransportConfig::default(); + transport.deterministic_packet_numbers(true); + cfg.transport = Arc::new(transport); + Self::new(Default::default(), cfg) + } + + pub(super) fn new(endpoint_config: Arc, server_config: ServerConfig) -> Self { + let server = Endpoint::new( + endpoint_config.clone(), + Some(Arc::new(server_config)), + true, + None, + ); + let client = Endpoint::new(endpoint_config, None, true, None); + + Self::new_from_endpoint(client, server) + } + + pub(super) fn new_from_endpoint(client: Endpoint, server: Endpoint) -> Self { + let server_addr = SocketAddr::new( + Ipv6Addr::LOCALHOST.into(), + SERVER_PORTS.lock().unwrap().next().unwrap(), + ); + let client_addr = SocketAddr::new( + Ipv6Addr::LOCALHOST.into(), + CLIENT_PORTS.lock().unwrap().next().unwrap(), + ); + let now = Instant::now(); + Self { + server: TestEndpoint::new(server, server_addr), + client: TestEndpoint::new(client, client_addr), + epoch: now, + time: now, + mtu: DEFAULT_MTU, + latency: Duration::ZERO, + spins: 0, + last_spin: false, + congestion_experienced: false, + } + } + + /// Returns whether the connection is not idle + pub(super) fn step(&mut self) -> bool { + self.drive_client(); + self.drive_server(); + if self.client.is_idle() && self.server.is_idle() { + return false; + } + + let client_t = self.client.next_wakeup(); + let server_t = self.server.next_wakeup(); + match min_opt(client_t, server_t) { + Some(t) if Some(t) == client_t => { + if t != self.time { + self.time = self.time.max(t); + trace!("advancing to {:?} for client", self.time - self.epoch); + } + true + } + Some(t) 
if Some(t) == server_t => { + if t != self.time { + self.time = self.time.max(t); + trace!("advancing to {:?} for server", self.time - self.epoch); + } + true + } + Some(_) => unreachable!(), + None => false, + } + } + + /// Advance time until both connections are idle + pub(super) fn drive(&mut self) { + while self.step() {} + } + + /// Advance time until both connections are idle, or after 100 steps have been executed + /// + /// Returns true if the amount of steps exceeds the bounds, because the connections never became + /// idle + pub(super) fn drive_bounded(&mut self) -> bool { + for _ in 0..100 { + if !self.step() { + return false; + } + } + + true + } + + pub(super) fn drive_client(&mut self) { + let span = info_span!("client"); + let _guard = span.enter(); + self.client.drive(self.time, self.server.addr); + for (packet, buffer) in self.client.outbound.drain(..) { + let packet_size = packet_size(&packet, &buffer); + if packet_size > self.mtu { + info!(packet_size, "dropping packet (max size exceeded)"); + continue; + } + if buffer[0] & packet::LONG_HEADER_FORM == 0 { + let spin = buffer[0] & packet::SPIN_BIT != 0; + self.spins += (spin == self.last_spin) as u64; + self.last_spin = spin; + } + if let Some(ref socket) = self.client.socket { + socket.send_to(&buffer, packet.destination).unwrap(); + } + if self.server.addr == packet.destination { + let ecn = set_congestion_experienced(packet.ecn, self.congestion_experienced); + self.server.inbound.push_back(( + self.time + self.latency, + ecn, + buffer.as_ref().into(), + )); + } + } + } + + pub(super) fn drive_server(&mut self) { + let span = info_span!("server"); + let _guard = span.enter(); + self.server.drive(self.time, self.client.addr); + for (packet, buffer) in self.server.outbound.drain(..) 
{ + let packet_size = packet_size(&packet, &buffer); + if packet_size > self.mtu { + info!(packet_size, "dropping packet (max size exceeded)"); + continue; + } + if let Some(ref socket) = self.server.socket { + socket.send_to(&buffer, packet.destination).unwrap(); + } + if self.client.addr == packet.destination { + let ecn = set_congestion_experienced(packet.ecn, self.congestion_experienced); + self.client.inbound.push_back(( + self.time + self.latency, + ecn, + buffer.as_ref().into(), + )); + } + } + } + + pub(super) fn connect(&mut self) -> (ConnectionHandle, ConnectionHandle) { + self.connect_with(client_config()) + } + + pub(super) fn connect_with( + &mut self, + config: ClientConfig, + ) -> (ConnectionHandle, ConnectionHandle) { + info!("connecting"); + let client_ch = self.begin_connect(config); + self.drive(); + let server_ch = self.server.assert_accept(); + self.finish_connect(client_ch, server_ch); + (client_ch, server_ch) + } + + /// Just start connecting the client + pub(super) fn begin_connect(&mut self, config: ClientConfig) -> ConnectionHandle { + let span = info_span!("client"); + let _guard = span.enter(); + let (client_ch, client_conn) = self + .client + .connect(self.time, config, self.server.addr, "localhost") + .unwrap(); + self.client.connections.insert(client_ch, client_conn); + client_ch + } + + fn finish_connect(&mut self, client_ch: ConnectionHandle, server_ch: ConnectionHandle) { + assert_matches!( + self.client_conn_mut(client_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + self.client_conn_mut(client_ch).poll(), + Some(Event::Connected) + ); + assert_matches!( + self.server_conn_mut(server_ch).poll(), + Some(Event::HandshakeDataReady) + ); + assert_matches!( + self.server_conn_mut(server_ch).poll(), + Some(Event::Connected) + ); + } + + pub(super) fn client_conn_mut(&mut self, ch: ConnectionHandle) -> &mut Connection { + self.client.connections.get_mut(&ch).unwrap() + } + + pub(super) fn client_streams(&mut self, 
ch: ConnectionHandle) -> Streams<'_> { + self.client_conn_mut(ch).streams() + } + + pub(super) fn client_send(&mut self, ch: ConnectionHandle, s: StreamId) -> SendStream<'_> { + self.client_conn_mut(ch).send_stream(s) + } + + pub(super) fn client_recv(&mut self, ch: ConnectionHandle, s: StreamId) -> RecvStream<'_> { + self.client_conn_mut(ch).recv_stream(s) + } + + pub(super) fn client_datagrams(&mut self, ch: ConnectionHandle) -> Datagrams<'_> { + self.client_conn_mut(ch).datagrams() + } + + pub(super) fn server_conn_mut(&mut self, ch: ConnectionHandle) -> &mut Connection { + self.server.connections.get_mut(&ch).unwrap() + } + + pub(super) fn server_streams(&mut self, ch: ConnectionHandle) -> Streams<'_> { + self.server_conn_mut(ch).streams() + } + + pub(super) fn server_send(&mut self, ch: ConnectionHandle, s: StreamId) -> SendStream<'_> { + self.server_conn_mut(ch).send_stream(s) + } + + pub(super) fn server_recv(&mut self, ch: ConnectionHandle, s: StreamId) -> RecvStream<'_> { + self.server_conn_mut(ch).recv_stream(s) + } + + pub(super) fn server_datagrams(&mut self, ch: ConnectionHandle) -> Datagrams<'_> { + self.server_conn_mut(ch).datagrams() + } +} + +impl Default for Pair { + fn default() -> Self { + Self::new(Default::default(), server_config()) + } +} + +pub(super) struct TestEndpoint { + pub(super) endpoint: Endpoint, + pub(super) addr: SocketAddr, + socket: Option, + timeout: Option, + pub(super) outbound: VecDeque<(Transmit, Bytes)>, + delayed: VecDeque<(Transmit, Bytes)>, + pub(super) inbound: VecDeque<(Instant, Option, BytesMut)>, + accepted: Option>, + pub(super) connections: HashMap, + conn_events: HashMap>, + pub(super) captured_packets: Vec>, + pub(super) capture_inbound_packets: bool, + pub(super) handle_incoming: Box IncomingConnectionBehavior>, + pub(super) waiting_incoming: Vec, +} + +#[derive(Debug, Copy, Clone)] +pub(super) enum IncomingConnectionBehavior { + Accept, + Reject, + Retry, + Wait, +} + +pub(super) fn 
validate_incoming(incoming: &Incoming) -> IncomingConnectionBehavior { + if incoming.remote_address_validated() { + IncomingConnectionBehavior::Accept + } else { + IncomingConnectionBehavior::Retry + } +} + +impl TestEndpoint { + fn new(endpoint: Endpoint, addr: SocketAddr) -> Self { + let socket = if env::var_os("SSLKEYLOGFILE").is_some() { + let socket = UdpSocket::bind(addr).expect("failed to bind UDP socket"); + socket + .set_read_timeout(Some(Duration::from_millis(10))) + .unwrap(); + Some(socket) + } else { + None + }; + Self { + endpoint, + addr, + socket, + timeout: None, + outbound: VecDeque::new(), + delayed: VecDeque::new(), + inbound: VecDeque::new(), + accepted: None, + connections: HashMap::default(), + conn_events: HashMap::default(), + captured_packets: Vec::new(), + capture_inbound_packets: false, + handle_incoming: Box::new(|_| IncomingConnectionBehavior::Accept), + waiting_incoming: Vec::new(), + } + } + + pub(super) fn drive(&mut self, now: Instant, remote: SocketAddr) { + self.drive_incoming(now, remote); + self.drive_outgoing(now); + } + + pub(super) fn drive_incoming(&mut self, now: Instant, remote: SocketAddr) { + if let Some(ref socket) = self.socket { + loop { + let mut buf = [0; 8192]; + if socket.recv_from(&mut buf).is_err() { + break; + } + } + } + let buffer_size = self.endpoint.config().get_max_udp_payload_size() as usize; + let mut buf = Vec::with_capacity(buffer_size); + + while self.inbound.front().is_some_and(|x| x.0 <= now) { + let (recv_time, ecn, packet) = self.inbound.pop_front().unwrap(); + if let Some(event) = self + .endpoint + .handle(recv_time, remote, None, ecn, packet, &mut buf) + { + match event { + DatagramEvent::NewConnection(incoming) => { + match (self.handle_incoming)(&incoming) { + IncomingConnectionBehavior::Accept => { + let _ = self.try_accept(incoming, now); + } + IncomingConnectionBehavior::Reject => { + self.reject(incoming); + } + IncomingConnectionBehavior::Retry => { + self.retry(incoming); + } + 
IncomingConnectionBehavior::Wait => { + self.waiting_incoming.push(incoming); + } + } + } + DatagramEvent::ConnectionEvent(ch, event) => { + if self.capture_inbound_packets { + let packet = self.connections[&ch].decode_packet(&event); + self.captured_packets.extend(packet); + } + + self.conn_events.entry(ch).or_default().push_back(event); + } + DatagramEvent::Response(transmit) => { + let size = transmit.size; + self.outbound.extend(split_transmit(transmit, &buf[..size])); + buf.clear(); + } + } + } + } + } + + pub(super) fn drive_outgoing(&mut self, now: Instant) { + let buffer_size = self.endpoint.config().get_max_udp_payload_size() as usize; + let mut buf = Vec::with_capacity(buffer_size); + + loop { + let mut endpoint_events: Vec<(ConnectionHandle, EndpointEvent)> = vec![]; + for (ch, conn) in self.connections.iter_mut() { + if self.timeout.is_some_and(|x| x <= now) { + self.timeout = None; + conn.handle_timeout(now); + } + + for (_, mut events) in self.conn_events.drain() { + for event in events.drain(..) 
{ + conn.handle_event(event); + } + } + + while let Some(event) = conn.poll_endpoint_events() { + endpoint_events.push((*ch, event)); + } + while let Some(transmit) = conn.poll_transmit(now, MAX_DATAGRAMS, &mut buf) { + let size = transmit.size; + self.outbound.extend(split_transmit(transmit, &buf[..size])); + buf.clear(); + } + self.timeout = conn.poll_timeout(); + } + + if endpoint_events.is_empty() { + break; + } + + for (ch, event) in endpoint_events { + if let Some(event) = self.handle_event(ch, event) { + if let Some(conn) = self.connections.get_mut(&ch) { + conn.handle_event(event); + } + } + } + } + } + + pub(super) fn next_wakeup(&self) -> Option { + let next_inbound = self.inbound.front().map(|x| x.0); + min_opt(self.timeout, next_inbound) + } + + fn is_idle(&self) -> bool { + self.connections.values().all(|x| x.is_idle()) + } + + pub(super) fn delay_outbound(&mut self) { + assert!(self.delayed.is_empty()); + mem::swap(&mut self.delayed, &mut self.outbound); + } + + pub(super) fn finish_delay(&mut self) { + self.outbound.extend(self.delayed.drain(..)); + } + + pub(super) fn try_accept( + &mut self, + incoming: Incoming, + now: Instant, + ) -> Result { + let mut buf = Vec::new(); + match self.endpoint.accept(incoming, now, &mut buf, None) { + Ok((ch, conn)) => { + self.connections.insert(ch, conn); + self.accepted = Some(Ok(ch)); + Ok(ch) + } + Err(error) => { + if let Some(transmit) = error.response { + let size = transmit.size; + self.outbound.extend(split_transmit(transmit, &buf[..size])); + } + self.accepted = Some(Err(error.cause.clone())); + Err(error.cause) + } + } + } + + pub(super) fn retry(&mut self, incoming: Incoming) { + let mut buf = Vec::new(); + let transmit = self.endpoint.retry(incoming, &mut buf).unwrap(); + let size = transmit.size; + self.outbound.extend(split_transmit(transmit, &buf[..size])); + } + + pub(super) fn reject(&mut self, incoming: Incoming) { + let mut buf = Vec::new(); + let transmit = self.endpoint.refuse(incoming, &mut 
buf); + let size = transmit.size; + self.outbound.extend(split_transmit(transmit, &buf[..size])); + } + + pub(super) fn assert_accept(&mut self) -> ConnectionHandle { + self.accepted + .take() + .expect("server didn't try connecting") + .expect("server experienced error connecting") + } + + pub(super) fn assert_accept_error(&mut self) -> ConnectionError { + self.accepted + .take() + .expect("server didn't try connecting") + .expect_err("server did unexpectedly connect without error") + } + + pub(super) fn assert_no_accept(&self) { + assert!(self.accepted.is_none(), "server did unexpectedly connect") + } +} + +impl ::std::ops::Deref for TestEndpoint { + type Target = Endpoint; + fn deref(&self) -> &Endpoint { + &self.endpoint + } +} + +impl ::std::ops::DerefMut for TestEndpoint { + fn deref_mut(&mut self) -> &mut Endpoint { + &mut self.endpoint + } +} + +pub(super) fn subscribe() -> tracing::subscriber::DefaultGuard { + let builder = tracing_subscriber::FmtSubscriber::builder() + .with_max_level(tracing::Level::TRACE) + .with_writer(|| TestWriter); + // tracing uses std::time to trace time, which panics in wasm. 
+ #[cfg(all(target_family = "wasm", target_os = "unknown"))] + let builder = builder.without_time(); + tracing::subscriber::set_default(builder.finish()) +} + +struct TestWriter; + +impl Write for TestWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + print!( + "{}", + str::from_utf8(buf).expect("tried to log invalid UTF-8") + ); + Ok(buf.len()) + } + fn flush(&mut self) -> io::Result<()> { + io::stdout().flush() + } +} + +pub(super) fn server_config() -> ServerConfig { + let mut config = ServerConfig::with_crypto(Arc::new(server_crypto())); + if !cfg!(feature = "bloom") { + config + .validation_token + .sent(2) + .log(Arc::new(SimpleTokenLog::default())); + } + config +} + +pub(super) fn server_config_with_cert( + cert: CertificateDer<'static>, + key: PrivateKeyDer<'static>, +) -> ServerConfig { + let mut config = ServerConfig::with_crypto(Arc::new(server_crypto_with_cert(cert, key))); + config + .validation_token + .sent(2) + .log(Arc::new(SimpleTokenLog::default())); + config +} + +pub(super) fn server_crypto() -> QuicServerConfig { + server_crypto_inner(None, None) +} + +pub(super) fn server_crypto_with_alpn(alpn: Vec>) -> QuicServerConfig { + server_crypto_inner(None, Some(alpn)) +} + +pub(super) fn server_crypto_with_cert( + cert: CertificateDer<'static>, + key: PrivateKeyDer<'static>, +) -> QuicServerConfig { + server_crypto_inner(Some((cert, key)), None) +} + +fn server_crypto_inner( + identity: Option<(CertificateDer<'static>, PrivateKeyDer<'static>)>, + alpn: Option>>, +) -> QuicServerConfig { + let (cert, key) = identity.unwrap_or_else(|| { + ( + CERTIFIED_KEY.cert.der().clone(), + PrivateKeyDer::Pkcs8(CERTIFIED_KEY.signing_key.serialize_der().into()), + ) + }); + + let mut config = QuicServerConfig::inner(vec![cert], key).unwrap(); + if let Some(alpn) = alpn { + config.alpn_protocols = alpn; + } + + config.try_into().unwrap() +} + +pub(super) fn client_config() -> ClientConfig { + ClientConfig::new(Arc::new(client_crypto())) +} + +pub(super) 
fn client_config_with_deterministic_pns() -> ClientConfig { + let mut cfg = ClientConfig::new(Arc::new(client_crypto())); + let mut transport = TransportConfig::default(); + transport.deterministic_packet_numbers(true); + cfg.transport = Arc::new(transport); + cfg +} + +pub(super) fn client_config_with_certs(certs: Vec>) -> ClientConfig { + ClientConfig::new(Arc::new(client_crypto_inner(Some(certs), None))) +} + +pub(super) fn client_crypto() -> QuicClientConfig { + client_crypto_inner(None, None) +} + +pub(super) fn client_crypto_with_alpn(protocols: Vec>) -> QuicClientConfig { + client_crypto_inner(None, Some(protocols)) +} + +fn client_crypto_inner( + certs: Option>>, + alpn: Option>>, +) -> QuicClientConfig { + let mut roots = rustls::RootCertStore::empty(); + for cert in certs.unwrap_or_else(|| vec![CERTIFIED_KEY.cert.der().clone()]) { + roots.add(cert).unwrap(); + } + + let mut inner = QuicClientConfig::inner( + WebPkiServerVerifier::builder_with_provider(Arc::new(roots), configured_provider()) + .build() + .unwrap(), + ); + inner.key_log = Arc::new(KeyLogFile::new()); + if let Some(alpn) = alpn { + inner.alpn_protocols = alpn; + } + + inner.try_into().unwrap() +} + +pub(super) fn min_opt(x: Option, y: Option) -> Option { + match (x, y) { + (Some(x), Some(y)) => Some(cmp::min(x, y)), + (Some(x), _) => Some(x), + (_, Some(y)) => Some(y), + _ => None, + } +} + +/// The maximum of datagrams TestEndpoint will produce via `poll_transmit` +const MAX_DATAGRAMS: usize = 10; + +fn split_transmit(transmit: Transmit, buffer: &[u8]) -> Vec<(Transmit, Bytes)> { + let mut buffer = Bytes::copy_from_slice(buffer); + let segment_size = match transmit.segment_size { + Some(segment_size) => segment_size, + _ => return vec![(transmit, buffer)], + }; + + let mut transmits = Vec::new(); + while !buffer.is_empty() { + let end = segment_size.min(buffer.len()); + + let contents = buffer.split_to(end); + transmits.push(( + Transmit { + destination: transmit.destination, + size: 
contents.len(), + ecn: transmit.ecn, + segment_size: None, + src_ip: transmit.src_ip, + }, + contents, + )); + } + + transmits +} + +fn packet_size(transmit: &Transmit, buffer: &Bytes) -> usize { + if transmit.segment_size.is_some() { + panic!("This transmit is meant to be split into multiple packets!"); + } + + buffer.len() +} + +fn set_congestion_experienced( + x: Option, + congestion_experienced: bool, +) -> Option { + x.map(|codepoint| match congestion_experienced { + true => EcnCodepoint::Ce, + false => codepoint, + }) +} + +lazy_static! { + pub static ref SERVER_PORTS: Mutex> = Mutex::new(4433..); + pub static ref CLIENT_PORTS: Mutex> = Mutex::new(44433..); + pub(crate) static ref CERTIFIED_KEY: rcgen::CertifiedKey = + rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); +} + +#[derive(Default)] +struct SimpleTokenLog(Mutex>); + +impl TokenLog for SimpleTokenLog { + fn check_and_insert( + &self, + nonce: u128, + _issued: SystemTime, + _lifetime: Duration, + ) -> Result<(), TokenReuseError> { + if self.0.lock().unwrap().insert(nonce) { + Ok(()) + } else { + Err(TokenReuseError) + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/token.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/token.rs new file mode 100644 index 0000000000000000000000000000000000000000..e4ab5ea529246dcabd5adaf207e3e5a63c8bc8bc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/token.rs @@ -0,0 +1,507 @@ +use std::{ + fmt, + mem::size_of, + net::{IpAddr, SocketAddr}, +}; + +use bytes::{Buf, BufMut, Bytes}; +use rand::Rng; + +use crate::{ + Duration, RESET_TOKEN_SIZE, ServerConfig, SystemTime, UNIX_EPOCH, + coding::{BufExt, BufMutExt}, + crypto::{HandshakeTokenKey, HmacKey}, + packet::InitialHeader, + shared::ConnectionId, +}; + +/// Responsible for limiting clients' ability to reuse validation tokens +/// +/// [_RFC 9000 § 
8.1.4:_](https://www.rfc-editor.org/rfc/rfc9000.html#section-8.1.4) +/// +/// > Attackers could replay tokens to use servers as amplifiers in DDoS attacks. To protect +/// > against such attacks, servers MUST ensure that replay of tokens is prevented or limited. +/// > Servers SHOULD ensure that tokens sent in Retry packets are only accepted for a short time, +/// > as they are returned immediately by clients. Tokens that are provided in NEW_TOKEN frames +/// > (Section 19.7) need to be valid for longer but SHOULD NOT be accepted multiple times. +/// > Servers are encouraged to allow tokens to be used only once, if possible; tokens MAY include +/// > additional information about clients to further narrow applicability or reuse. +/// +/// `TokenLog` pertains only to tokens provided in NEW_TOKEN frames. +pub trait TokenLog: Send + Sync { + /// Record that the token was used and, ideally, return a token reuse error if the token may + /// have been already used previously + /// + /// False negatives and false positives are both permissible. Called when a client uses an + /// address validation token. + /// + /// Parameters: + /// - `nonce`: A server-generated random unique value for the token. + /// - `issued`: The time the server issued the token. + /// - `lifetime`: The expiration time of address validation tokens sent via NEW_TOKEN frames, + /// as configured by [`ServerValidationTokenConfig::lifetime`][1]. + /// + /// [1]: crate::ValidationTokenConfig::lifetime + /// + /// ## Security & Performance + /// + /// To the extent that it is possible to repeatedly trigger false negatives (returning `Ok` for + /// a token which has been reused), an attacker could use the server to perform [amplification + /// attacks][2]. The QUIC specification requires that this be limited, if not prevented fully. 
+ /// + /// A false positive (returning `Err` for a token which has never been used) is not a security + /// vulnerability; it is permissible for a `TokenLog` to always return `Err`. A false positive + /// causes the token to be ignored, which may cause the transmission of some 0.5-RTT data to be + /// delayed until the handshake completes, if a sufficient amount of 0.5-RTT data it sent. + /// + /// [2]: https://en.wikipedia.org/wiki/Denial-of-service_attack#Amplification + fn check_and_insert( + &self, + nonce: u128, + issued: SystemTime, + lifetime: Duration, + ) -> Result<(), TokenReuseError>; +} + +/// Error for when a validation token may have been reused +pub struct TokenReuseError; + +/// Null implementation of [`TokenLog`], which never accepts tokens +pub struct NoneTokenLog; + +impl TokenLog for NoneTokenLog { + fn check_and_insert(&self, _: u128, _: SystemTime, _: Duration) -> Result<(), TokenReuseError> { + Err(TokenReuseError) + } +} + +/// Responsible for storing validation tokens received from servers and retrieving them for use in +/// subsequent connections +pub trait TokenStore: Send + Sync { + /// Potentially store a token for later one-time use + /// + /// Called when a NEW_TOKEN frame is received from the server. + fn insert(&self, server_name: &str, token: Bytes); + + /// Try to find and take a token that was stored with the given server name + /// + /// The same token must never be returned from `take` twice, as doing so can be used to + /// de-anonymize a client's traffic. + /// + /// Called when trying to connect to a server. It is always ok for this to return `None`. 
+ fn take(&self, server_name: &str) -> Option; +} + +/// Null implementation of [`TokenStore`], which does not store any tokens +pub struct NoneTokenStore; + +impl TokenStore for NoneTokenStore { + fn insert(&self, _: &str, _: Bytes) {} + fn take(&self, _: &str) -> Option { + None + } +} + +/// State in an `Incoming` determined by a token or lack thereof +#[derive(Debug)] +pub(crate) struct IncomingToken { + pub(crate) retry_src_cid: Option, + pub(crate) orig_dst_cid: ConnectionId, + pub(crate) validated: bool, +} + +impl IncomingToken { + /// Construct for an `Incoming` given the first packet header, or error if the connection + /// cannot be established + pub(crate) fn from_header( + header: &InitialHeader, + server_config: &ServerConfig, + remote_address: SocketAddr, + ) -> Result { + let unvalidated = Self { + retry_src_cid: None, + orig_dst_cid: header.dst_cid, + validated: false, + }; + + // Decode token or short-circuit + if header.token.is_empty() { + return Ok(unvalidated); + } + + // In cases where a token cannot be decrypted/decoded, we must allow for the possibility + // that this is caused not by client malfeasance, but by the token having been generated by + // an incompatible endpoint, e.g. a different version or a neighbor behind the same load + // balancer. In such cases we proceed as if there was no token. + // + // [_RFC 9000 § 8.1.3:_](https://www.rfc-editor.org/rfc/rfc9000.html#section-8.1.3-10) + // + // > If the token is invalid, then the server SHOULD proceed as if the client did not have + // > a validated address, including potentially sending a Retry packet. 
+ let Some(retry) = Token::decode(&*server_config.token_key, &header.token) else { + return Ok(unvalidated); + }; + + // Validate token, then convert into Self + match retry.payload { + TokenPayload::Retry { + address, + orig_dst_cid, + issued, + } => { + if address != remote_address { + return Err(InvalidRetryTokenError); + } + if issued + server_config.retry_token_lifetime < server_config.time_source.now() { + return Err(InvalidRetryTokenError); + } + + Ok(Self { + retry_src_cid: Some(header.dst_cid), + orig_dst_cid, + validated: true, + }) + } + TokenPayload::Validation { ip, issued } => { + if ip != remote_address.ip() { + return Ok(unvalidated); + } + if issued + server_config.validation_token.lifetime + < server_config.time_source.now() + { + return Ok(unvalidated); + } + if server_config + .validation_token + .log + .check_and_insert(retry.nonce, issued, server_config.validation_token.lifetime) + .is_err() + { + return Ok(unvalidated); + } + + Ok(Self { + retry_src_cid: None, + orig_dst_cid: header.dst_cid, + validated: true, + }) + } + } + } +} + +/// Error for a token being unambiguously from a Retry packet, and not valid +/// +/// The connection cannot be established. 
+pub(crate) struct InvalidRetryTokenError; + +/// Retry or validation token +pub(crate) struct Token { + /// Content that is encrypted from the client + pub(crate) payload: TokenPayload, + /// Randomly generated value, which must be unique, and is visible to the client + nonce: u128, +} + +impl Token { + /// Construct with newly sampled randomness + pub(crate) fn new(payload: TokenPayload, rng: &mut impl Rng) -> Self { + Self { + nonce: rng.random(), + payload, + } + } + + /// Encode and encrypt + pub(crate) fn encode(&self, key: &dyn HandshakeTokenKey) -> Vec { + let mut buf = Vec::new(); + + // Encode payload + match self.payload { + TokenPayload::Retry { + address, + orig_dst_cid, + issued, + } => { + buf.put_u8(TokenType::Retry as u8); + encode_addr(&mut buf, address); + orig_dst_cid.encode_long(&mut buf); + encode_unix_secs(&mut buf, issued); + } + TokenPayload::Validation { ip, issued } => { + buf.put_u8(TokenType::Validation as u8); + encode_ip(&mut buf, ip); + encode_unix_secs(&mut buf, issued); + } + } + + // Encrypt + let aead_key = key.aead_from_hkdf(&self.nonce.to_le_bytes()); + aead_key.seal(&mut buf, &[]).unwrap(); + buf.extend(&self.nonce.to_le_bytes()); + + buf + } + + /// Decode and decrypt + fn decode(key: &dyn HandshakeTokenKey, raw_token_bytes: &[u8]) -> Option { + // Decrypt + + // MSRV: split_at_checked requires 1.80.0 + let nonce_slice_start = raw_token_bytes.len().checked_sub(size_of::())?; + let (sealed_token, nonce_bytes) = raw_token_bytes.split_at(nonce_slice_start); + + let nonce = u128::from_le_bytes(nonce_bytes.try_into().unwrap()); + + let aead_key = key.aead_from_hkdf(nonce_bytes); + let mut sealed_token = sealed_token.to_vec(); + let data = aead_key.open(&mut sealed_token, &[]).ok()?; + + // Decode payload + let mut reader = &data[..]; + let payload = match TokenType::from_byte((&mut reader).get::().ok()?)? 
{ + TokenType::Retry => TokenPayload::Retry { + address: decode_addr(&mut reader)?, + orig_dst_cid: ConnectionId::decode_long(&mut reader)?, + issued: decode_unix_secs(&mut reader)?, + }, + TokenType::Validation => TokenPayload::Validation { + ip: decode_ip(&mut reader)?, + issued: decode_unix_secs(&mut reader)?, + }, + }; + + if !reader.is_empty() { + // Consider extra bytes a decoding error (it may be from an incompatible endpoint) + return None; + } + + Some(Self { nonce, payload }) + } +} + +/// Content of a [`Token`] that is encrypted from the client +pub(crate) enum TokenPayload { + /// Token originating from a Retry packet + Retry { + /// The client's address + address: SocketAddr, + /// The destination connection ID set in the very first packet from the client + orig_dst_cid: ConnectionId, + /// The time at which this token was issued + issued: SystemTime, + }, + /// Token originating from a NEW_TOKEN frame + Validation { + /// The client's IP address (its port is likely to change between sessions) + ip: IpAddr, + /// The time at which this token was issued + issued: SystemTime, + }, +} + +/// Variant tag for a [`TokenPayload`] +#[derive(Copy, Clone)] +#[repr(u8)] +enum TokenType { + Retry = 0, + Validation = 1, +} + +impl TokenType { + fn from_byte(n: u8) -> Option { + use TokenType::*; + [Retry, Validation].into_iter().find(|ty| *ty as u8 == n) + } +} + +fn encode_addr(buf: &mut Vec, address: SocketAddr) { + encode_ip(buf, address.ip()); + buf.put_u16(address.port()); +} + +fn decode_addr(buf: &mut B) -> Option { + let ip = decode_ip(buf)?; + let port = buf.get().ok()?; + Some(SocketAddr::new(ip, port)) +} + +fn encode_ip(buf: &mut Vec, ip: IpAddr) { + match ip { + IpAddr::V4(x) => { + buf.put_u8(0); + buf.put_slice(&x.octets()); + } + IpAddr::V6(x) => { + buf.put_u8(1); + buf.put_slice(&x.octets()); + } + } +} + +fn decode_ip(buf: &mut B) -> Option { + match buf.get::().ok()? 
{ + 0 => buf.get().ok().map(IpAddr::V4), + 1 => buf.get().ok().map(IpAddr::V6), + _ => None, + } +} + +fn encode_unix_secs(buf: &mut Vec, time: SystemTime) { + buf.write::( + time.duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(), + ); +} + +fn decode_unix_secs(buf: &mut B) -> Option { + Some(UNIX_EPOCH + Duration::from_secs(buf.get::().ok()?)) +} + +/// Stateless reset token +/// +/// Used for an endpoint to securely communicate that it has lost state for a connection. +#[allow(clippy::derived_hash_with_manual_eq)] // Custom PartialEq impl matches derived semantics +#[derive(Debug, Copy, Clone, Hash)] +pub(crate) struct ResetToken([u8; RESET_TOKEN_SIZE]); + +impl ResetToken { + pub(crate) fn new(key: &dyn HmacKey, id: ConnectionId) -> Self { + let mut signature = vec![0; key.signature_len()]; + key.sign(&id, &mut signature); + // TODO: Server ID?? + let mut result = [0; RESET_TOKEN_SIZE]; + result.copy_from_slice(&signature[..RESET_TOKEN_SIZE]); + result.into() + } +} + +impl PartialEq for ResetToken { + fn eq(&self, other: &Self) -> bool { + crate::constant_time::eq(&self.0, &other.0) + } +} + +impl Eq for ResetToken {} + +impl From<[u8; RESET_TOKEN_SIZE]> for ResetToken { + fn from(x: [u8; RESET_TOKEN_SIZE]) -> Self { + Self(x) + } +} + +impl std::ops::Deref for ResetToken { + type Target = [u8]; + fn deref(&self) -> &[u8] { + &self.0 + } +} + +impl fmt::Display for ResetToken { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for byte in self.iter() { + write!(f, "{byte:02x}")?; + } + Ok(()) + } +} + +#[cfg(all(test, any(feature = "aws-lc-rs", feature = "ring")))] +mod test { + use super::*; + #[cfg(all(feature = "aws-lc-rs", not(feature = "ring")))] + use aws_lc_rs::hkdf; + use rand::prelude::*; + #[cfg(feature = "ring")] + use ring::hkdf; + + fn token_round_trip(payload: TokenPayload) -> TokenPayload { + let rng = &mut rand::rng(); + let token = Token::new(payload, rng); + let mut master_key = [0; 64]; + rng.fill_bytes(&mut 
master_key); + let prk = hkdf::Salt::new(hkdf::HKDF_SHA256, &[]).extract(&master_key); + let encoded = token.encode(&prk); + let decoded = Token::decode(&prk, &encoded).expect("token didn't decrypt / decode"); + assert_eq!(token.nonce, decoded.nonce); + decoded.payload + } + + #[test] + fn retry_token_sanity() { + use crate::MAX_CID_SIZE; + use crate::cid_generator::{ConnectionIdGenerator, RandomConnectionIdGenerator}; + use crate::{Duration, UNIX_EPOCH}; + + use std::net::Ipv6Addr; + + let address_1 = SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 4433); + let orig_dst_cid_1 = RandomConnectionIdGenerator::new(MAX_CID_SIZE).generate_cid(); + let issued_1 = UNIX_EPOCH + Duration::from_secs(42); // Fractional seconds would be lost + let payload_1 = TokenPayload::Retry { + address: address_1, + orig_dst_cid: orig_dst_cid_1, + issued: issued_1, + }; + let TokenPayload::Retry { + address: address_2, + orig_dst_cid: orig_dst_cid_2, + issued: issued_2, + } = token_round_trip(payload_1) + else { + panic!("token decoded as wrong variant"); + }; + + assert_eq!(address_1, address_2); + assert_eq!(orig_dst_cid_1, orig_dst_cid_2); + assert_eq!(issued_1, issued_2); + } + + #[test] + fn validation_token_sanity() { + use crate::{Duration, UNIX_EPOCH}; + + use std::net::Ipv6Addr; + + let ip_1 = Ipv6Addr::LOCALHOST.into(); + let issued_1 = UNIX_EPOCH + Duration::from_secs(42); // Fractional seconds would be lost + + let payload_1 = TokenPayload::Validation { + ip: ip_1, + issued: issued_1, + }; + let TokenPayload::Validation { + ip: ip_2, + issued: issued_2, + } = token_round_trip(payload_1) + else { + panic!("token decoded as wrong variant"); + }; + + assert_eq!(ip_1, ip_2); + assert_eq!(issued_1, issued_2); + } + + #[test] + fn invalid_token_returns_err() { + use super::*; + use rand::RngCore; + + let rng = &mut rand::rng(); + + let mut master_key = [0; 64]; + rng.fill_bytes(&mut master_key); + + let prk = hkdf::Salt::new(hkdf::HKDF_SHA256, &[]).extract(&master_key); + + let mut 
invalid_token = Vec::new(); + + let mut random_data = [0; 32]; + rand::rng().fill_bytes(&mut random_data); + invalid_token.put_slice(&random_data); + + // Assert: garbage sealed data returns err + assert!(Token::decode(&prk, &invalid_token).is_none()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/token_memory_cache.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/token_memory_cache.rs new file mode 100644 index 0000000000000000000000000000000000000000..3fce05f8abc0b3651d4a1752aea55e08ff6ce5bb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/token_memory_cache.rs @@ -0,0 +1,246 @@ +//! Storing tokens sent from servers in NEW_TOKEN frames and using them in subsequent connections + +use std::{ + collections::{HashMap, VecDeque, hash_map}, + sync::{Arc, Mutex}, +}; + +use bytes::Bytes; +use lru_slab::LruSlab; +use tracing::trace; + +use crate::token::TokenStore; + +/// `TokenStore` implementation that stores up to `N` tokens per server name for up to a +/// limited number of server names, in-memory +#[derive(Debug)] +pub struct TokenMemoryCache(Mutex); + +impl TokenMemoryCache { + /// Construct empty + pub fn new(max_server_names: u32, max_tokens_per_server: usize) -> Self { + Self(Mutex::new(State::new( + max_server_names, + max_tokens_per_server, + ))) + } +} + +impl TokenStore for TokenMemoryCache { + fn insert(&self, server_name: &str, token: Bytes) { + trace!(%server_name, "storing token"); + self.0.lock().unwrap().store(server_name, token) + } + + fn take(&self, server_name: &str) -> Option { + let token = self.0.lock().unwrap().take(server_name); + trace!(%server_name, found=%token.is_some(), "taking token"); + token + } +} + +/// Defaults to a maximum of 256 servers and 2 tokens per server +impl Default for TokenMemoryCache { + fn default() -> Self { + Self::new(256, 2) + } +} + +/// Lockable inner state of `TokenMemoryCache` 
+#[derive(Debug)] +struct State { + max_server_names: u32, + max_tokens_per_server: usize, + // map from server name to index in lru + lookup: HashMap, u32>, + lru: LruSlab, +} + +impl State { + fn new(max_server_names: u32, max_tokens_per_server: usize) -> Self { + Self { + max_server_names, + max_tokens_per_server, + lookup: HashMap::new(), + lru: LruSlab::default(), + } + } + + fn store(&mut self, server_name: &str, token: Bytes) { + if self.max_server_names == 0 { + // the rest of this method assumes that we can always insert a new entry so long as + // we're willing to evict a pre-existing entry. thus, an entry limit of 0 is an edge + // case we must short-circuit on now. + return; + } + if self.max_tokens_per_server == 0 { + // similarly to above, the rest of this method assumes that we can always push a new + // token to a queue so long as we're willing to evict a pre-existing token, so we + // short-circuit on the edge case of a token limit of 0. + return; + } + + let server_name = Arc::::from(server_name); + match self.lookup.entry(server_name.clone()) { + hash_map::Entry::Occupied(hmap_entry) => { + // key already exists, push the new token to its token queue + let tokens = &mut self.lru.get_mut(*hmap_entry.get()).tokens; + if tokens.len() >= self.max_tokens_per_server { + debug_assert!(tokens.len() == self.max_tokens_per_server); + tokens.pop_front().unwrap(); + } + tokens.push_back(token); + } + hash_map::Entry::Vacant(hmap_entry) => { + // key does not yet exist, create a new one, evicting the oldest if necessary + let removed_key = if self.lru.len() >= self.max_server_names { + // unwrap safety: max_server_names is > 0, so there's at least one entry, so + // lru() is some + Some(self.lru.remove(self.lru.lru().unwrap()).server_name) + } else { + None + }; + + hmap_entry.insert(self.lru.insert(CacheEntry::new(server_name, token))); + + // for borrowing reasons, we must defer removing the evicted hmap entry to here + if let Some(removed_slot) = 
removed_key { + let removed = self.lookup.remove(&removed_slot); + debug_assert!(removed.is_some()); + } + } + }; + } + + fn take(&mut self, server_name: &str) -> Option { + let slab_key = *self.lookup.get(server_name)?; + + // pop from entry's token queue + let entry = self.lru.get_mut(slab_key); + // unwrap safety: we never leave tokens empty + let token = entry.tokens.pop_front().unwrap(); + + if entry.tokens.is_empty() { + // token stack emptied, remove entry + self.lru.remove(slab_key); + self.lookup.remove(server_name); + } + + Some(token) + } +} + +/// Cache entry within `TokenMemoryCache`'s LRU slab +#[derive(Debug)] +struct CacheEntry { + server_name: Arc, + // invariant: tokens is never empty + tokens: VecDeque, +} + +impl CacheEntry { + /// Construct with a single token + fn new(server_name: Arc, token: Bytes) -> Self { + let mut tokens = VecDeque::new(); + tokens.push_back(token); + Self { + server_name, + tokens, + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::VecDeque; + + use super::*; + use rand::prelude::*; + use rand_pcg::Pcg32; + + fn new_rng() -> impl Rng { + Pcg32::from_seed(0xdeadbeefdeadbeefdeadbeefdeadbeefu128.to_le_bytes()) + } + + #[test] + fn cache_test() { + let mut rng = new_rng(); + const N: usize = 2; + + for _ in 0..10 { + let mut cache_1: Vec<(u32, VecDeque)> = Vec::new(); // keep it sorted oldest to newest + let cache_2 = TokenMemoryCache::new(20, 2); + + for i in 0..200 { + let server_name = rng.random::() % 10; + if rng.random_bool(0.666) { + // store + let token = Bytes::from(vec![i]); + println!("STORE {server_name} {token:?}"); + if let Some((j, _)) = cache_1 + .iter() + .enumerate() + .find(|&(_, &(server_name_2, _))| server_name_2 == server_name) + { + let (_, mut queue) = cache_1.remove(j); + queue.push_back(token.clone()); + if queue.len() > N { + queue.pop_front(); + } + cache_1.push((server_name, queue)); + } else { + let mut queue = VecDeque::new(); + queue.push_back(token.clone()); + 
cache_1.push((server_name, queue)); + if cache_1.len() > 20 { + cache_1.remove(0); + } + } + cache_2.insert(&server_name.to_string(), token); + } else { + // take + println!("TAKE {server_name}"); + let expecting = cache_1 + .iter() + .enumerate() + .find(|&(_, &(server_name_2, _))| server_name_2 == server_name) + .map(|(j, _)| j) + .map(|j| { + let (_, mut queue) = cache_1.remove(j); + let token = queue.pop_front().unwrap(); + if !queue.is_empty() { + cache_1.push((server_name, queue)); + } + token + }); + println!("EXPECTING {expecting:?}"); + assert_eq!(cache_2.take(&server_name.to_string()), expecting); + } + } + } + } + + #[test] + fn zero_max_server_names() { + // test that this edge case doesn't panic + let cache = TokenMemoryCache::new(0, 2); + for i in 0..10 { + cache.insert(&i.to_string(), Bytes::from(vec![i])); + for j in 0..10 { + assert!(cache.take(&j.to_string()).is_none()); + } + } + } + + #[test] + fn zero_queue_length() { + // test that this edge case doesn't panic + let cache = TokenMemoryCache::new(256, 0); + for i in 0..10 { + cache.insert(&i.to_string(), Bytes::from(vec![i])); + for j in 0..10 { + assert!(cache.take(&j.to_string()).is_none()); + } + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/transport_error.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/transport_error.rs new file mode 100644 index 0000000000000000000000000000000000000000..047cd0acc1e715c8d160f93031c7bae07153f8ad --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/transport_error.rs @@ -0,0 +1,132 @@ +use std::fmt; + +use bytes::{Buf, BufMut}; + +use crate::{ + coding::{self, BufExt, BufMutExt}, + frame, +}; + +/// Transport-level errors occur when a peer violates the protocol specification +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct Error { + /// Type of error + pub code: Code, + /// Frame type that triggered the error + pub frame: 
Option, + /// Human-readable explanation of the reason + pub reason: String, +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.code.fmt(f)?; + if let Some(frame) = self.frame { + write!(f, " in {frame}")?; + } + if !self.reason.is_empty() { + write!(f, ": {}", self.reason)?; + } + Ok(()) + } +} + +impl std::error::Error for Error {} + +impl From for Error { + fn from(x: Code) -> Self { + Self { + code: x, + frame: None, + reason: "".to_string(), + } + } +} + +/// Transport-level error code +#[derive(Copy, Clone, Eq, PartialEq)] +pub struct Code(u64); + +impl Code { + /// Create QUIC error code from TLS alert code + pub fn crypto(code: u8) -> Self { + Self(0x100 | u64::from(code)) + } +} + +impl coding::Codec for Code { + fn decode(buf: &mut B) -> coding::Result { + Ok(Self(buf.get_var()?)) + } + fn encode(&self, buf: &mut B) { + buf.write_var(self.0) + } +} + +impl From for u64 { + fn from(x: Code) -> Self { + x.0 + } +} + +macro_rules! errors { + {$($name:ident($val:expr) $desc:expr;)*} => { + #[allow(non_snake_case, unused)] + impl Error { + $( + pub(crate) fn $name(reason: T) -> Self where T: Into { + Self { + code: Code::$name, + frame: None, + reason: reason.into(), + } + } + )* + } + + impl Code { + $(#[doc = $desc] pub const $name: Self = Code($val);)* + } + + impl fmt::Debug for Code { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 { + $($val => f.write_str(stringify!($name)),)* + x if (0x100..0x200).contains(&x) => write!(f, "Code::crypto({:02x})", self.0 as u8), + _ => write!(f, "Code({:x})", self.0), + } + } + } + + impl fmt::Display for Code { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 { + $($val => f.write_str($desc),)* + // We're trying to be abstract over the crypto protocol, so human-readable descriptions here is tricky. 
+ _ if self.0 >= 0x100 && self.0 < 0x200 => write!(f, "the cryptographic handshake failed: error {}", self.0 & 0xFF), + _ => f.write_str("unknown error"), + } + } + } + } +} + +errors! { + NO_ERROR(0x0) "the connection is being closed abruptly in the absence of any error"; + INTERNAL_ERROR(0x1) "the endpoint encountered an internal error and cannot continue with the connection"; + CONNECTION_REFUSED(0x2) "the server refused to accept a new connection"; + FLOW_CONTROL_ERROR(0x3) "received more data than permitted in advertised data limits"; + STREAM_LIMIT_ERROR(0x4) "received a frame for a stream identifier that exceeded advertised the stream limit for the corresponding stream type"; + STREAM_STATE_ERROR(0x5) "received a frame for a stream that was not in a state that permitted that frame"; + FINAL_SIZE_ERROR(0x6) "received a STREAM frame or a RESET_STREAM frame containing a different final size to the one already established"; + FRAME_ENCODING_ERROR(0x7) "received a frame that was badly formatted"; + TRANSPORT_PARAMETER_ERROR(0x8) "received transport parameters that were badly formatted, included an invalid value, was absent even though it is mandatory, was present though it is forbidden, or is otherwise in error"; + CONNECTION_ID_LIMIT_ERROR(0x9) "the number of connection IDs provided by the peer exceeds the advertised active_connection_id_limit"; + PROTOCOL_VIOLATION(0xA) "detected an error with protocol compliance that was not covered by more specific error codes"; + INVALID_TOKEN(0xB) "received an invalid Retry Token in a client Initial"; + APPLICATION_ERROR(0xC) "the application or application protocol caused the connection to be closed during the handshake"; + CRYPTO_BUFFER_EXCEEDED(0xD) "received more data in CRYPTO frames than can be buffered"; + KEY_UPDATE_ERROR(0xE) "key update error"; + AEAD_LIMIT_REACHED(0xF) "the endpoint has reached the confidentiality or integrity limit for the AEAD algorithm"; + NO_VIABLE_PATH(0x10) "no viable network path exists"; 
+} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/transport_parameters.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/transport_parameters.rs new file mode 100644 index 0000000000000000000000000000000000000000..9fb081812d5f55791ae67ada6eb22dfa2a5157cd --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/transport_parameters.rs @@ -0,0 +1,876 @@ +//! QUIC connection transport parameters +//! +//! The `TransportParameters` type is used to represent the transport parameters +//! negotiated by peers while establishing a QUIC connection. This process +//! happens as part of the establishment of the TLS session. As such, the types +//! contained in this modules should generally only be referred to by custom +//! implementations of the `crypto::Session` trait. + +use std::{ + convert::TryFrom, + net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}, +}; + +use bytes::{Buf, BufMut}; +use rand::{Rng as _, RngCore, seq::SliceRandom as _}; +use thiserror::Error; + +use crate::{ + LOC_CID_COUNT, MAX_CID_SIZE, MAX_STREAM_COUNT, RESET_TOKEN_SIZE, ResetToken, Side, + TIMER_GRANULARITY, TransportError, VarInt, + cid_generator::ConnectionIdGenerator, + cid_queue::CidQueue, + coding::{BufExt, BufMutExt, UnexpectedEnd}, + config::{EndpointConfig, ServerConfig, TransportConfig}, + shared::ConnectionId, +}; + +// Apply a given macro to a list of all the transport parameters having integer types, along with +// their codes and default values. Using this helps us avoid error-prone duplication of the +// contained information across decoding, encoding, and the `Default` impl. Whenever we want to do +// something with transport parameters, we'll handle the bulk of cases by writing a macro that +// takes a list of arguments in this form, then passing it to this macro. +macro_rules! apply_params { + ($macro:ident) => { + $macro! 
{ + // #[doc] name (id) = default, + /// Milliseconds, disabled if zero + max_idle_timeout(MaxIdleTimeout) = 0, + /// Limits the size of UDP payloads that the endpoint is willing to receive + max_udp_payload_size(MaxUdpPayloadSize) = 65527, + + /// Initial value for the maximum amount of data that can be sent on the connection + initial_max_data(InitialMaxData) = 0, + /// Initial flow control limit for locally-initiated bidirectional streams + initial_max_stream_data_bidi_local(InitialMaxStreamDataBidiLocal) = 0, + /// Initial flow control limit for peer-initiated bidirectional streams + initial_max_stream_data_bidi_remote(InitialMaxStreamDataBidiRemote) = 0, + /// Initial flow control limit for unidirectional streams + initial_max_stream_data_uni(InitialMaxStreamDataUni) = 0, + + /// Initial maximum number of bidirectional streams the peer may initiate + initial_max_streams_bidi(InitialMaxStreamsBidi) = 0, + /// Initial maximum number of unidirectional streams the peer may initiate + initial_max_streams_uni(InitialMaxStreamsUni) = 0, + + /// Exponent used to decode the ACK Delay field in the ACK frame + ack_delay_exponent(AckDelayExponent) = 3, + /// Maximum amount of time in milliseconds by which the endpoint will delay sending + /// acknowledgments + max_ack_delay(MaxAckDelay) = 25, + /// Maximum number of connection IDs from the peer that an endpoint is willing to store + active_connection_id_limit(ActiveConnectionIdLimit) = 2, + } + }; +} + +macro_rules! 
make_struct { + {$($(#[$doc:meta])* $name:ident ($id:ident) = $default:expr,)*} => { + /// Transport parameters used to negotiate connection-level preferences between peers + #[derive(Debug, Copy, Clone, Eq, PartialEq)] + pub struct TransportParameters { + $($(#[$doc])* pub(crate) $name : VarInt,)* + + /// Does the endpoint support active connection migration + pub(crate) disable_active_migration: bool, + /// Maximum size for datagram frames + pub(crate) max_datagram_frame_size: Option, + /// The value that the endpoint included in the Source Connection ID field of the first + /// Initial packet it sends for the connection + pub(crate) initial_src_cid: Option, + /// The endpoint is willing to receive QUIC packets containing any value for the fixed + /// bit + pub(crate) grease_quic_bit: bool, + + /// Minimum amount of time in microseconds by which the endpoint is able to delay + /// sending acknowledgments + /// + /// If a value is provided, it implies that the endpoint supports QUIC Acknowledgement + /// Frequency + pub(crate) min_ack_delay: Option, + + // Server-only + /// The value of the Destination Connection ID field from the first Initial packet sent + /// by the client + pub(crate) original_dst_cid: Option, + /// The value that the server included in the Source Connection ID field of a Retry + /// packet + pub(crate) retry_src_cid: Option, + /// Token used by the client to verify a stateless reset from the server + pub(crate) stateless_reset_token: Option, + /// The server's preferred address for communication after handshake completion + pub(crate) preferred_address: Option, + /// The randomly generated reserved transport parameter to sustain future extensibility + /// of transport parameter extensions. + /// When present, it is included during serialization but ignored during deserialization. + pub(crate) grease_transport_parameter: Option, + + /// Defines the order in which transport parameters are serialized. 
+ /// + /// This field is initialized only for outgoing `TransportParameters` instances and + /// is set to `None` for `TransportParameters` received from a peer. + pub(crate) write_order: Option<[u8; TransportParameterId::SUPPORTED.len()]>, + } + + // We deliberately don't implement the `Default` trait, since that would be public, and + // downstream crates should never construct `TransportParameters` except by decoding those + // supplied by a peer. + impl TransportParameters { + /// Standard defaults, used if the peer does not supply a given parameter. + pub(crate) fn default() -> Self { + Self { + $($name: VarInt::from_u32($default),)* + + disable_active_migration: false, + max_datagram_frame_size: None, + initial_src_cid: None, + grease_quic_bit: false, + min_ack_delay: None, + + original_dst_cid: None, + retry_src_cid: None, + stateless_reset_token: None, + preferred_address: None, + grease_transport_parameter: None, + write_order: None, + } + } + } + } +} + +apply_params!(make_struct); + +impl TransportParameters { + pub(crate) fn new( + config: &TransportConfig, + endpoint_config: &EndpointConfig, + cid_gen: &dyn ConnectionIdGenerator, + initial_src_cid: ConnectionId, + server_config: Option<&ServerConfig>, + rng: &mut impl RngCore, + ) -> Self { + Self { + initial_src_cid: Some(initial_src_cid), + initial_max_streams_bidi: config.max_concurrent_bidi_streams, + initial_max_streams_uni: config.max_concurrent_uni_streams, + initial_max_data: config.receive_window, + initial_max_stream_data_bidi_local: config.stream_receive_window, + initial_max_stream_data_bidi_remote: config.stream_receive_window, + initial_max_stream_data_uni: config.stream_receive_window, + max_udp_payload_size: endpoint_config.max_udp_payload_size, + max_idle_timeout: config.max_idle_timeout.unwrap_or(VarInt(0)), + disable_active_migration: server_config.is_some_and(|c| !c.migration), + active_connection_id_limit: if cid_gen.cid_len() == 0 { + 2 // i.e. default, i.e. 
unsent + } else { + CidQueue::LEN as u32 + } + .into(), + max_datagram_frame_size: config + .datagram_receive_buffer_size + .map(|x| (x.min(u16::MAX.into()) as u16).into()), + grease_quic_bit: endpoint_config.grease_quic_bit, + min_ack_delay: Some( + VarInt::from_u64(u64::try_from(TIMER_GRANULARITY.as_micros()).unwrap()).unwrap(), + ), + grease_transport_parameter: Some(ReservedTransportParameter::random(rng)), + write_order: Some({ + let mut order = std::array::from_fn(|i| i as u8); + order.shuffle(rng); + order + }), + ..Self::default() + } + } + + /// Check that these parameters are legal when resuming from + /// certain cached parameters + pub(crate) fn validate_resumption_from(&self, cached: &Self) -> Result<(), TransportError> { + if cached.active_connection_id_limit > self.active_connection_id_limit + || cached.initial_max_data > self.initial_max_data + || cached.initial_max_stream_data_bidi_local > self.initial_max_stream_data_bidi_local + || cached.initial_max_stream_data_bidi_remote > self.initial_max_stream_data_bidi_remote + || cached.initial_max_stream_data_uni > self.initial_max_stream_data_uni + || cached.initial_max_streams_bidi > self.initial_max_streams_bidi + || cached.initial_max_streams_uni > self.initial_max_streams_uni + || cached.max_datagram_frame_size > self.max_datagram_frame_size + || cached.grease_quic_bit && !self.grease_quic_bit + { + return Err(TransportError::PROTOCOL_VIOLATION( + "0-RTT accepted with incompatible transport parameters", + )); + } + Ok(()) + } + + /// Maximum number of CIDs to issue to this peer + /// + /// Consider both a) the active_connection_id_limit from the other end; and + /// b) LOC_CID_COUNT used locally + pub(crate) fn issue_cids_limit(&self) -> u64 { + self.active_connection_id_limit.0.min(LOC_CID_COUNT) + } +} + +/// A server's preferred address +/// +/// This is communicated as a transport parameter during TLS session establishment. 
+#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub(crate) struct PreferredAddress { + pub(crate) address_v4: Option, + pub(crate) address_v6: Option, + pub(crate) connection_id: ConnectionId, + pub(crate) stateless_reset_token: ResetToken, +} + +impl PreferredAddress { + fn wire_size(&self) -> u16 { + 4 + 2 + 16 + 2 + 1 + self.connection_id.len() as u16 + 16 + } + + fn write(&self, w: &mut W) { + w.write(self.address_v4.map_or(Ipv4Addr::UNSPECIFIED, |x| *x.ip())); + w.write::(self.address_v4.map_or(0, |x| x.port())); + w.write(self.address_v6.map_or(Ipv6Addr::UNSPECIFIED, |x| *x.ip())); + w.write::(self.address_v6.map_or(0, |x| x.port())); + w.write::(self.connection_id.len() as u8); + w.put_slice(&self.connection_id); + w.put_slice(&self.stateless_reset_token); + } + + fn read(r: &mut R) -> Result { + let ip_v4 = r.get::()?; + let port_v4 = r.get::()?; + let ip_v6 = r.get::()?; + let port_v6 = r.get::()?; + let cid_len = r.get::()?; + if r.remaining() < cid_len as usize || cid_len > MAX_CID_SIZE as u8 { + return Err(Error::Malformed); + } + let mut stage = [0; MAX_CID_SIZE]; + r.copy_to_slice(&mut stage[0..cid_len as usize]); + let cid = ConnectionId::new(&stage[0..cid_len as usize]); + if r.remaining() < 16 { + return Err(Error::Malformed); + } + let mut token = [0; RESET_TOKEN_SIZE]; + r.copy_to_slice(&mut token); + let address_v4 = if ip_v4.is_unspecified() && port_v4 == 0 { + None + } else { + Some(SocketAddrV4::new(ip_v4, port_v4)) + }; + let address_v6 = if ip_v6.is_unspecified() && port_v6 == 0 { + None + } else { + Some(SocketAddrV6::new(ip_v6, port_v6, 0, 0)) + }; + if address_v4.is_none() && address_v6.is_none() { + return Err(Error::IllegalValue); + } + Ok(Self { + address_v4, + address_v6, + connection_id: cid, + stateless_reset_token: token.into(), + }) + } +} + +/// Errors encountered while decoding `TransportParameters` +#[derive(Debug, Copy, Clone, Eq, PartialEq, Error)] +pub enum Error { + /// Parameters that are semantically invalid + 
#[error("parameter had illegal value")] + IllegalValue, + /// Catch-all error for problems while decoding transport parameters + #[error("parameters were malformed")] + Malformed, +} + +impl From for TransportError { + fn from(e: Error) -> Self { + match e { + Error::IllegalValue => Self::TRANSPORT_PARAMETER_ERROR("illegal value"), + Error::Malformed => Self::TRANSPORT_PARAMETER_ERROR("malformed"), + } + } +} + +impl From for Error { + fn from(_: UnexpectedEnd) -> Self { + Self::Malformed + } +} + +impl TransportParameters { + /// Encode `TransportParameters` into buffer + pub fn write(&self, w: &mut W) { + for idx in self + .write_order + .as_ref() + .unwrap_or(&std::array::from_fn(|i| i as u8)) + { + let id = TransportParameterId::SUPPORTED[*idx as usize]; + match id { + TransportParameterId::ReservedTransportParameter => { + if let Some(param) = self.grease_transport_parameter { + param.write(w); + } + } + TransportParameterId::StatelessResetToken => { + if let Some(ref x) = self.stateless_reset_token { + w.write_var(id as u64); + w.write_var(16); + w.put_slice(x); + } + } + TransportParameterId::DisableActiveMigration => { + if self.disable_active_migration { + w.write_var(id as u64); + w.write_var(0); + } + } + TransportParameterId::MaxDatagramFrameSize => { + if let Some(x) = self.max_datagram_frame_size { + w.write_var(id as u64); + w.write_var(x.size() as u64); + w.write(x); + } + } + TransportParameterId::PreferredAddress => { + if let Some(ref x) = self.preferred_address { + w.write_var(id as u64); + w.write_var(x.wire_size() as u64); + x.write(w); + } + } + TransportParameterId::OriginalDestinationConnectionId => { + if let Some(ref cid) = self.original_dst_cid { + w.write_var(id as u64); + w.write_var(cid.len() as u64); + w.put_slice(cid); + } + } + TransportParameterId::InitialSourceConnectionId => { + if let Some(ref cid) = self.initial_src_cid { + w.write_var(id as u64); + w.write_var(cid.len() as u64); + w.put_slice(cid); + } + } + 
TransportParameterId::RetrySourceConnectionId => { + if let Some(ref cid) = self.retry_src_cid { + w.write_var(id as u64); + w.write_var(cid.len() as u64); + w.put_slice(cid); + } + } + TransportParameterId::GreaseQuicBit => { + if self.grease_quic_bit { + w.write_var(id as u64); + w.write_var(0); + } + } + TransportParameterId::MinAckDelayDraft07 => { + if let Some(x) = self.min_ack_delay { + w.write_var(id as u64); + w.write_var(x.size() as u64); + w.write(x); + } + } + id => { + macro_rules! write_params { + {$($(#[$doc:meta])* $name:ident ($id:ident) = $default:expr,)*} => { + match id { + $(TransportParameterId::$id => { + if self.$name.0 != $default { + w.write_var(id as u64); + w.write(VarInt::try_from(self.$name.size()).unwrap()); + w.write(self.$name); + } + })*, + _ => { + unimplemented!("Missing implementation of write for transport parameter with code {id:?}"); + } + } + } + } + apply_params!(write_params); + } + } + } + } + + /// Decode `TransportParameters` from buffer + pub fn read(side: Side, r: &mut R) -> Result { + // Initialize to protocol-specified defaults + let mut params = Self::default(); + + // State to check for duplicate transport parameters. + macro_rules! param_state { + {$($(#[$doc:meta])* $name:ident ($id:ident) = $default:expr,)*} => {{ + struct ParamState { + $($name: bool,)* + } + + ParamState { + $($name: false,)* + } + }} + } + let mut got = apply_params!(param_state); + + while r.has_remaining() { + let id = r.get_var()?; + let len = r.get_var()?; + if (r.remaining() as u64) < len { + return Err(Error::Malformed); + } + let len = len as usize; + let Ok(id) = TransportParameterId::try_from(id) else { + // unknown transport parameters are ignored + r.advance(len); + continue; + }; + + match id { + TransportParameterId::OriginalDestinationConnectionId => { + decode_cid(len, &mut params.original_dst_cid, r)? 
+ } + TransportParameterId::StatelessResetToken => { + if len != 16 || params.stateless_reset_token.is_some() { + return Err(Error::Malformed); + } + let mut tok = [0; RESET_TOKEN_SIZE]; + r.copy_to_slice(&mut tok); + params.stateless_reset_token = Some(tok.into()); + } + TransportParameterId::DisableActiveMigration => { + if len != 0 || params.disable_active_migration { + return Err(Error::Malformed); + } + params.disable_active_migration = true; + } + TransportParameterId::PreferredAddress => { + if params.preferred_address.is_some() { + return Err(Error::Malformed); + } + params.preferred_address = Some(PreferredAddress::read(&mut r.take(len))?); + } + TransportParameterId::InitialSourceConnectionId => { + decode_cid(len, &mut params.initial_src_cid, r)? + } + TransportParameterId::RetrySourceConnectionId => { + decode_cid(len, &mut params.retry_src_cid, r)? + } + TransportParameterId::MaxDatagramFrameSize => { + if len > 8 || params.max_datagram_frame_size.is_some() { + return Err(Error::Malformed); + } + params.max_datagram_frame_size = Some(r.get().unwrap()); + } + TransportParameterId::GreaseQuicBit => match len { + 0 => params.grease_quic_bit = true, + _ => return Err(Error::Malformed), + }, + TransportParameterId::MinAckDelayDraft07 => { + params.min_ack_delay = Some(r.get().unwrap()) + } + _ => { + macro_rules! 
parse { + {$($(#[$doc:meta])* $name:ident ($id:ident) = $default:expr,)*} => { + match id { + $(TransportParameterId::$id => { + let value = r.get::()?; + if len != value.size() || got.$name { return Err(Error::Malformed); } + params.$name = value.into(); + got.$name = true; + })* + _ => r.advance(len), + } + } + } + apply_params!(parse); + } + } + } + + // Semantic validation + + // https://www.rfc-editor.org/rfc/rfc9000.html#section-18.2-4.26.1 + if params.ack_delay_exponent.0 > 20 + // https://www.rfc-editor.org/rfc/rfc9000.html#section-18.2-4.28.1 + || params.max_ack_delay.0 >= 1 << 14 + // https://www.rfc-editor.org/rfc/rfc9000.html#section-18.2-6.2.1 + || params.active_connection_id_limit.0 < 2 + // https://www.rfc-editor.org/rfc/rfc9000.html#section-18.2-4.10.1 + || params.max_udp_payload_size.0 < 1200 + // https://www.rfc-editor.org/rfc/rfc9000.html#section-4.6-2 + || params.initial_max_streams_bidi.0 > MAX_STREAM_COUNT + || params.initial_max_streams_uni.0 > MAX_STREAM_COUNT + // https://www.ietf.org/archive/id/draft-ietf-quic-ack-frequency-08.html#section-3-4 + || params.min_ack_delay.is_some_and(|min_ack_delay| { + // min_ack_delay uses microseconds, whereas max_ack_delay uses milliseconds + min_ack_delay.0 > params.max_ack_delay.0 * 1_000 + }) + // https://www.rfc-editor.org/rfc/rfc9000.html#section-18.2-8 + || (side.is_server() + && (params.original_dst_cid.is_some() + || params.preferred_address.is_some() + || params.retry_src_cid.is_some() + || params.stateless_reset_token.is_some())) + // https://www.rfc-editor.org/rfc/rfc9000.html#section-18.2-4.38.1 + || params + .preferred_address.is_some_and(|x| x.connection_id.is_empty()) + { + return Err(Error::IllegalValue); + } + + Ok(params) + } +} + +/// A reserved transport parameter. +/// +/// It has an identifier of the form 31 * N + 27 for the integer value of N. +/// Such identifiers are reserved to exercise the requirement that unknown transport parameters be ignored. 
+/// The reserved transport parameter has no semantics and can carry arbitrary values. +/// It may be included in transport parameters sent to the peer, and should be ignored when received. +/// +/// See spec: +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub(crate) struct ReservedTransportParameter { + /// The reserved identifier of the transport parameter + id: VarInt, + + /// Buffer to store the parameter payload + payload: [u8; Self::MAX_PAYLOAD_LEN], + + /// The number of bytes to include in the wire format from the `payload` buffer + payload_len: usize, +} + +impl ReservedTransportParameter { + /// Generates a transport parameter with a random payload and a reserved ID. + /// + /// The implementation is inspired by quic-go and quiche: + /// 1. + /// 2. + fn random(rng: &mut impl RngCore) -> Self { + let id = Self::generate_reserved_id(rng); + + let payload_len = rng.random_range(0..Self::MAX_PAYLOAD_LEN); + + let payload = { + let mut slice = [0u8; Self::MAX_PAYLOAD_LEN]; + rng.fill_bytes(&mut slice[..payload_len]); + slice + }; + + Self { + id, + payload, + payload_len, + } + } + + fn write(&self, w: &mut impl BufMut) { + w.write_var(self.id.0); + w.write_var(self.payload_len as u64); + w.put_slice(&self.payload[..self.payload_len]); + } + + /// Generates a random reserved identifier of the form `31 * N + 27`, as required by RFC 9000. + /// Reserved transport parameter identifiers are used to test compliance with the requirement + /// that unknown transport parameters must be ignored by peers. 
+ /// See: and + fn generate_reserved_id(rng: &mut impl RngCore) -> VarInt { + let id = { + let rand = rng.random_range(0u64..(1 << 62) - 27); + let n = rand / 31; + 31 * n + 27 + }; + debug_assert!( + id % 31 == 27, + "generated id does not have the form of 31 * N + 27" + ); + VarInt::from_u64(id).expect( + "generated id does fit into range of allowed transport parameter IDs: [0; 2^62)", + ) + } + + /// The maximum length of the payload to include as the parameter payload. + /// This value is not a specification-imposed limit but is chosen to match + /// the limit used by other implementations of QUIC, e.g., quic-go and quiche. + const MAX_PAYLOAD_LEN: usize = 16; +} + +#[repr(u64)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum TransportParameterId { + // https://www.rfc-editor.org/rfc/rfc9000.html#iana-tp-table + OriginalDestinationConnectionId = 0x00, + MaxIdleTimeout = 0x01, + StatelessResetToken = 0x02, + MaxUdpPayloadSize = 0x03, + InitialMaxData = 0x04, + InitialMaxStreamDataBidiLocal = 0x05, + InitialMaxStreamDataBidiRemote = 0x06, + InitialMaxStreamDataUni = 0x07, + InitialMaxStreamsBidi = 0x08, + InitialMaxStreamsUni = 0x09, + AckDelayExponent = 0x0A, + MaxAckDelay = 0x0B, + DisableActiveMigration = 0x0C, + PreferredAddress = 0x0D, + ActiveConnectionIdLimit = 0x0E, + InitialSourceConnectionId = 0x0F, + RetrySourceConnectionId = 0x10, + + // Smallest possible ID of reserved transport parameter https://datatracker.ietf.org/doc/html/rfc9000#section-22.3 + ReservedTransportParameter = 0x1B, + + // https://www.rfc-editor.org/rfc/rfc9221.html#section-3 + MaxDatagramFrameSize = 0x20, + + // https://www.rfc-editor.org/rfc/rfc9287.html#section-3 + GreaseQuicBit = 0x2AB2, + + // https://datatracker.ietf.org/doc/html/draft-ietf-quic-ack-frequency#section-10.1 + MinAckDelayDraft07 = 0xFF04DE1B, +} + +impl TransportParameterId { + /// Array with all supported transport parameter IDs + const SUPPORTED: [Self; 21] = [ + Self::MaxIdleTimeout, + 
Self::MaxUdpPayloadSize, + Self::InitialMaxData, + Self::InitialMaxStreamDataBidiLocal, + Self::InitialMaxStreamDataBidiRemote, + Self::InitialMaxStreamDataUni, + Self::InitialMaxStreamsBidi, + Self::InitialMaxStreamsUni, + Self::AckDelayExponent, + Self::MaxAckDelay, + Self::ActiveConnectionIdLimit, + Self::ReservedTransportParameter, + Self::StatelessResetToken, + Self::DisableActiveMigration, + Self::MaxDatagramFrameSize, + Self::PreferredAddress, + Self::OriginalDestinationConnectionId, + Self::InitialSourceConnectionId, + Self::RetrySourceConnectionId, + Self::GreaseQuicBit, + Self::MinAckDelayDraft07, + ]; +} + +impl std::cmp::PartialEq for TransportParameterId { + fn eq(&self, other: &u64) -> bool { + *other == (*self as u64) + } +} + +impl TryFrom for TransportParameterId { + type Error = (); + + fn try_from(value: u64) -> Result { + let param = match value { + id if Self::MaxIdleTimeout == id => Self::MaxIdleTimeout, + id if Self::MaxUdpPayloadSize == id => Self::MaxUdpPayloadSize, + id if Self::InitialMaxData == id => Self::InitialMaxData, + id if Self::InitialMaxStreamDataBidiLocal == id => Self::InitialMaxStreamDataBidiLocal, + id if Self::InitialMaxStreamDataBidiRemote == id => { + Self::InitialMaxStreamDataBidiRemote + } + id if Self::InitialMaxStreamDataUni == id => Self::InitialMaxStreamDataUni, + id if Self::InitialMaxStreamsBidi == id => Self::InitialMaxStreamsBidi, + id if Self::InitialMaxStreamsUni == id => Self::InitialMaxStreamsUni, + id if Self::AckDelayExponent == id => Self::AckDelayExponent, + id if Self::MaxAckDelay == id => Self::MaxAckDelay, + id if Self::ActiveConnectionIdLimit == id => Self::ActiveConnectionIdLimit, + id if Self::ReservedTransportParameter == id => Self::ReservedTransportParameter, + id if Self::StatelessResetToken == id => Self::StatelessResetToken, + id if Self::DisableActiveMigration == id => Self::DisableActiveMigration, + id if Self::MaxDatagramFrameSize == id => Self::MaxDatagramFrameSize, + id if 
Self::PreferredAddress == id => Self::PreferredAddress, + id if Self::OriginalDestinationConnectionId == id => { + Self::OriginalDestinationConnectionId + } + id if Self::InitialSourceConnectionId == id => Self::InitialSourceConnectionId, + id if Self::RetrySourceConnectionId == id => Self::RetrySourceConnectionId, + id if Self::GreaseQuicBit == id => Self::GreaseQuicBit, + id if Self::MinAckDelayDraft07 == id => Self::MinAckDelayDraft07, + _ => return Err(()), + }; + Ok(param) + } +} + +fn decode_cid(len: usize, value: &mut Option, r: &mut impl Buf) -> Result<(), Error> { + if len > MAX_CID_SIZE || value.is_some() || r.remaining() < len { + return Err(Error::Malformed); + } + + *value = Some(ConnectionId::from_buf(r, len)); + Ok(()) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn coding() { + let mut buf = Vec::new(); + let params = TransportParameters { + initial_src_cid: Some(ConnectionId::new(&[])), + original_dst_cid: Some(ConnectionId::new(&[])), + initial_max_streams_bidi: 16u32.into(), + initial_max_streams_uni: 16u32.into(), + ack_delay_exponent: 2u32.into(), + max_udp_payload_size: 1200u32.into(), + preferred_address: Some(PreferredAddress { + address_v4: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 42)), + address_v6: Some(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 24, 0, 0)), + connection_id: ConnectionId::new(&[0x42]), + stateless_reset_token: [0xab; RESET_TOKEN_SIZE].into(), + }), + grease_quic_bit: true, + min_ack_delay: Some(2_000u32.into()), + ..TransportParameters::default() + }; + params.write(&mut buf); + assert_eq!( + TransportParameters::read(Side::Client, &mut buf.as_slice()).unwrap(), + params + ); + } + + #[test] + fn reserved_transport_parameter_generate_reserved_id() { + let mut rngs = [ + StepRng(0), + StepRng(1), + StepRng(27), + StepRng(31), + StepRng(u32::MAX as u64), + StepRng(u32::MAX as u64 - 1), + StepRng(u32::MAX as u64 + 1), + StepRng(u32::MAX as u64 - 27), + StepRng(u32::MAX as u64 + 27), + StepRng(u32::MAX as u64 - 
31), + StepRng(u32::MAX as u64 + 31), + StepRng(u64::MAX), + StepRng(u64::MAX - 1), + StepRng(u64::MAX - 27), + StepRng(u64::MAX - 31), + StepRng(1 << 62), + StepRng((1 << 62) - 1), + StepRng((1 << 62) + 1), + StepRng((1 << 62) - 27), + StepRng((1 << 62) + 27), + StepRng((1 << 62) - 31), + StepRng((1 << 62) + 31), + ]; + for rng in &mut rngs { + let id = ReservedTransportParameter::generate_reserved_id(rng); + assert!(id.0 % 31 == 27) + } + } + + struct StepRng(u64); + + impl RngCore for StepRng { + #[inline] + fn next_u32(&mut self) -> u32 { + self.next_u64() as u32 + } + + #[inline] + fn next_u64(&mut self) -> u64 { + let res = self.0; + self.0 = self.0.wrapping_add(1); + res + } + + #[inline] + fn fill_bytes(&mut self, dst: &mut [u8]) { + let mut left = dst; + while left.len() >= 8 { + let (l, r) = left.split_at_mut(8); + left = r; + l.copy_from_slice(&self.next_u64().to_le_bytes()); + } + let n = left.len(); + if n > 0 { + left.copy_from_slice(&self.next_u32().to_le_bytes()[..n]); + } + } + } + + #[test] + fn reserved_transport_parameter_ignored_when_read() { + let mut buf = Vec::new(); + let reserved_parameter = ReservedTransportParameter::random(&mut rand::rng()); + assert!(reserved_parameter.payload_len < ReservedTransportParameter::MAX_PAYLOAD_LEN); + assert!(reserved_parameter.id.0 % 31 == 27); + + reserved_parameter.write(&mut buf); + assert!(!buf.is_empty()); + let read_params = TransportParameters::read(Side::Server, &mut buf.as_slice()).unwrap(); + assert_eq!(read_params, TransportParameters::default()); + } + + #[test] + fn read_semantic_validation() { + #[allow(clippy::type_complexity)] + let illegal_params_builders: Vec> = vec![ + Box::new(|t| { + // This min_ack_delay is bigger than max_ack_delay! 
+ let min_ack_delay = t.max_ack_delay.0 * 1_000 + 1; + t.min_ack_delay = Some(VarInt::from_u64(min_ack_delay).unwrap()) + }), + Box::new(|t| { + // Preferred address can only be sent by senders (and we are reading the transport + // params as a client) + t.preferred_address = Some(PreferredAddress { + address_v4: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 42)), + address_v6: None, + connection_id: ConnectionId::new(&[]), + stateless_reset_token: [0xab; RESET_TOKEN_SIZE].into(), + }) + }), + ]; + + for mut builder in illegal_params_builders { + let mut buf = Vec::new(); + let mut params = TransportParameters::default(); + builder(&mut params); + params.write(&mut buf); + + assert_eq!( + TransportParameters::read(Side::Server, &mut buf.as_slice()), + Err(Error::IllegalValue) + ); + } + } + + #[test] + fn resumption_params_validation() { + let high_limit = TransportParameters { + initial_max_streams_uni: 32u32.into(), + ..TransportParameters::default() + }; + let low_limit = TransportParameters { + initial_max_streams_uni: 16u32.into(), + ..TransportParameters::default() + }; + high_limit.validate_resumption_from(&low_limit).unwrap(); + low_limit.validate_resumption_from(&high_limit).unwrap_err(); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/varint.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/varint.rs new file mode 100644 index 0000000000000000000000000000000000000000..220228fd053ceb83fca6659a94e48f33d67314d7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/quinn-proto-0.11.13/src/varint.rs @@ -0,0 +1,193 @@ +use std::{convert::TryInto, fmt}; + +use bytes::{Buf, BufMut}; +use thiserror::Error; + +use crate::coding::{self, Codec, UnexpectedEnd}; + +#[cfg(feature = "arbitrary")] +use arbitrary::Arbitrary; + +/// An integer less than 2^62 +/// +/// Values of this type are suitable for encoding as QUIC variable-length integer. 
+// It would be neat if we could express to Rust that the top two bits are available for use as enum +// discriminants +#[derive(Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct VarInt(pub(crate) u64); + +impl VarInt { + /// The largest representable value + pub const MAX: Self = Self((1 << 62) - 1); + /// The largest encoded value length + pub const MAX_SIZE: usize = 8; + + /// Construct a `VarInt` infallibly + pub const fn from_u32(x: u32) -> Self { + Self(x as u64) + } + + /// Succeeds iff `x` < 2^62 + pub fn from_u64(x: u64) -> Result { + if x < 2u64.pow(62) { + Ok(Self(x)) + } else { + Err(VarIntBoundsExceeded) + } + } + + /// Create a VarInt without ensuring it's in range + /// + /// # Safety + /// + /// `x` must be less than 2^62. + pub const unsafe fn from_u64_unchecked(x: u64) -> Self { + Self(x) + } + + /// Extract the integer value + pub const fn into_inner(self) -> u64 { + self.0 + } + + /// Compute the number of bytes needed to encode this value + pub(crate) const fn size(self) -> usize { + let x = self.0; + if x < 2u64.pow(6) { + 1 + } else if x < 2u64.pow(14) { + 2 + } else if x < 2u64.pow(30) { + 4 + } else if x < 2u64.pow(62) { + 8 + } else { + panic!("malformed VarInt"); + } + } +} + +impl From for u64 { + fn from(x: VarInt) -> Self { + x.0 + } +} + +impl From for VarInt { + fn from(x: u8) -> Self { + Self(x.into()) + } +} + +impl From for VarInt { + fn from(x: u16) -> Self { + Self(x.into()) + } +} + +impl From for VarInt { + fn from(x: u32) -> Self { + Self(x.into()) + } +} + +impl std::convert::TryFrom for VarInt { + type Error = VarIntBoundsExceeded; + /// Succeeds iff `x` < 2^62 + fn try_from(x: u64) -> Result { + Self::from_u64(x) + } +} + +impl std::convert::TryFrom for VarInt { + type Error = VarIntBoundsExceeded; + /// Succeeds iff `x` < 2^62 + fn try_from(x: u128) -> Result { + Self::from_u64(x.try_into().map_err(|_| VarIntBoundsExceeded)?) 
+ } +} + +impl std::convert::TryFrom for VarInt { + type Error = VarIntBoundsExceeded; + /// Succeeds iff `x` < 2^62 + fn try_from(x: usize) -> Result { + Self::try_from(x as u64) + } +} + +impl fmt::Debug for VarInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Display for VarInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +#[cfg(feature = "arbitrary")] +impl<'arbitrary> Arbitrary<'arbitrary> for VarInt { + fn arbitrary(u: &mut arbitrary::Unstructured<'arbitrary>) -> arbitrary::Result { + Ok(Self(u.int_in_range(0..=Self::MAX.0)?)) + } +} + +/// Error returned when constructing a `VarInt` from a value >= 2^62 +#[derive(Debug, Copy, Clone, Eq, PartialEq, Error)] +#[error("value too large for varint encoding")] +pub struct VarIntBoundsExceeded; + +impl Codec for VarInt { + fn decode(r: &mut B) -> coding::Result { + if !r.has_remaining() { + return Err(UnexpectedEnd); + } + let mut buf = [0; 8]; + buf[0] = r.get_u8(); + let tag = buf[0] >> 6; + buf[0] &= 0b0011_1111; + let x = match tag { + 0b00 => u64::from(buf[0]), + 0b01 => { + if r.remaining() < 1 { + return Err(UnexpectedEnd); + } + r.copy_to_slice(&mut buf[1..2]); + u64::from(u16::from_be_bytes(buf[..2].try_into().unwrap())) + } + 0b10 => { + if r.remaining() < 3 { + return Err(UnexpectedEnd); + } + r.copy_to_slice(&mut buf[1..4]); + u64::from(u32::from_be_bytes(buf[..4].try_into().unwrap())) + } + 0b11 => { + if r.remaining() < 7 { + return Err(UnexpectedEnd); + } + r.copy_to_slice(&mut buf[1..8]); + u64::from_be_bytes(buf) + } + _ => unreachable!(), + }; + Ok(Self(x)) + } + + fn encode(&self, w: &mut B) { + let x = self.0; + if x < 2u64.pow(6) { + w.put_u8(x as u8); + } else if x < 2u64.pow(14) { + w.put_u16((0b01 << 14) | x as u16); + } else if x < 2u64.pow(30) { + w.put_u32((0b10 << 30) | x as u32); + } else if x < 2u64.pow(62) { + w.put_u64((0b11 << 62) | x); + } else { + unreachable!("malformed VarInt") + } + } 
+} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..7e2426dc78fdca019dea80d26f4be2fe394bf057 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/mod.rs @@ -0,0 +1,1807 @@ +/*! +Defines an abstract syntax for regular expressions. +*/ + +use core::cmp::Ordering; + +use alloc::{boxed::Box, string::String, vec, vec::Vec}; + +pub use crate::ast::visitor::{visit, Visitor}; + +pub mod parse; +pub mod print; +mod visitor; + +/// An error that occurred while parsing a regular expression into an abstract +/// syntax tree. +/// +/// Note that not all ASTs represents a valid regular expression. For example, +/// an AST is constructed without error for `\p{Quux}`, but `Quux` is not a +/// valid Unicode property name. That particular error is reported when +/// translating an AST to the high-level intermediate representation (`HIR`). +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Error { + /// The kind of error. + kind: ErrorKind, + /// The original pattern that the parser generated the error from. Every + /// span in an error is a valid range into this string. + pattern: String, + /// The span of this error. + span: Span, +} + +impl Error { + /// Return the type of this error. + pub fn kind(&self) -> &ErrorKind { + &self.kind + } + + /// The original pattern string in which this error occurred. + /// + /// Every span reported by this error is reported in terms of this string. + pub fn pattern(&self) -> &str { + &self.pattern + } + + /// Return the span at which this error occurred. + pub fn span(&self) -> &Span { + &self.span + } + + /// Return an auxiliary span. 
This span exists only for some errors that + /// benefit from being able to point to two locations in the original + /// regular expression. For example, "duplicate" errors will have the + /// main error position set to the duplicate occurrence while its + /// auxiliary span will be set to the initial occurrence. + pub fn auxiliary_span(&self) -> Option<&Span> { + use self::ErrorKind::*; + match self.kind { + FlagDuplicate { ref original } => Some(original), + FlagRepeatedNegation { ref original, .. } => Some(original), + GroupNameDuplicate { ref original, .. } => Some(original), + _ => None, + } + } +} + +/// The type of an error that occurred while building an AST. +/// +/// This error type is marked as `non_exhaustive`. This means that adding a +/// new variant is not considered a breaking change. +#[non_exhaustive] +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ErrorKind { + /// The capturing group limit was exceeded. + /// + /// Note that this represents a limit on the total number of capturing + /// groups in a regex and not necessarily the number of nested capturing + /// groups. That is, the nest limit can be low and it is still possible for + /// this error to occur. + CaptureLimitExceeded, + /// An invalid escape sequence was found in a character class set. + ClassEscapeInvalid, + /// An invalid character class range was found. An invalid range is any + /// range where the start is greater than the end. + ClassRangeInvalid, + /// An invalid range boundary was found in a character class. Range + /// boundaries must be a single literal codepoint, but this error indicates + /// that something else was found, such as a nested class. + ClassRangeLiteral, + /// An opening `[` was found with no corresponding closing `]`. + ClassUnclosed, + /// Note that this error variant is no longer used. Namely, a decimal + /// number can only appear as a repetition quantifier. 
When the number + /// in a repetition quantifier is empty, then it gets its own specialized + /// error, `RepetitionCountDecimalEmpty`. + DecimalEmpty, + /// An invalid decimal number was given where one was expected. + DecimalInvalid, + /// A bracketed hex literal was empty. + EscapeHexEmpty, + /// A bracketed hex literal did not correspond to a Unicode scalar value. + EscapeHexInvalid, + /// An invalid hexadecimal digit was found. + EscapeHexInvalidDigit, + /// EOF was found before an escape sequence was completed. + EscapeUnexpectedEof, + /// An unrecognized escape sequence. + EscapeUnrecognized, + /// A dangling negation was used when setting flags, e.g., `i-`. + FlagDanglingNegation, + /// A flag was used twice, e.g., `i-i`. + FlagDuplicate { + /// The position of the original flag. The error position + /// points to the duplicate flag. + original: Span, + }, + /// The negation operator was used twice, e.g., `-i-s`. + FlagRepeatedNegation { + /// The position of the original negation operator. The error position + /// points to the duplicate negation operator. + original: Span, + }, + /// Expected a flag but got EOF, e.g., `(?`. + FlagUnexpectedEof, + /// Unrecognized flag, e.g., `a`. + FlagUnrecognized, + /// A duplicate capture name was found. + GroupNameDuplicate { + /// The position of the initial occurrence of the capture name. The + /// error position itself points to the duplicate occurrence. + original: Span, + }, + /// A capture group name is empty, e.g., `(?P<>abc)`. + GroupNameEmpty, + /// An invalid character was seen for a capture group name. This includes + /// errors where the first character is a digit (even though subsequent + /// characters are allowed to be digits). + GroupNameInvalid, + /// A closing `>` could not be found for a capture group name. + GroupNameUnexpectedEof, + /// An unclosed group, e.g., `(ab`. + /// + /// The span of this error corresponds to the unclosed parenthesis. + GroupUnclosed, + /// An unopened group, e.g., `ab)`. 
+ GroupUnopened, + /// The nest limit was exceeded. The limit stored here is the limit + /// configured in the parser. + NestLimitExceeded(u32), + /// The range provided in a counted repetition operator is invalid. The + /// range is invalid if the start is greater than the end. + RepetitionCountInvalid, + /// An opening `{` was not followed by a valid decimal value. + /// For example, `x{}` or `x{]}` would fail. + RepetitionCountDecimalEmpty, + /// An opening `{` was found with no corresponding closing `}`. + RepetitionCountUnclosed, + /// A repetition operator was applied to a missing sub-expression. This + /// occurs, for example, in the regex consisting of just a `*` or even + /// `(?i)*`. It is, however, possible to create a repetition operating on + /// an empty sub-expression. For example, `()*` is still considered valid. + RepetitionMissing, + /// The special word boundary syntax, `\b{something}`, was used, but + /// either EOF without `}` was seen, or an invalid character in the + /// braces was seen. + SpecialWordBoundaryUnclosed, + /// The special word boundary syntax, `\b{something}`, was used, but + /// `something` was not recognized as a valid word boundary kind. + SpecialWordBoundaryUnrecognized, + /// The syntax `\b{` was observed, but afterwards the end of the pattern + /// was observed without being able to tell whether it was meant to be a + /// bounded repetition on the `\b` or the beginning of a special word + /// boundary assertion. + SpecialWordOrRepetitionUnexpectedEof, + /// The Unicode class is not valid. This typically occurs when a `\p` is + /// followed by something other than a `{`. + UnicodeClassInvalid, + /// When octal support is disabled, this error is produced when an octal + /// escape is used. The octal escape is assumed to be an invocation of + /// a backreference, which is the common case. + UnsupportedBackreference, + /// When syntax similar to PCRE's look-around is used, this error is + /// returned. 
Some example syntaxes that are rejected include, but are + /// not necessarily limited to, `(?=re)`, `(?!re)`, `(?<=re)` and + /// `(?) -> core::fmt::Result { + crate::error::Formatter::from(self).fmt(f) + } +} + +impl core::fmt::Display for ErrorKind { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use self::ErrorKind::*; + match *self { + CaptureLimitExceeded => write!( + f, + "exceeded the maximum number of \ + capturing groups ({})", + u32::MAX + ), + ClassEscapeInvalid => { + write!(f, "invalid escape sequence found in character class") + } + ClassRangeInvalid => write!( + f, + "invalid character class range, \ + the start must be <= the end" + ), + ClassRangeLiteral => { + write!(f, "invalid range boundary, must be a literal") + } + ClassUnclosed => write!(f, "unclosed character class"), + DecimalEmpty => write!(f, "decimal literal empty"), + DecimalInvalid => write!(f, "decimal literal invalid"), + EscapeHexEmpty => write!(f, "hexadecimal literal empty"), + EscapeHexInvalid => { + write!(f, "hexadecimal literal is not a Unicode scalar value") + } + EscapeHexInvalidDigit => write!(f, "invalid hexadecimal digit"), + EscapeUnexpectedEof => write!( + f, + "incomplete escape sequence, \ + reached end of pattern prematurely" + ), + EscapeUnrecognized => write!(f, "unrecognized escape sequence"), + FlagDanglingNegation => { + write!(f, "dangling flag negation operator") + } + FlagDuplicate { .. } => write!(f, "duplicate flag"), + FlagRepeatedNegation { .. } => { + write!(f, "flag negation operator repeated") + } + FlagUnexpectedEof => { + write!(f, "expected flag but got end of regex") + } + FlagUnrecognized => write!(f, "unrecognized flag"), + GroupNameDuplicate { .. 
} => { + write!(f, "duplicate capture group name") + } + GroupNameEmpty => write!(f, "empty capture group name"), + GroupNameInvalid => write!(f, "invalid capture group character"), + GroupNameUnexpectedEof => write!(f, "unclosed capture group name"), + GroupUnclosed => write!(f, "unclosed group"), + GroupUnopened => write!(f, "unopened group"), + NestLimitExceeded(limit) => write!( + f, + "exceed the maximum number of \ + nested parentheses/brackets ({})", + limit + ), + RepetitionCountInvalid => write!( + f, + "invalid repetition count range, \ + the start must be <= the end" + ), + RepetitionCountDecimalEmpty => { + write!(f, "repetition quantifier expects a valid decimal") + } + RepetitionCountUnclosed => { + write!(f, "unclosed counted repetition") + } + RepetitionMissing => { + write!(f, "repetition operator missing expression") + } + SpecialWordBoundaryUnclosed => { + write!( + f, + "special word boundary assertion is either \ + unclosed or contains an invalid character", + ) + } + SpecialWordBoundaryUnrecognized => { + write!( + f, + "unrecognized special word boundary assertion, \ + valid choices are: start, end, start-half \ + or end-half", + ) + } + SpecialWordOrRepetitionUnexpectedEof => { + write!( + f, + "found either the beginning of a special word \ + boundary or a bounded repetition on a \\b with \ + an opening brace, but no closing brace", + ) + } + UnicodeClassInvalid => { + write!(f, "invalid Unicode character class") + } + UnsupportedBackreference => { + write!(f, "backreferences are not supported") + } + UnsupportedLookAround => write!( + f, + "look-around, including look-ahead and look-behind, \ + is not supported" + ), + } + } +} + +/// Span represents the position information of a single AST item. +/// +/// All span positions are absolute byte offsets that can be used on the +/// original regular expression that was parsed. 
+#[derive(Clone, Copy, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Span { + /// The start byte offset. + pub start: Position, + /// The end byte offset. + pub end: Position, +} + +impl core::fmt::Debug for Span { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "Span({:?}, {:?})", self.start, self.end) + } +} + +impl Ord for Span { + fn cmp(&self, other: &Span) -> Ordering { + (&self.start, &self.end).cmp(&(&other.start, &other.end)) + } +} + +impl PartialOrd for Span { + fn partial_cmp(&self, other: &Span) -> Option { + Some(self.cmp(other)) + } +} + +/// A single position in a regular expression. +/// +/// A position encodes one half of a span, and include the byte offset, line +/// number and column number. +#[derive(Clone, Copy, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Position { + /// The absolute offset of this position, starting at `0` from the + /// beginning of the regular expression pattern string. + pub offset: usize, + /// The line number, starting at `1`. + pub line: usize, + /// The approximate column number, starting at `1`. + pub column: usize, +} + +impl core::fmt::Debug for Position { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "Position(o: {:?}, l: {:?}, c: {:?})", + self.offset, self.line, self.column + ) + } +} + +impl Ord for Position { + fn cmp(&self, other: &Position) -> Ordering { + self.offset.cmp(&other.offset) + } +} + +impl PartialOrd for Position { + fn partial_cmp(&self, other: &Position) -> Option { + Some(self.cmp(other)) + } +} + +impl Span { + /// Create a new span with the given positions. + pub fn new(start: Position, end: Position) -> Span { + Span { start, end } + } + + /// Create a new span using the given position as the start and end. 
+ pub fn splat(pos: Position) -> Span { + Span::new(pos, pos) + } + + /// Create a new span by replacing the starting the position with the one + /// given. + pub fn with_start(self, pos: Position) -> Span { + Span { start: pos, ..self } + } + + /// Create a new span by replacing the ending the position with the one + /// given. + pub fn with_end(self, pos: Position) -> Span { + Span { end: pos, ..self } + } + + /// Returns true if and only if this span occurs on a single line. + pub fn is_one_line(&self) -> bool { + self.start.line == self.end.line + } + + /// Returns true if and only if this span is empty. That is, it points to + /// a single position in the concrete syntax of a regular expression. + pub fn is_empty(&self) -> bool { + self.start.offset == self.end.offset + } +} + +impl Position { + /// Create a new position with the given information. + /// + /// `offset` is the absolute offset of the position, starting at `0` from + /// the beginning of the regular expression pattern string. + /// + /// `line` is the line number, starting at `1`. + /// + /// `column` is the approximate column number, starting at `1`. + pub fn new(offset: usize, line: usize, column: usize) -> Position { + Position { offset, line, column } + } +} + +/// An abstract syntax tree for a singular expression along with comments +/// found. +/// +/// Comments are not stored in the tree itself to avoid complexity. Each +/// comment contains a span of precisely where it occurred in the original +/// regular expression. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct WithComments { + /// The actual ast. + pub ast: Ast, + /// All comments found in the original regular expression. + pub comments: Vec, +} + +/// A comment from a regular expression with an associated span. +/// +/// A regular expression can only contain comments when the `x` flag is +/// enabled. 
+#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Comment { + /// The span of this comment, including the beginning `#` and ending `\n`. + pub span: Span, + /// The comment text, starting with the first character following the `#` + /// and ending with the last character preceding the `\n`. + pub comment: String, +} + +/// An abstract syntax tree for a single regular expression. +/// +/// An `Ast`'s `fmt::Display` implementation uses constant stack space and heap +/// space proportional to the size of the `Ast`. +/// +/// This type defines its own destructor that uses constant stack space and +/// heap space proportional to the size of the `Ast`. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum Ast { + /// An empty regex that matches everything. + Empty(Box), + /// A set of flags, e.g., `(?is)`. + Flags(Box), + /// A single character literal, which includes escape sequences. + Literal(Box), + /// The "any character" class. + Dot(Box), + /// A single zero-width assertion. + Assertion(Box), + /// A single Unicode character class, e.g., `\pL` or `\p{Greek}`. + ClassUnicode(Box), + /// A single perl character class, e.g., `\d` or `\W`. + ClassPerl(Box), + /// A single bracketed character class set, which may contain zero or more + /// character ranges and/or zero or more nested classes. e.g., + /// `[a-zA-Z\pL]`. + ClassBracketed(Box), + /// A repetition operator applied to an arbitrary regular expression. + Repetition(Box), + /// A grouped regular expression. + Group(Box), + /// An alternation of regular expressions. + Alternation(Box), + /// A concatenation of regular expressions. + Concat(Box), +} + +impl Ast { + /// Create an "empty" AST item. + pub fn empty(span: Span) -> Ast { + Ast::Empty(Box::new(span)) + } + + /// Create a "flags" AST item. 
+ pub fn flags(e: SetFlags) -> Ast { + Ast::Flags(Box::new(e)) + } + + /// Create a "literal" AST item. + pub fn literal(e: Literal) -> Ast { + Ast::Literal(Box::new(e)) + } + + /// Create a "dot" AST item. + pub fn dot(span: Span) -> Ast { + Ast::Dot(Box::new(span)) + } + + /// Create a "assertion" AST item. + pub fn assertion(e: Assertion) -> Ast { + Ast::Assertion(Box::new(e)) + } + + /// Create a "Unicode class" AST item. + pub fn class_unicode(e: ClassUnicode) -> Ast { + Ast::ClassUnicode(Box::new(e)) + } + + /// Create a "Perl class" AST item. + pub fn class_perl(e: ClassPerl) -> Ast { + Ast::ClassPerl(Box::new(e)) + } + + /// Create a "bracketed class" AST item. + pub fn class_bracketed(e: ClassBracketed) -> Ast { + Ast::ClassBracketed(Box::new(e)) + } + + /// Create a "repetition" AST item. + pub fn repetition(e: Repetition) -> Ast { + Ast::Repetition(Box::new(e)) + } + + /// Create a "group" AST item. + pub fn group(e: Group) -> Ast { + Ast::Group(Box::new(e)) + } + + /// Create a "alternation" AST item. + pub fn alternation(e: Alternation) -> Ast { + Ast::Alternation(Box::new(e)) + } + + /// Create a "concat" AST item. + pub fn concat(e: Concat) -> Ast { + Ast::Concat(Box::new(e)) + } + + /// Return the span of this abstract syntax tree. + pub fn span(&self) -> &Span { + match *self { + Ast::Empty(ref span) => span, + Ast::Flags(ref x) => &x.span, + Ast::Literal(ref x) => &x.span, + Ast::Dot(ref span) => span, + Ast::Assertion(ref x) => &x.span, + Ast::ClassUnicode(ref x) => &x.span, + Ast::ClassPerl(ref x) => &x.span, + Ast::ClassBracketed(ref x) => &x.span, + Ast::Repetition(ref x) => &x.span, + Ast::Group(ref x) => &x.span, + Ast::Alternation(ref x) => &x.span, + Ast::Concat(ref x) => &x.span, + } + } + + /// Return true if and only if this Ast is empty. + pub fn is_empty(&self) -> bool { + match *self { + Ast::Empty(_) => true, + _ => false, + } + } + + /// Returns true if and only if this AST has any (including possibly empty) + /// subexpressions. 
+ fn has_subexprs(&self) -> bool { + match *self { + Ast::Empty(_) + | Ast::Flags(_) + | Ast::Literal(_) + | Ast::Dot(_) + | Ast::Assertion(_) + | Ast::ClassUnicode(_) + | Ast::ClassPerl(_) => false, + Ast::ClassBracketed(_) + | Ast::Repetition(_) + | Ast::Group(_) + | Ast::Alternation(_) + | Ast::Concat(_) => true, + } + } +} + +/// Print a display representation of this Ast. +/// +/// This does not preserve any of the original whitespace formatting that may +/// have originally been present in the concrete syntax from which this Ast +/// was generated. +/// +/// This implementation uses constant stack space and heap space proportional +/// to the size of the `Ast`. +impl core::fmt::Display for Ast { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use crate::ast::print::Printer; + Printer::new().print(self, f) + } +} + +/// An alternation of regular expressions. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Alternation { + /// The span of this alternation. + pub span: Span, + /// The alternate regular expressions. + pub asts: Vec, +} + +impl Alternation { + /// Return this alternation as an AST. + /// + /// If this alternation contains zero ASTs, then `Ast::empty` is returned. + /// If this alternation contains exactly 1 AST, then the corresponding AST + /// is returned. Otherwise, `Ast::alternation` is returned. + pub fn into_ast(mut self) -> Ast { + match self.asts.len() { + 0 => Ast::empty(self.span), + 1 => self.asts.pop().unwrap(), + _ => Ast::alternation(self), + } + } +} + +/// A concatenation of regular expressions. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Concat { + /// The span of this concatenation. + pub span: Span, + /// The concatenation regular expressions. + pub asts: Vec, +} + +impl Concat { + /// Return this concatenation as an AST. 
+ /// + /// If this alternation contains zero ASTs, then `Ast::empty` is returned. + /// If this alternation contains exactly 1 AST, then the corresponding AST + /// is returned. Otherwise, `Ast::concat` is returned. + pub fn into_ast(mut self) -> Ast { + match self.asts.len() { + 0 => Ast::empty(self.span), + 1 => self.asts.pop().unwrap(), + _ => Ast::concat(self), + } + } +} + +/// A single literal expression. +/// +/// A literal corresponds to a single Unicode scalar value. Literals may be +/// represented in their literal form, e.g., `a` or in their escaped form, +/// e.g., `\x61`. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Literal { + /// The span of this literal. + pub span: Span, + /// The kind of this literal. + pub kind: LiteralKind, + /// The Unicode scalar value corresponding to this literal. + pub c: char, +} + +impl Literal { + /// If this literal was written as a `\x` hex escape, then this returns + /// the corresponding byte value. Otherwise, this returns `None`. + pub fn byte(&self) -> Option { + match self.kind { + LiteralKind::HexFixed(HexLiteralKind::X) => { + u8::try_from(self.c).ok() + } + _ => None, + } + } +} + +/// The kind of a single literal expression. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum LiteralKind { + /// The literal is written verbatim, e.g., `a` or `☃`. + Verbatim, + /// The literal is written as an escape because it is otherwise a special + /// regex meta character, e.g., `\*` or `\[`. + Meta, + /// The literal is written as an escape despite the fact that the escape is + /// unnecessary, e.g., `\%` or `\/`. + Superfluous, + /// The literal is written as an octal escape, e.g., `\141`. + Octal, + /// The literal is written as a hex code with a fixed number of digits + /// depending on the type of the escape, e.g., `\x61` or `\u0061` or + /// `\U00000061`. 
+ HexFixed(HexLiteralKind), + /// The literal is written as a hex code with a bracketed number of + /// digits. The only restriction is that the bracketed hex code must refer + /// to a valid Unicode scalar value. + HexBrace(HexLiteralKind), + /// The literal is written as a specially recognized escape, e.g., `\f` + /// or `\n`. + Special(SpecialLiteralKind), +} + +/// The type of a special literal. +/// +/// A special literal is a special escape sequence recognized by the regex +/// parser, e.g., `\f` or `\n`. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum SpecialLiteralKind { + /// Bell, spelled `\a` (`\x07`). + Bell, + /// Form feed, spelled `\f` (`\x0C`). + FormFeed, + /// Tab, spelled `\t` (`\x09`). + Tab, + /// Line feed, spelled `\n` (`\x0A`). + LineFeed, + /// Carriage return, spelled `\r` (`\x0D`). + CarriageReturn, + /// Vertical tab, spelled `\v` (`\x0B`). + VerticalTab, + /// Space, spelled `\ ` (`\x20`). Note that this can only appear when + /// parsing in verbose mode. + Space, +} + +/// The type of a Unicode hex literal. +/// +/// Note that all variants behave the same when used with brackets. They only +/// differ when used without brackets in the number of hex digits that must +/// follow. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum HexLiteralKind { + /// A `\x` prefix. When used without brackets, this form is limited to + /// two digits. + X, + /// A `\u` prefix. When used without brackets, this form is limited to + /// four digits. + UnicodeShort, + /// A `\U` prefix. When used without brackets, this form is limited to + /// eight digits. + UnicodeLong, +} + +impl HexLiteralKind { + /// The number of digits that must be used with this literal form when + /// used without brackets. When used with brackets, there is no + /// restriction on the number of digits. 
+ pub fn digits(&self) -> u32 { + match *self { + HexLiteralKind::X => 2, + HexLiteralKind::UnicodeShort => 4, + HexLiteralKind::UnicodeLong => 8, + } + } +} + +/// A Perl character class. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassPerl { + /// The span of this class. + pub span: Span, + /// The kind of Perl class. + pub kind: ClassPerlKind, + /// Whether the class is negated or not. e.g., `\d` is not negated but + /// `\D` is. + pub negated: bool, +} + +/// The available Perl character classes. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassPerlKind { + /// Decimal numbers. + Digit, + /// Whitespace. + Space, + /// Word characters. + Word, +} + +/// An ASCII character class. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassAscii { + /// The span of this class. + pub span: Span, + /// The kind of ASCII class. + pub kind: ClassAsciiKind, + /// Whether the class is negated or not. e.g., `[[:alpha:]]` is not negated + /// but `[[:^alpha:]]` is. + pub negated: bool, +} + +/// The available ASCII character classes. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassAsciiKind { + /// `[0-9A-Za-z]` + Alnum, + /// `[A-Za-z]` + Alpha, + /// `[\x00-\x7F]` + Ascii, + /// `[ \t]` + Blank, + /// `[\x00-\x1F\x7F]` + Cntrl, + /// `[0-9]` + Digit, + /// `[!-~]` + Graph, + /// `[a-z]` + Lower, + /// `[ -~]` + Print, + /// ``[!-/:-@\[-`{-~]`` + Punct, + /// `[\t\n\v\f\r ]` + Space, + /// `[A-Z]` + Upper, + /// `[0-9A-Za-z_]` + Word, + /// `[0-9A-Fa-f]` + Xdigit, +} + +impl ClassAsciiKind { + /// Return the corresponding ClassAsciiKind variant for the given name. + /// + /// The name given should correspond to the lowercase version of the + /// variant name. 
e.g., `cntrl` is the name for `ClassAsciiKind::Cntrl`. + /// + /// If no variant with the corresponding name exists, then `None` is + /// returned. + pub fn from_name(name: &str) -> Option { + use self::ClassAsciiKind::*; + match name { + "alnum" => Some(Alnum), + "alpha" => Some(Alpha), + "ascii" => Some(Ascii), + "blank" => Some(Blank), + "cntrl" => Some(Cntrl), + "digit" => Some(Digit), + "graph" => Some(Graph), + "lower" => Some(Lower), + "print" => Some(Print), + "punct" => Some(Punct), + "space" => Some(Space), + "upper" => Some(Upper), + "word" => Some(Word), + "xdigit" => Some(Xdigit), + _ => None, + } + } +} + +/// A Unicode character class. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassUnicode { + /// The span of this class. + pub span: Span, + /// Whether this class is negated or not. + /// + /// Note: be careful when using this attribute. This specifically refers + /// to whether the class is written as `\p` or `\P`, where the latter + /// is `negated = true`. However, it also possible to write something like + /// `\P{scx!=Katakana}` which is actually equivalent to + /// `\p{scx=Katakana}` and is therefore not actually negated even though + /// `negated = true` here. To test whether this class is truly negated + /// or not, use the `is_negated` method. + pub negated: bool, + /// The kind of Unicode class. + pub kind: ClassUnicodeKind, +} + +impl ClassUnicode { + /// Returns true if this class has been negated. + /// + /// Note that this takes the Unicode op into account, if it's present. + /// e.g., `is_negated` for `\P{scx!=Katakana}` will return `false`. + pub fn is_negated(&self) -> bool { + match self.kind { + ClassUnicodeKind::NamedValue { + op: ClassUnicodeOpKind::NotEqual, + .. + } => !self.negated, + _ => self.negated, + } + } +} + +/// The available forms of Unicode character classes. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClassUnicodeKind { + /// A one letter abbreviated class, e.g., `\pN`. + OneLetter(char), + /// A binary property, general category or script. The string may be + /// empty. + Named(String), + /// A property name and an associated value. + NamedValue { + /// The type of Unicode op used to associate `name` with `value`. + op: ClassUnicodeOpKind, + /// The property name (which may be empty). + name: String, + /// The property value (which may be empty). + value: String, + }, +} + +#[cfg(feature = "arbitrary")] +impl arbitrary::Arbitrary<'_> for ClassUnicodeKind { + fn arbitrary( + u: &mut arbitrary::Unstructured, + ) -> arbitrary::Result { + #[cfg(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + ))] + { + use alloc::string::ToString; + + use super::unicode_tables::{ + property_names::PROPERTY_NAMES, + property_values::PROPERTY_VALUES, + }; + + match u.choose_index(3)? 
{ + 0 => { + let all = PROPERTY_VALUES + .iter() + .flat_map(|e| e.1.iter()) + .filter(|(name, _)| name.len() == 1) + .count(); + let idx = u.choose_index(all)?; + let value = PROPERTY_VALUES + .iter() + .flat_map(|e| e.1.iter()) + .take(idx + 1) + .last() + .unwrap() + .0 + .chars() + .next() + .unwrap(); + Ok(ClassUnicodeKind::OneLetter(value)) + } + 1 => { + let all = PROPERTY_VALUES + .iter() + .map(|e| e.1.len()) + .sum::() + + PROPERTY_NAMES.len(); + let idx = u.choose_index(all)?; + let name = PROPERTY_VALUES + .iter() + .flat_map(|e| e.1.iter()) + .chain(PROPERTY_NAMES) + .map(|(_, e)| e) + .take(idx + 1) + .last() + .unwrap(); + Ok(ClassUnicodeKind::Named(name.to_string())) + } + 2 => { + let all = PROPERTY_VALUES + .iter() + .map(|e| e.1.len()) + .sum::(); + let idx = u.choose_index(all)?; + let (prop, value) = PROPERTY_VALUES + .iter() + .flat_map(|e| { + e.1.iter().map(|(_, value)| (e.0, value)) + }) + .take(idx + 1) + .last() + .unwrap(); + Ok(ClassUnicodeKind::NamedValue { + op: u.arbitrary()?, + name: prop.to_string(), + value: value.to_string(), + }) + } + _ => unreachable!("index chosen is impossible"), + } + } + #[cfg(not(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + )))] + { + match u.choose_index(3)? 
{ + 0 => Ok(ClassUnicodeKind::OneLetter(u.arbitrary()?)), + 1 => Ok(ClassUnicodeKind::Named(u.arbitrary()?)), + 2 => Ok(ClassUnicodeKind::NamedValue { + op: u.arbitrary()?, + name: u.arbitrary()?, + value: u.arbitrary()?, + }), + _ => unreachable!("index chosen is impossible"), + } + } + } + + fn size_hint(depth: usize) -> (usize, Option) { + #[cfg(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + ))] + { + arbitrary::size_hint::and_all(&[ + usize::size_hint(depth), + usize::size_hint(depth), + arbitrary::size_hint::or( + (0, Some(0)), + ClassUnicodeOpKind::size_hint(depth), + ), + ]) + } + #[cfg(not(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + )))] + { + arbitrary::size_hint::and( + usize::size_hint(depth), + arbitrary::size_hint::or_all(&[ + char::size_hint(depth), + String::size_hint(depth), + arbitrary::size_hint::and_all(&[ + String::size_hint(depth), + String::size_hint(depth), + ClassUnicodeOpKind::size_hint(depth), + ]), + ]), + ) + } + } +} + +/// The type of op used in a Unicode character class. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassUnicodeOpKind { + /// A property set to a specific value, e.g., `\p{scx=Katakana}`. + Equal, + /// A property set to a specific value using a colon, e.g., + /// `\p{scx:Katakana}`. + Colon, + /// A property that isn't a particular value, e.g., `\p{scx!=Katakana}`. + NotEqual, +} + +impl ClassUnicodeOpKind { + /// Whether the op is an equality op or not. + pub fn is_equal(&self) -> bool { + match *self { + ClassUnicodeOpKind::Equal | ClassUnicodeOpKind::Colon => true, + _ => false, + } + } +} + +/// A bracketed character class, e.g., `[a-z0-9]`. 
+#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassBracketed { + /// The span of this class. + pub span: Span, + /// Whether this class is negated or not. e.g., `[a]` is not negated but + /// `[^a]` is. + pub negated: bool, + /// The type of this set. A set is either a normal union of things, e.g., + /// `[abc]` or a result of applying set operations, e.g., `[\pL--c]`. + pub kind: ClassSet, +} + +/// A character class set. +/// +/// This type corresponds to the internal structure of a bracketed character +/// class. That is, every bracketed character is one of two types: a union of +/// items (literals, ranges, other bracketed classes) or a tree of binary set +/// operations. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassSet { + /// An item, which can be a single literal, range, nested character class + /// or a union of items. + Item(ClassSetItem), + /// A single binary operation (i.e., &&, -- or ~~). + BinaryOp(ClassSetBinaryOp), +} + +impl ClassSet { + /// Build a set from a union. + pub fn union(ast: ClassSetUnion) -> ClassSet { + ClassSet::Item(ClassSetItem::Union(ast)) + } + + /// Return the span of this character class set. + pub fn span(&self) -> &Span { + match *self { + ClassSet::Item(ref x) => x.span(), + ClassSet::BinaryOp(ref x) => &x.span, + } + } + + /// Return true if and only if this class set is empty. + fn is_empty(&self) -> bool { + match *self { + ClassSet::Item(ClassSetItem::Empty(_)) => true, + _ => false, + } + } +} + +/// A single component of a character class set. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassSetItem { + /// An empty item. + /// + /// Note that a bracketed character class cannot contain a single empty + /// item. Empty items can appear when using one of the binary operators. 
+ /// For example, `[&&]` is the intersection of two empty classes. + Empty(Span), + /// A single literal. + Literal(Literal), + /// A range between two literals. + Range(ClassSetRange), + /// An ASCII character class, e.g., `[:alnum:]` or `[:punct:]`. + Ascii(ClassAscii), + /// A Unicode character class, e.g., `\pL` or `\p{Greek}`. + Unicode(ClassUnicode), + /// A perl character class, e.g., `\d` or `\W`. + Perl(ClassPerl), + /// A bracketed character class set, which may contain zero or more + /// character ranges and/or zero or more nested classes. e.g., + /// `[a-zA-Z\pL]`. + Bracketed(Box), + /// A union of items. + Union(ClassSetUnion), +} + +impl ClassSetItem { + /// Return the span of this character class set item. + pub fn span(&self) -> &Span { + match *self { + ClassSetItem::Empty(ref span) => span, + ClassSetItem::Literal(ref x) => &x.span, + ClassSetItem::Range(ref x) => &x.span, + ClassSetItem::Ascii(ref x) => &x.span, + ClassSetItem::Perl(ref x) => &x.span, + ClassSetItem::Unicode(ref x) => &x.span, + ClassSetItem::Bracketed(ref x) => &x.span, + ClassSetItem::Union(ref x) => &x.span, + } + } +} + +/// A single character class range in a set. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassSetRange { + /// The span of this range. + pub span: Span, + /// The start of this range. + pub start: Literal, + /// The end of this range. + pub end: Literal, +} + +impl ClassSetRange { + /// Returns true if and only if this character class range is valid. + /// + /// The only case where a range is invalid is if its start is greater than + /// its end. + pub fn is_valid(&self) -> bool { + self.start.c <= self.end.c + } +} + +/// A union of items inside a character class set. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassSetUnion { + /// The span of the items in this operation. 
e.g., the `a-z0-9` in + /// `[^a-z0-9]` + pub span: Span, + /// The sequence of items that make up this union. + pub items: Vec, +} + +impl ClassSetUnion { + /// Push a new item in this union. + /// + /// The ending position of this union's span is updated to the ending + /// position of the span of the item given. If the union is empty, then + /// the starting position of this union is set to the starting position + /// of this item. + /// + /// In other words, if you only use this method to add items to a union + /// and you set the spans on each item correctly, then you should never + /// need to adjust the span of the union directly. + pub fn push(&mut self, item: ClassSetItem) { + if self.items.is_empty() { + self.span.start = item.span().start; + } + self.span.end = item.span().end; + self.items.push(item); + } + + /// Return this union as a character class set item. + /// + /// If this union contains zero items, then an empty union is + /// returned. If this concatenation contains exactly 1 item, then the + /// corresponding item is returned. Otherwise, ClassSetItem::Union is + /// returned. + pub fn into_item(mut self) -> ClassSetItem { + match self.items.len() { + 0 => ClassSetItem::Empty(self.span), + 1 => self.items.pop().unwrap(), + _ => ClassSetItem::Union(self), + } + } +} + +/// A Unicode character class set operation. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct ClassSetBinaryOp { + /// The span of this operation. e.g., the `a-z--[h-p]` in `[a-z--h-p]`. + pub span: Span, + /// The type of this set operation. + pub kind: ClassSetBinaryOpKind, + /// The left hand side of the operation. + pub lhs: Box, + /// The right hand side of the operation. + pub rhs: Box, +} + +/// The type of a Unicode character class set operation. +/// +/// Note that this doesn't explicitly represent union since there is no +/// explicit union operator. 
Concatenation inside a character class corresponds +/// to the union operation. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum ClassSetBinaryOpKind { + /// The intersection of two sets, e.g., `\pN&&[a-z]`. + Intersection, + /// The difference of two sets, e.g., `\pN--[0-9]`. + Difference, + /// The symmetric difference of two sets. The symmetric difference is the + /// set of elements belonging to one but not both sets. + /// e.g., `[\pL~~[:ascii:]]`. + SymmetricDifference, +} + +/// A single zero-width assertion. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Assertion { + /// The span of this assertion. + pub span: Span, + /// The assertion kind, e.g., `\b` or `^`. + pub kind: AssertionKind, +} + +/// An assertion kind. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum AssertionKind { + /// `^` + StartLine, + /// `$` + EndLine, + /// `\A` + StartText, + /// `\z` + EndText, + /// `\b` + WordBoundary, + /// `\B` + NotWordBoundary, + /// `\b{start}` + WordBoundaryStart, + /// `\b{end}` + WordBoundaryEnd, + /// `\<` (alias for `\b{start}`) + WordBoundaryStartAngle, + /// `\>` (alias for `\b{end}`) + WordBoundaryEndAngle, + /// `\b{start-half}` + WordBoundaryStartHalf, + /// `\b{end-half}` + WordBoundaryEndHalf, +} + +/// A repetition operation applied to a regular expression. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Repetition { + /// The span of this operation. + pub span: Span, + /// The actual operation. + pub op: RepetitionOp, + /// Whether this operation was applied greedily or not. + pub greedy: bool, + /// The regular expression under repetition. + pub ast: Box, +} + +/// The repetition operator itself. 
+#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct RepetitionOp { + /// The span of this operator. This includes things like `+`, `*?` and + /// `{m,n}`. + pub span: Span, + /// The type of operation. + pub kind: RepetitionKind, +} + +/// The kind of a repetition operator. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum RepetitionKind { + /// `?` + ZeroOrOne, + /// `*` + ZeroOrMore, + /// `+` + OneOrMore, + /// `{m,n}` + Range(RepetitionRange), +} + +/// A range repetition operator. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum RepetitionRange { + /// `{m}` + Exactly(u32), + /// `{m,}` + AtLeast(u32), + /// `{m,n}` + Bounded(u32, u32), +} + +impl RepetitionRange { + /// Returns true if and only if this repetition range is valid. + /// + /// The only case where a repetition range is invalid is if it is bounded + /// and its start is greater than its end. + pub fn is_valid(&self) -> bool { + match *self { + RepetitionRange::Bounded(s, e) if s > e => false, + _ => true, + } + } +} + +/// A grouped regular expression. +/// +/// This includes both capturing and non-capturing groups. This does **not** +/// include flag-only groups like `(?is)`, but does contain any group that +/// contains a sub-expression, e.g., `(a)`, `(?Pa)`, `(?:a)` and +/// `(?is:a)`. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Group { + /// The span of this group. + pub span: Span, + /// The kind of this group. + pub kind: GroupKind, + /// The regular expression in this group. + pub ast: Box, +} + +impl Group { + /// If this group is non-capturing, then this returns the (possibly empty) + /// set of flags. Otherwise, `None` is returned. 
+ pub fn flags(&self) -> Option<&Flags> { + match self.kind { + GroupKind::NonCapturing(ref flags) => Some(flags), + _ => None, + } + } + + /// Returns true if and only if this group is capturing. + pub fn is_capturing(&self) -> bool { + match self.kind { + GroupKind::CaptureIndex(_) | GroupKind::CaptureName { .. } => true, + GroupKind::NonCapturing(_) => false, + } + } + + /// Returns the capture index of this group, if this is a capturing group. + /// + /// This returns a capture index precisely when `is_capturing` is `true`. + pub fn capture_index(&self) -> Option { + match self.kind { + GroupKind::CaptureIndex(i) => Some(i), + GroupKind::CaptureName { ref name, .. } => Some(name.index), + GroupKind::NonCapturing(_) => None, + } + } +} + +/// The kind of a group. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum GroupKind { + /// `(a)` + CaptureIndex(u32), + /// `(?a)` or `(?Pa)` + CaptureName { + /// True if the `?P<` syntax is used and false if the `?<` syntax is used. + starts_with_p: bool, + /// The capture name. + name: CaptureName, + }, + /// `(?:a)` and `(?i:a)` + NonCapturing(Flags), +} + +/// A capture name. +/// +/// This corresponds to the name itself between the angle brackets in, e.g., +/// `(?Pexpr)`. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct CaptureName { + /// The span of this capture name. + pub span: Span, + /// The capture name. + pub name: String, + /// The capture index. 
+ pub index: u32, +} + +#[cfg(feature = "arbitrary")] +impl arbitrary::Arbitrary<'_> for CaptureName { + fn arbitrary( + u: &mut arbitrary::Unstructured, + ) -> arbitrary::Result { + let len = u.arbitrary_len::()?; + if len == 0 { + return Err(arbitrary::Error::NotEnoughData); + } + let mut name: String = String::new(); + for _ in 0..len { + let ch: char = u.arbitrary()?; + let cp = u32::from(ch); + let ascii_letter_offset = u8::try_from(cp % 26).unwrap(); + let ascii_letter = b'a' + ascii_letter_offset; + name.push(char::from(ascii_letter)); + } + Ok(CaptureName { span: u.arbitrary()?, name, index: u.arbitrary()? }) + } + + fn size_hint(depth: usize) -> (usize, Option) { + arbitrary::size_hint::and_all(&[ + Span::size_hint(depth), + usize::size_hint(depth), + u32::size_hint(depth), + ]) + } +} + +/// A group of flags that is not applied to a particular regular expression. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct SetFlags { + /// The span of these flags, including the grouping parentheses. + pub span: Span, + /// The actual sequence of flags. + pub flags: Flags, +} + +/// A group of flags. +/// +/// This corresponds only to the sequence of flags themselves, e.g., `is-u`. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct Flags { + /// The span of this group of flags. + pub span: Span, + /// A sequence of flag items. Each item is either a flag or a negation + /// operator. + pub items: Vec, +} + +impl Flags { + /// Add the given item to this sequence of flags. + /// + /// If the item was added successfully, then `None` is returned. If the + /// given item is a duplicate, then `Some(i)` is returned, where + /// `items[i].kind == item.kind`. 
+ pub fn add_item(&mut self, item: FlagsItem) -> Option { + for (i, x) in self.items.iter().enumerate() { + if x.kind == item.kind { + return Some(i); + } + } + self.items.push(item); + None + } + + /// Returns the state of the given flag in this set. + /// + /// If the given flag is in the set but is negated, then `Some(false)` is + /// returned. + /// + /// If the given flag is in the set and is not negated, then `Some(true)` + /// is returned. + /// + /// Otherwise, `None` is returned. + pub fn flag_state(&self, flag: Flag) -> Option { + let mut negated = false; + for x in &self.items { + match x.kind { + FlagsItemKind::Negation => { + negated = true; + } + FlagsItemKind::Flag(ref xflag) if xflag == &flag => { + return Some(!negated); + } + _ => {} + } + } + None + } +} + +/// A single item in a group of flags. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub struct FlagsItem { + /// The span of this item. + pub span: Span, + /// The kind of this item. + pub kind: FlagsItemKind, +} + +/// The kind of an item in a group of flags. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum FlagsItemKind { + /// A negation operator applied to all subsequent flags in the enclosing + /// group. + Negation, + /// A single flag in a group. + Flag(Flag), +} + +impl FlagsItemKind { + /// Returns true if and only if this item is a negation operator. + pub fn is_negation(&self) -> bool { + match *self { + FlagsItemKind::Negation => true, + _ => false, + } + } +} + +/// A single flag. 
+#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +pub enum Flag { + /// `i` + CaseInsensitive, + /// `m` + MultiLine, + /// `s` + DotMatchesNewLine, + /// `U` + SwapGreed, + /// `u` + Unicode, + /// `R` + CRLF, + /// `x` + IgnoreWhitespace, +} + +/// A custom `Drop` impl is used for `Ast` such that it uses constant stack +/// space but heap space proportional to the depth of the `Ast`. +impl Drop for Ast { + fn drop(&mut self) { + use core::mem; + + match *self { + Ast::Empty(_) + | Ast::Flags(_) + | Ast::Literal(_) + | Ast::Dot(_) + | Ast::Assertion(_) + | Ast::ClassUnicode(_) + | Ast::ClassPerl(_) + // Bracketed classes are recursive, they get their own Drop impl. + | Ast::ClassBracketed(_) => return, + Ast::Repetition(ref x) if !x.ast.has_subexprs() => return, + Ast::Group(ref x) if !x.ast.has_subexprs() => return, + Ast::Alternation(ref x) if x.asts.is_empty() => return, + Ast::Concat(ref x) if x.asts.is_empty() => return, + _ => {} + } + + let empty_span = || Span::splat(Position::new(0, 0, 0)); + let empty_ast = || Ast::empty(empty_span()); + let mut stack = vec![mem::replace(self, empty_ast())]; + while let Some(mut ast) = stack.pop() { + match ast { + Ast::Empty(_) + | Ast::Flags(_) + | Ast::Literal(_) + | Ast::Dot(_) + | Ast::Assertion(_) + | Ast::ClassUnicode(_) + | Ast::ClassPerl(_) + // Bracketed classes are recursive, so they get their own Drop + // impl. + | Ast::ClassBracketed(_) => {} + Ast::Repetition(ref mut x) => { + stack.push(mem::replace(&mut x.ast, empty_ast())); + } + Ast::Group(ref mut x) => { + stack.push(mem::replace(&mut x.ast, empty_ast())); + } + Ast::Alternation(ref mut x) => { + stack.extend(x.asts.drain(..)); + } + Ast::Concat(ref mut x) => { + stack.extend(x.asts.drain(..)); + } + } + } + } +} + +/// A custom `Drop` impl is used for `ClassSet` such that it uses constant +/// stack space but heap space proportional to the depth of the `ClassSet`. 
+impl Drop for ClassSet { + fn drop(&mut self) { + use core::mem; + + match *self { + ClassSet::Item(ref item) => match *item { + ClassSetItem::Empty(_) + | ClassSetItem::Literal(_) + | ClassSetItem::Range(_) + | ClassSetItem::Ascii(_) + | ClassSetItem::Unicode(_) + | ClassSetItem::Perl(_) => return, + ClassSetItem::Bracketed(ref x) => { + if x.kind.is_empty() { + return; + } + } + ClassSetItem::Union(ref x) => { + if x.items.is_empty() { + return; + } + } + }, + ClassSet::BinaryOp(ref op) => { + if op.lhs.is_empty() && op.rhs.is_empty() { + return; + } + } + } + + let empty_span = || Span::splat(Position::new(0, 0, 0)); + let empty_set = || ClassSet::Item(ClassSetItem::Empty(empty_span())); + let mut stack = vec![mem::replace(self, empty_set())]; + while let Some(mut set) = stack.pop() { + match set { + ClassSet::Item(ref mut item) => match *item { + ClassSetItem::Empty(_) + | ClassSetItem::Literal(_) + | ClassSetItem::Range(_) + | ClassSetItem::Ascii(_) + | ClassSetItem::Unicode(_) + | ClassSetItem::Perl(_) => {} + ClassSetItem::Bracketed(ref mut x) => { + stack.push(mem::replace(&mut x.kind, empty_set())); + } + ClassSetItem::Union(ref mut x) => { + stack.extend(x.items.drain(..).map(ClassSet::Item)); + } + }, + ClassSet::BinaryOp(ref mut op) => { + stack.push(mem::replace(&mut op.lhs, empty_set())); + stack.push(mem::replace(&mut op.rhs, empty_set())); + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // We use a thread with an explicit stack size to test that our destructor + // for Ast can handle arbitrarily sized expressions in constant stack + // space. In case we run on a platform without threads (WASM?), we limit + // this test to Windows/Unix. 
+ #[test] + #[cfg(any(unix, windows))] + fn no_stack_overflow_on_drop() { + use std::thread; + + let run = || { + let span = || Span::splat(Position::new(0, 0, 0)); + let mut ast = Ast::empty(span()); + for i in 0..200 { + ast = Ast::group(Group { + span: span(), + kind: GroupKind::CaptureIndex(i), + ast: Box::new(ast), + }); + } + assert!(!ast.is_empty()); + }; + + // We run our test on a thread with a small stack size so we can + // force the issue more easily. + // + // NOTE(2023-03-21): It turns out that some platforms (like FreeBSD) + // will just barf with very small stack sizes. So we bump this up a bit + // to give more room to breath. When I did this, I confirmed that if + // I remove the custom `Drop` impl for `Ast`, then this test does + // indeed still fail with a stack overflow. (At the time of writing, I + // had to bump it all the way up to 32K before the test would pass even + // without the custom `Drop` impl. So 16K seems like a safe number + // here.) + // + // See: https://github.com/rust-lang/regex/issues/967 + thread::Builder::new() + .stack_size(16 << 10) + .spawn(run) + .unwrap() + .join() + .unwrap(); + } + + // This tests that our `Ast` has a reasonable size. This isn't a hard rule + // and it can be increased if given a good enough reason. But this test + // exists because the size of `Ast` was at one point over 200 bytes on a + // 64-bit target. Wow. 
+ #[test] + fn ast_size() { + let max = 2 * core::mem::size_of::(); + let size = core::mem::size_of::(); + assert!( + size <= max, + "Ast size of {size} bytes is bigger than suggested max {max}", + ); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/parse.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/parse.rs new file mode 100644 index 0000000000000000000000000000000000000000..bdaab72283857aeddcb798db35babca6fbc8f247 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/parse.rs @@ -0,0 +1,6377 @@ +/*! +This module provides a regular expression parser. +*/ + +use core::{ + borrow::Borrow, + cell::{Cell, RefCell}, + mem, +}; + +use alloc::{ + boxed::Box, + string::{String, ToString}, + vec, + vec::Vec, +}; + +use crate::{ + ast::{self, Ast, Position, Span}, + either::Either, + is_escapeable_character, is_meta_character, +}; + +type Result = core::result::Result; + +/// A primitive is an expression with no sub-expressions. This includes +/// literals, assertions and non-set character classes. This representation +/// is used as intermediate state in the parser. +/// +/// This does not include ASCII character classes, since they can only appear +/// within a set character class. +#[derive(Clone, Debug, Eq, PartialEq)] +enum Primitive { + Literal(ast::Literal), + Assertion(ast::Assertion), + Dot(Span), + Perl(ast::ClassPerl), + Unicode(ast::ClassUnicode), +} + +impl Primitive { + /// Return the span of this primitive. + fn span(&self) -> &Span { + match *self { + Primitive::Literal(ref x) => &x.span, + Primitive::Assertion(ref x) => &x.span, + Primitive::Dot(ref span) => span, + Primitive::Perl(ref x) => &x.span, + Primitive::Unicode(ref x) => &x.span, + } + } + + /// Convert this primitive into a proper AST. 
+ fn into_ast(self) -> Ast { + match self { + Primitive::Literal(lit) => Ast::literal(lit), + Primitive::Assertion(assert) => Ast::assertion(assert), + Primitive::Dot(span) => Ast::dot(span), + Primitive::Perl(cls) => Ast::class_perl(cls), + Primitive::Unicode(cls) => Ast::class_unicode(cls), + } + } + + /// Convert this primitive into an item in a character class. + /// + /// If this primitive is not a legal item (i.e., an assertion or a dot), + /// then return an error. + fn into_class_set_item>( + self, + p: &ParserI<'_, P>, + ) -> Result { + use self::Primitive::*; + use crate::ast::ClassSetItem; + + match self { + Literal(lit) => Ok(ClassSetItem::Literal(lit)), + Perl(cls) => Ok(ClassSetItem::Perl(cls)), + Unicode(cls) => Ok(ClassSetItem::Unicode(cls)), + x => Err(p.error(*x.span(), ast::ErrorKind::ClassEscapeInvalid)), + } + } + + /// Convert this primitive into a literal in a character class. In + /// particular, literals are the only valid items that can appear in + /// ranges. + /// + /// If this primitive is not a legal item (i.e., a class, assertion or a + /// dot), then return an error. + fn into_class_literal>( + self, + p: &ParserI<'_, P>, + ) -> Result { + use self::Primitive::*; + + match self { + Literal(lit) => Ok(lit), + x => Err(p.error(*x.span(), ast::ErrorKind::ClassRangeLiteral)), + } + } +} + +/// Returns true if the given character is a hexadecimal digit. +fn is_hex(c: char) -> bool { + ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F') +} + +/// Returns true if the given character is a valid in a capture group name. +/// +/// If `first` is true, then `c` is treated as the first character in the +/// group name (which must be alphabetic or underscore). +fn is_capture_char(c: char, first: bool) -> bool { + if first { + c == '_' || c.is_alphabetic() + } else { + c == '_' || c == '.' || c == '[' || c == ']' || c.is_alphanumeric() + } +} + +/// A builder for a regular expression parser. 
+/// +/// This builder permits modifying configuration options for the parser. +#[derive(Clone, Debug)] +pub struct ParserBuilder { + ignore_whitespace: bool, + nest_limit: u32, + octal: bool, + empty_min_range: bool, +} + +impl Default for ParserBuilder { + fn default() -> ParserBuilder { + ParserBuilder::new() + } +} + +impl ParserBuilder { + /// Create a new parser builder with a default configuration. + pub fn new() -> ParserBuilder { + ParserBuilder { + ignore_whitespace: false, + nest_limit: 250, + octal: false, + empty_min_range: false, + } + } + + /// Build a parser from this configuration with the given pattern. + pub fn build(&self) -> Parser { + Parser { + pos: Cell::new(Position { offset: 0, line: 1, column: 1 }), + capture_index: Cell::new(0), + nest_limit: self.nest_limit, + octal: self.octal, + empty_min_range: self.empty_min_range, + initial_ignore_whitespace: self.ignore_whitespace, + ignore_whitespace: Cell::new(self.ignore_whitespace), + comments: RefCell::new(vec![]), + stack_group: RefCell::new(vec![]), + stack_class: RefCell::new(vec![]), + capture_names: RefCell::new(vec![]), + scratch: RefCell::new(String::new()), + } + } + + /// Set the nesting limit for this parser. + /// + /// The nesting limit controls how deep the abstract syntax tree is allowed + /// to be. If the AST exceeds the given limit (e.g., with too many nested + /// groups), then an error is returned by the parser. + /// + /// The purpose of this limit is to act as a heuristic to prevent stack + /// overflow for consumers that do structural induction on an `Ast` using + /// explicit recursion. While this crate never does this (instead using + /// constant stack space and moving the call stack to the heap), other + /// crates may. + /// + /// This limit is not checked until the entire AST is parsed. 
Therefore, + /// if callers want to put a limit on the amount of heap space used, then + /// they should impose a limit on the length, in bytes, of the concrete + /// pattern string. In particular, this is viable since this parser + /// implementation will limit itself to heap space proportional to the + /// length of the pattern string. + /// + /// Note that a nest limit of `0` will return a nest limit error for most + /// patterns but not all. For example, a nest limit of `0` permits `a` but + /// not `ab`, since `ab` requires a concatenation, which results in a nest + /// depth of `1`. In general, a nest limit is not something that manifests + /// in an obvious way in the concrete syntax, therefore, it should not be + /// used in a granular way. + pub fn nest_limit(&mut self, limit: u32) -> &mut ParserBuilder { + self.nest_limit = limit; + self + } + + /// Whether to support octal syntax or not. + /// + /// Octal syntax is a little-known way of uttering Unicode codepoints in + /// a regular expression. For example, `a`, `\x61`, `\u0061` and + /// `\141` are all equivalent regular expressions, where the last example + /// shows octal syntax. + /// + /// While supporting octal syntax isn't in and of itself a problem, it does + /// make good error messages harder. That is, in PCRE based regex engines, + /// syntax like `\0` invokes a backreference, which is explicitly + /// unsupported in Rust's regex engine. However, many users expect it to + /// be supported. Therefore, when octal support is disabled, the error + /// message will explicitly mention that backreferences aren't supported. + /// + /// Octal syntax is disabled by default. + pub fn octal(&mut self, yes: bool) -> &mut ParserBuilder { + self.octal = yes; + self + } + + /// Enable verbose mode in the regular expression. + /// + /// When enabled, verbose mode permits insignificant whitespace in many + /// places in the regular expression, as well as comments. 
Comments are + /// started using `#` and continue until the end of the line. + /// + /// By default, this is disabled. It may be selectively enabled in the + /// regular expression by using the `x` flag regardless of this setting. + pub fn ignore_whitespace(&mut self, yes: bool) -> &mut ParserBuilder { + self.ignore_whitespace = yes; + self + } + + /// Allow using `{,n}` as an equivalent to `{0,n}`. + /// + /// When enabled, the parser accepts `{,n}` as valid syntax for `{0,n}`. + /// Most regular expression engines don't support the `{,n}` syntax, but + /// some others do it, namely Python's `re` library. + /// + /// This is disabled by default. + pub fn empty_min_range(&mut self, yes: bool) -> &mut ParserBuilder { + self.empty_min_range = yes; + self + } +} + +/// A regular expression parser. +/// +/// This parses a string representation of a regular expression into an +/// abstract syntax tree. The size of the tree is proportional to the length +/// of the regular expression pattern. +/// +/// A `Parser` can be configured in more detail via a [`ParserBuilder`]. +#[derive(Clone, Debug)] +pub struct Parser { + /// The current position of the parser. + pos: Cell, + /// The current capture index. + capture_index: Cell, + /// The maximum number of open parens/brackets allowed. If the parser + /// exceeds this number, then an error is returned. + nest_limit: u32, + /// Whether to support octal syntax or not. When `false`, the parser will + /// return an error helpfully pointing out that backreferences are not + /// supported. + octal: bool, + /// The initial setting for `ignore_whitespace` as provided by + /// `ParserBuilder`. It is used when resetting the parser's state. + initial_ignore_whitespace: bool, + /// Whether the parser supports `{,n}` repetitions as an equivalent to + /// `{0,n}.` + empty_min_range: bool, + /// Whether whitespace should be ignored. When enabled, comments are + /// also permitted. 
+ ignore_whitespace: Cell, + /// A list of comments, in order of appearance. + comments: RefCell>, + /// A stack of grouped sub-expressions, including alternations. + stack_group: RefCell>, + /// A stack of nested character classes. This is only non-empty when + /// parsing a class. + stack_class: RefCell>, + /// A sorted sequence of capture names. This is used to detect duplicate + /// capture names and report an error if one is detected. + capture_names: RefCell>, + /// A scratch buffer used in various places. Mostly this is used to + /// accumulate relevant characters from parts of a pattern. + scratch: RefCell, +} + +/// ParserI is the internal parser implementation. +/// +/// We use this separate type so that we can carry the provided pattern string +/// along with us. In particular, a `Parser` internal state is not tied to any +/// one pattern, but `ParserI` is. +/// +/// This type also lets us use `ParserI<&Parser>` in production code while +/// retaining the convenience of `ParserI` for tests, which sometimes +/// work against the internal interface of the parser. +#[derive(Clone, Debug)] +struct ParserI<'s, P> { + /// The parser state/configuration. + parser: P, + /// The full regular expression provided by the user. + pattern: &'s str, +} + +/// GroupState represents a single stack frame while parsing nested groups +/// and alternations. Each frame records the state up to an opening parenthesis +/// or a alternating bracket `|`. +#[derive(Clone, Debug)] +enum GroupState { + /// This state is pushed whenever an opening group is found. + Group { + /// The concatenation immediately preceding the opening group. + concat: ast::Concat, + /// The group that has been opened. Its sub-AST is always empty. + group: ast::Group, + /// Whether this group has the `x` flag enabled or not. + ignore_whitespace: bool, + }, + /// This state is pushed whenever a new alternation branch is found. 
If + /// an alternation branch is found and this state is at the top of the + /// stack, then this state should be modified to include the new + /// alternation. + Alternation(ast::Alternation), +} + +/// ClassState represents a single stack frame while parsing character classes. +/// Each frame records the state up to an intersection, difference, symmetric +/// difference or nested class. +/// +/// Note that a parser's character class stack is only non-empty when parsing +/// a character class. In all other cases, it is empty. +#[derive(Clone, Debug)] +enum ClassState { + /// This state is pushed whenever an opening bracket is found. + Open { + /// The union of class items immediately preceding this class. + union: ast::ClassSetUnion, + /// The class that has been opened. Typically this just corresponds + /// to the `[`, but it can also include `[^` since `^` indicates + /// negation of the class. + set: ast::ClassBracketed, + }, + /// This state is pushed when a operator is seen. When popped, the stored + /// set becomes the left hand side of the operator. + Op { + /// The type of the operation, i.e., &&, -- or ~~. + kind: ast::ClassSetBinaryOpKind, + /// The left-hand side of the operator. + lhs: ast::ClassSet, + }, +} + +impl Parser { + /// Create a new parser with a default configuration. + /// + /// The parser can be run with either the `parse` or `parse_with_comments` + /// methods. The parse methods return an abstract syntax tree. + /// + /// To set configuration options on the parser, use [`ParserBuilder`]. + pub fn new() -> Parser { + ParserBuilder::new().build() + } + + /// Parse the regular expression into an abstract syntax tree. + pub fn parse(&mut self, pattern: &str) -> Result { + ParserI::new(self, pattern).parse() + } + + /// Parse the regular expression and return an abstract syntax tree with + /// all of the comments found in the pattern. 
+ pub fn parse_with_comments( + &mut self, + pattern: &str, + ) -> Result { + ParserI::new(self, pattern).parse_with_comments() + } + + /// Reset the internal state of a parser. + /// + /// This is called at the beginning of every parse. This prevents the + /// parser from running with inconsistent state (say, if a previous + /// invocation returned an error and the parser is reused). + fn reset(&self) { + // These settings should be in line with the construction + // in `ParserBuilder::build`. + self.pos.set(Position { offset: 0, line: 1, column: 1 }); + self.ignore_whitespace.set(self.initial_ignore_whitespace); + self.comments.borrow_mut().clear(); + self.stack_group.borrow_mut().clear(); + self.stack_class.borrow_mut().clear(); + } +} + +impl<'s, P: Borrow> ParserI<'s, P> { + /// Build an internal parser from a parser configuration and a pattern. + fn new(parser: P, pattern: &'s str) -> ParserI<'s, P> { + ParserI { parser, pattern } + } + + /// Return a reference to the parser state. + fn parser(&self) -> &Parser { + self.parser.borrow() + } + + /// Return a reference to the pattern being parsed. + fn pattern(&self) -> &str { + self.pattern + } + + /// Create a new error with the given span and error type. + fn error(&self, span: Span, kind: ast::ErrorKind) -> ast::Error { + ast::Error { kind, pattern: self.pattern().to_string(), span } + } + + /// Return the current offset of the parser. + /// + /// The offset starts at `0` from the beginning of the regular expression + /// pattern string. + fn offset(&self) -> usize { + self.parser().pos.get().offset + } + + /// Return the current line number of the parser. + /// + /// The line number starts at `1`. + fn line(&self) -> usize { + self.parser().pos.get().line + } + + /// Return the current column of the parser. + /// + /// The column number starts at `1` and is reset whenever a `\n` is seen. + fn column(&self) -> usize { + self.parser().pos.get().column + } + + /// Return the next capturing index. 
Each subsequent call increments the + /// internal index. + /// + /// The span given should correspond to the location of the opening + /// parenthesis. + /// + /// If the capture limit is exceeded, then an error is returned. + fn next_capture_index(&self, span: Span) -> Result { + let current = self.parser().capture_index.get(); + let i = current.checked_add(1).ok_or_else(|| { + self.error(span, ast::ErrorKind::CaptureLimitExceeded) + })?; + self.parser().capture_index.set(i); + Ok(i) + } + + /// Adds the given capture name to this parser. If this capture name has + /// already been used, then an error is returned. + fn add_capture_name(&self, cap: &ast::CaptureName) -> Result<()> { + let mut names = self.parser().capture_names.borrow_mut(); + match names + .binary_search_by_key(&cap.name.as_str(), |c| c.name.as_str()) + { + Err(i) => { + names.insert(i, cap.clone()); + Ok(()) + } + Ok(i) => Err(self.error( + cap.span, + ast::ErrorKind::GroupNameDuplicate { original: names[i].span }, + )), + } + } + + /// Return whether the parser should ignore whitespace or not. + fn ignore_whitespace(&self) -> bool { + self.parser().ignore_whitespace.get() + } + + /// Return the character at the current position of the parser. + /// + /// This panics if the current position does not point to a valid char. + fn char(&self) -> char { + self.char_at(self.offset()) + } + + /// Return the character at the given position. + /// + /// This panics if the given position does not point to a valid char. + fn char_at(&self, i: usize) -> char { + self.pattern()[i..] + .chars() + .next() + .unwrap_or_else(|| panic!("expected char at offset {i}")) + } + + /// Bump the parser to the next Unicode scalar value. + /// + /// If the end of the input has been reached, then `false` is returned. 
+ fn bump(&self) -> bool { + if self.is_eof() { + return false; + } + let Position { mut offset, mut line, mut column } = self.pos(); + if self.char() == '\n' { + line = line.checked_add(1).unwrap(); + column = 1; + } else { + column = column.checked_add(1).unwrap(); + } + offset += self.char().len_utf8(); + self.parser().pos.set(Position { offset, line, column }); + self.pattern()[self.offset()..].chars().next().is_some() + } + + /// If the substring starting at the current position of the parser has + /// the given prefix, then bump the parser to the character immediately + /// following the prefix and return true. Otherwise, don't bump the parser + /// and return false. + fn bump_if(&self, prefix: &str) -> bool { + if self.pattern()[self.offset()..].starts_with(prefix) { + for _ in 0..prefix.chars().count() { + self.bump(); + } + true + } else { + false + } + } + + /// Returns true if and only if the parser is positioned at a look-around + /// prefix. The conditions under which this returns true must always + /// correspond to a regular expression that would otherwise be consider + /// invalid. + /// + /// This should only be called immediately after parsing the opening of + /// a group or a set of flags. + fn is_lookaround_prefix(&self) -> bool { + self.bump_if("?=") + || self.bump_if("?!") + || self.bump_if("?<=") + || self.bump_if("? bool { + if !self.bump() { + return false; + } + self.bump_space(); + !self.is_eof() + } + + /// If the `x` flag is enabled (i.e., whitespace insensitivity with + /// comments), then this will advance the parser through all whitespace + /// and comments to the next non-whitespace non-comment byte. + /// + /// If the `x` flag is disabled, then this is a no-op. + /// + /// This should be used selectively throughout the parser where + /// arbitrary whitespace is permitted when the `x` flag is enabled. For + /// example, `{ 5 , 6}` is equivalent to `{5,6}`. 
+ fn bump_space(&self) { + if !self.ignore_whitespace() { + return; + } + while !self.is_eof() { + if self.char().is_whitespace() { + self.bump(); + } else if self.char() == '#' { + let start = self.pos(); + let mut comment_text = String::new(); + self.bump(); + while !self.is_eof() { + let c = self.char(); + self.bump(); + if c == '\n' { + break; + } + comment_text.push(c); + } + let comment = ast::Comment { + span: Span::new(start, self.pos()), + comment: comment_text, + }; + self.parser().comments.borrow_mut().push(comment); + } else { + break; + } + } + } + + /// Peek at the next character in the input without advancing the parser. + /// + /// If the input has been exhausted, then this returns `None`. + fn peek(&self) -> Option { + if self.is_eof() { + return None; + } + self.pattern()[self.offset() + self.char().len_utf8()..].chars().next() + } + + /// Like peek, but will ignore spaces when the parser is in whitespace + /// insensitive mode. + fn peek_space(&self) -> Option { + if !self.ignore_whitespace() { + return self.peek(); + } + if self.is_eof() { + return None; + } + let mut start = self.offset() + self.char().len_utf8(); + let mut in_comment = false; + for (i, c) in self.pattern()[start..].char_indices() { + if c.is_whitespace() { + continue; + } else if !in_comment && c == '#' { + in_comment = true; + } else if in_comment && c == '\n' { + in_comment = false; + } else { + start += i; + break; + } + } + self.pattern()[start..].chars().next() + } + + /// Returns true if the next call to `bump` would return false. + fn is_eof(&self) -> bool { + self.offset() == self.pattern().len() + } + + /// Return the current position of the parser, which includes the offset, + /// line and column. + fn pos(&self) -> Position { + self.parser().pos.get() + } + + /// Create a span at the current position of the parser. Both the start + /// and end of the span are set. 
+ fn span(&self) -> Span { + Span::splat(self.pos()) + } + + /// Create a span that covers the current character. + fn span_char(&self) -> Span { + let mut next = Position { + offset: self.offset().checked_add(self.char().len_utf8()).unwrap(), + line: self.line(), + column: self.column().checked_add(1).unwrap(), + }; + if self.char() == '\n' { + next.line += 1; + next.column = 1; + } + Span::new(self.pos(), next) + } + + /// Parse and push a single alternation on to the parser's internal stack. + /// If the top of the stack already has an alternation, then add to that + /// instead of pushing a new one. + /// + /// The concatenation given corresponds to a single alternation branch. + /// The concatenation returned starts the next branch and is empty. + /// + /// This assumes the parser is currently positioned at `|` and will advance + /// the parser to the character following `|`. + #[inline(never)] + fn push_alternate(&self, mut concat: ast::Concat) -> Result { + assert_eq!(self.char(), '|'); + concat.span.end = self.pos(); + self.push_or_add_alternation(concat); + self.bump(); + Ok(ast::Concat { span: self.span(), asts: vec![] }) + } + + /// Pushes or adds the given branch of an alternation to the parser's + /// internal stack of state. + fn push_or_add_alternation(&self, concat: ast::Concat) { + use self::GroupState::*; + + let mut stack = self.parser().stack_group.borrow_mut(); + if let Some(&mut Alternation(ref mut alts)) = stack.last_mut() { + alts.asts.push(concat.into_ast()); + return; + } + stack.push(Alternation(ast::Alternation { + span: Span::new(concat.span.start, self.pos()), + asts: vec![concat.into_ast()], + })); + } + + /// Parse and push a group AST (and its parent concatenation) on to the + /// parser's internal stack. Return a fresh concatenation corresponding + /// to the group's sub-AST. + /// + /// If a set of flags was found (with no group), then the concatenation + /// is returned with that set of flags added. 
+ /// + /// This assumes that the parser is currently positioned on the opening + /// parenthesis. It advances the parser to the character at the start + /// of the sub-expression (or adjoining expression). + /// + /// If there was a problem parsing the start of the group, then an error + /// is returned. + #[inline(never)] + fn push_group(&self, mut concat: ast::Concat) -> Result { + assert_eq!(self.char(), '('); + match self.parse_group()? { + Either::Left(set) => { + let ignore = set.flags.flag_state(ast::Flag::IgnoreWhitespace); + if let Some(v) = ignore { + self.parser().ignore_whitespace.set(v); + } + + concat.asts.push(Ast::flags(set)); + Ok(concat) + } + Either::Right(group) => { + let old_ignore_whitespace = self.ignore_whitespace(); + let new_ignore_whitespace = group + .flags() + .and_then(|f| f.flag_state(ast::Flag::IgnoreWhitespace)) + .unwrap_or(old_ignore_whitespace); + self.parser().stack_group.borrow_mut().push( + GroupState::Group { + concat, + group, + ignore_whitespace: old_ignore_whitespace, + }, + ); + self.parser().ignore_whitespace.set(new_ignore_whitespace); + Ok(ast::Concat { span: self.span(), asts: vec![] }) + } + } + } + + /// Pop a group AST from the parser's internal stack and set the group's + /// AST to the given concatenation. Return the concatenation containing + /// the group. + /// + /// This assumes that the parser is currently positioned on the closing + /// parenthesis and advances the parser to the character following the `)`. + /// + /// If no such group could be popped, then an unopened group error is + /// returned. 
+ #[inline(never)] + fn pop_group(&self, mut group_concat: ast::Concat) -> Result { + use self::GroupState::*; + + assert_eq!(self.char(), ')'); + let mut stack = self.parser().stack_group.borrow_mut(); + let (mut prior_concat, mut group, ignore_whitespace, alt) = match stack + .pop() + { + Some(Group { concat, group, ignore_whitespace }) => { + (concat, group, ignore_whitespace, None) + } + Some(Alternation(alt)) => match stack.pop() { + Some(Group { concat, group, ignore_whitespace }) => { + (concat, group, ignore_whitespace, Some(alt)) + } + None | Some(Alternation(_)) => { + return Err(self.error( + self.span_char(), + ast::ErrorKind::GroupUnopened, + )); + } + }, + None => { + return Err(self + .error(self.span_char(), ast::ErrorKind::GroupUnopened)); + } + }; + self.parser().ignore_whitespace.set(ignore_whitespace); + group_concat.span.end = self.pos(); + self.bump(); + group.span.end = self.pos(); + match alt { + Some(mut alt) => { + alt.span.end = group_concat.span.end; + alt.asts.push(group_concat.into_ast()); + group.ast = Box::new(alt.into_ast()); + } + None => { + group.ast = Box::new(group_concat.into_ast()); + } + } + prior_concat.asts.push(Ast::group(group)); + Ok(prior_concat) + } + + /// Pop the last state from the parser's internal stack, if it exists, and + /// add the given concatenation to it. There either must be no state or a + /// single alternation item on the stack. Any other scenario produces an + /// error. + /// + /// This assumes that the parser has advanced to the end. + #[inline(never)] + fn pop_group_end(&self, mut concat: ast::Concat) -> Result { + concat.span.end = self.pos(); + let mut stack = self.parser().stack_group.borrow_mut(); + let ast = match stack.pop() { + None => Ok(concat.into_ast()), + Some(GroupState::Alternation(mut alt)) => { + alt.span.end = self.pos(); + alt.asts.push(concat.into_ast()); + Ok(Ast::alternation(alt)) + } + Some(GroupState::Group { group, .. 
}) => { + return Err( + self.error(group.span, ast::ErrorKind::GroupUnclosed) + ); + } + }; + // If we try to pop again, there should be nothing. + match stack.pop() { + None => ast, + Some(GroupState::Alternation(_)) => { + // This unreachable is unfortunate. This case can't happen + // because the only way we can be here is if there were two + // `GroupState::Alternation`s adjacent in the parser's stack, + // which we guarantee to never happen because we never push a + // `GroupState::Alternation` if one is already at the top of + // the stack. + unreachable!() + } + Some(GroupState::Group { group, .. }) => { + Err(self.error(group.span, ast::ErrorKind::GroupUnclosed)) + } + } + } + + /// Parse the opening of a character class and push the current class + /// parsing context onto the parser's stack. This assumes that the parser + /// is positioned at an opening `[`. The given union should correspond to + /// the union of set items built up before seeing the `[`. + /// + /// If there was a problem parsing the opening of the class, then an error + /// is returned. Otherwise, a new union of set items for the class is + /// returned (which may be populated with either a `]` or a `-`). + #[inline(never)] + fn push_class_open( + &self, + parent_union: ast::ClassSetUnion, + ) -> Result { + assert_eq!(self.char(), '['); + + let (nested_set, nested_union) = self.parse_set_class_open()?; + self.parser() + .stack_class + .borrow_mut() + .push(ClassState::Open { union: parent_union, set: nested_set }); + Ok(nested_union) + } + + /// Parse the end of a character class set and pop the character class + /// parser stack. The union given corresponds to the last union built + /// before seeing the closing `]`. The union returned corresponds to the + /// parent character class set with the nested class added to it. + /// + /// This assumes that the parser is positioned at a `]` and will advance + /// the parser to the byte immediately following the `]`. 
+ /// + /// If the stack is empty after popping, then this returns the final + /// "top-level" character class AST (where a "top-level" character class + /// is one that is not nested inside any other character class). + /// + /// If there is no corresponding opening bracket on the parser's stack, + /// then an error is returned. + #[inline(never)] + fn pop_class( + &self, + nested_union: ast::ClassSetUnion, + ) -> Result> { + assert_eq!(self.char(), ']'); + + let item = ast::ClassSet::Item(nested_union.into_item()); + let prevset = self.pop_class_op(item); + let mut stack = self.parser().stack_class.borrow_mut(); + match stack.pop() { + None => { + // We can never observe an empty stack: + // + // 1) We are guaranteed to start with a non-empty stack since + // the character class parser is only initiated when it sees + // a `[`. + // 2) If we ever observe an empty stack while popping after + // seeing a `]`, then we signal the character class parser + // to terminate. + panic!("unexpected empty character class stack") + } + Some(ClassState::Op { .. }) => { + // This panic is unfortunate, but this case is impossible + // since we already popped the Op state if one exists above. + // Namely, every push to the class parser stack is guarded by + // whether an existing Op is already on the top of the stack. + // If it is, the existing Op is modified. That is, the stack + // can never have consecutive Op states. + panic!("unexpected ClassState::Op") + } + Some(ClassState::Open { mut union, mut set }) => { + self.bump(); + set.span.end = self.pos(); + set.kind = prevset; + if stack.is_empty() { + Ok(Either::Right(set)) + } else { + union.push(ast::ClassSetItem::Bracketed(Box::new(set))); + Ok(Either::Left(union)) + } + } + } + } + + /// Return an "unclosed class" error whose span points to the most + /// recently opened class. + /// + /// This should only be called while parsing a character class. 
+ #[inline(never)] + fn unclosed_class_error(&self) -> ast::Error { + for state in self.parser().stack_class.borrow().iter().rev() { + if let ClassState::Open { ref set, .. } = *state { + return self.error(set.span, ast::ErrorKind::ClassUnclosed); + } + } + // We are guaranteed to have a non-empty stack with at least + // one open bracket, so we should never get here. + panic!("no open character class found") + } + + /// Push the current set of class items on to the class parser's stack as + /// the left hand side of the given operator. + /// + /// A fresh set union is returned, which should be used to build the right + /// hand side of this operator. + #[inline(never)] + fn push_class_op( + &self, + next_kind: ast::ClassSetBinaryOpKind, + next_union: ast::ClassSetUnion, + ) -> ast::ClassSetUnion { + let item = ast::ClassSet::Item(next_union.into_item()); + let new_lhs = self.pop_class_op(item); + self.parser() + .stack_class + .borrow_mut() + .push(ClassState::Op { kind: next_kind, lhs: new_lhs }); + ast::ClassSetUnion { span: self.span(), items: vec![] } + } + + /// Pop a character class set from the character class parser stack. If the + /// top of the stack is just an item (not an operation), then return the + /// given set unchanged. If the top of the stack is an operation, then the + /// given set will be used as the rhs of the operation on the top of the + /// stack. In that case, the binary operation is returned as a set. + #[inline(never)] + fn pop_class_op(&self, rhs: ast::ClassSet) -> ast::ClassSet { + let mut stack = self.parser().stack_class.borrow_mut(); + let (kind, lhs) = match stack.pop() { + Some(ClassState::Op { kind, lhs }) => (kind, lhs), + Some(state @ ClassState::Open { .. 
}) => { + stack.push(state); + return rhs; + } + None => unreachable!(), + }; + let span = Span::new(lhs.span().start, rhs.span().end); + ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { + span, + kind, + lhs: Box::new(lhs), + rhs: Box::new(rhs), + }) + } +} + +impl<'s, P: Borrow> ParserI<'s, P> { + /// Parse the regular expression into an abstract syntax tree. + fn parse(&self) -> Result { + self.parse_with_comments().map(|astc| astc.ast) + } + + /// Parse the regular expression and return an abstract syntax tree with + /// all of the comments found in the pattern. + fn parse_with_comments(&self) -> Result { + assert_eq!(self.offset(), 0, "parser can only be used once"); + self.parser().reset(); + let mut concat = ast::Concat { span: self.span(), asts: vec![] }; + loop { + self.bump_space(); + if self.is_eof() { + break; + } + match self.char() { + '(' => concat = self.push_group(concat)?, + ')' => concat = self.pop_group(concat)?, + '|' => concat = self.push_alternate(concat)?, + '[' => { + let class = self.parse_set_class()?; + concat.asts.push(Ast::class_bracketed(class)); + } + '?' => { + concat = self.parse_uncounted_repetition( + concat, + ast::RepetitionKind::ZeroOrOne, + )?; + } + '*' => { + concat = self.parse_uncounted_repetition( + concat, + ast::RepetitionKind::ZeroOrMore, + )?; + } + '+' => { + concat = self.parse_uncounted_repetition( + concat, + ast::RepetitionKind::OneOrMore, + )?; + } + '{' => { + concat = self.parse_counted_repetition(concat)?; + } + _ => concat.asts.push(self.parse_primitive()?.into_ast()), + } + } + let ast = self.pop_group_end(concat)?; + NestLimiter::new(self).check(&ast)?; + Ok(ast::WithComments { + ast, + comments: mem::replace( + &mut *self.parser().comments.borrow_mut(), + vec![], + ), + }) + } + + /// Parses an uncounted repetition operation. An uncounted repetition + /// operator includes ?, * and +, but does not include the {m,n} syntax. 
+ /// The given `kind` should correspond to the operator observed by the + /// caller. + /// + /// This assumes that the parser is currently positioned at the repetition + /// operator and advances the parser to the first character after the + /// operator. (Note that the operator may include a single additional `?`, + /// which makes the operator ungreedy.) + /// + /// The caller should include the concatenation that is being built. The + /// concatenation returned includes the repetition operator applied to the + /// last expression in the given concatenation. + #[inline(never)] + fn parse_uncounted_repetition( + &self, + mut concat: ast::Concat, + kind: ast::RepetitionKind, + ) -> Result { + assert!( + self.char() == '?' || self.char() == '*' || self.char() == '+' + ); + let op_start = self.pos(); + let ast = match concat.asts.pop() { + Some(ast) => ast, + None => { + return Err( + self.error(self.span(), ast::ErrorKind::RepetitionMissing) + ) + } + }; + match ast { + Ast::Empty(_) | Ast::Flags(_) => { + return Err( + self.error(self.span(), ast::ErrorKind::RepetitionMissing) + ) + } + _ => {} + } + let mut greedy = true; + if self.bump() && self.char() == '?' { + greedy = false; + self.bump(); + } + concat.asts.push(Ast::repetition(ast::Repetition { + span: ast.span().with_end(self.pos()), + op: ast::RepetitionOp { + span: Span::new(op_start, self.pos()), + kind, + }, + greedy, + ast: Box::new(ast), + })); + Ok(concat) + } + + /// Parses a counted repetition operation. A counted repetition operator + /// corresponds to the {m,n} syntax, and does not include the ?, * or + + /// operators. + /// + /// This assumes that the parser is currently positioned at the opening `{` + /// and advances the parser to the first character after the operator. + /// (Note that the operator may include a single additional `?`, which + /// makes the operator ungreedy.) + /// + /// The caller should include the concatenation that is being built. 
The + /// concatenation returned includes the repetition operator applied to the + /// last expression in the given concatenation. + #[inline(never)] + fn parse_counted_repetition( + &self, + mut concat: ast::Concat, + ) -> Result { + assert!(self.char() == '{'); + let start = self.pos(); + let ast = match concat.asts.pop() { + Some(ast) => ast, + None => { + return Err( + self.error(self.span(), ast::ErrorKind::RepetitionMissing) + ) + } + }; + match ast { + Ast::Empty(_) | Ast::Flags(_) => { + return Err( + self.error(self.span(), ast::ErrorKind::RepetitionMissing) + ) + } + _ => {} + } + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::RepetitionCountUnclosed, + )); + } + let count_start = specialize_err( + self.parse_decimal(), + ast::ErrorKind::DecimalEmpty, + ast::ErrorKind::RepetitionCountDecimalEmpty, + ); + if self.is_eof() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::RepetitionCountUnclosed, + )); + } + let range = if self.char() == ',' { + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::RepetitionCountUnclosed, + )); + } + if self.char() != '}' { + let count_start = match count_start { + Ok(c) => c, + Err(err) + if err.kind + == ast::ErrorKind::RepetitionCountDecimalEmpty => + { + if self.parser().empty_min_range { + 0 + } else { + return Err(err); + } + } + err => err?, + }; + let count_end = specialize_err( + self.parse_decimal(), + ast::ErrorKind::DecimalEmpty, + ast::ErrorKind::RepetitionCountDecimalEmpty, + )?; + ast::RepetitionRange::Bounded(count_start, count_end) + } else { + ast::RepetitionRange::AtLeast(count_start?) + } + } else { + ast::RepetitionRange::Exactly(count_start?) 
+ }; + + if self.is_eof() || self.char() != '}' { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::RepetitionCountUnclosed, + )); + } + + let mut greedy = true; + if self.bump_and_bump_space() && self.char() == '?' { + greedy = false; + self.bump(); + } + + let op_span = Span::new(start, self.pos()); + if !range.is_valid() { + return Err( + self.error(op_span, ast::ErrorKind::RepetitionCountInvalid) + ); + } + concat.asts.push(Ast::repetition(ast::Repetition { + span: ast.span().with_end(self.pos()), + op: ast::RepetitionOp { + span: op_span, + kind: ast::RepetitionKind::Range(range), + }, + greedy, + ast: Box::new(ast), + })); + Ok(concat) + } + + /// Parse a group (which contains a sub-expression) or a set of flags. + /// + /// If a group was found, then it is returned with an empty AST. If a set + /// of flags is found, then that set is returned. + /// + /// The parser should be positioned at the opening parenthesis. + /// + /// This advances the parser to the character before the start of the + /// sub-expression (in the case of a group) or to the closing parenthesis + /// immediately following the set of flags. + /// + /// # Errors + /// + /// If flags are given and incorrectly specified, then a corresponding + /// error is returned. + /// + /// If a capture name is given and it is incorrectly specified, then a + /// corresponding error is returned. 
+ #[inline(never)] + fn parse_group(&self) -> Result> { + assert_eq!(self.char(), '('); + let open_span = self.span_char(); + self.bump(); + self.bump_space(); + if self.is_lookaround_prefix() { + return Err(self.error( + Span::new(open_span.start, self.span().end), + ast::ErrorKind::UnsupportedLookAround, + )); + } + let inner_span = self.span(); + let mut starts_with_p = true; + if self.bump_if("?P<") || { + starts_with_p = false; + self.bump_if("?<") + } { + let capture_index = self.next_capture_index(open_span)?; + let name = self.parse_capture_name(capture_index)?; + Ok(Either::Right(ast::Group { + span: open_span, + kind: ast::GroupKind::CaptureName { starts_with_p, name }, + ast: Box::new(Ast::empty(self.span())), + })) + } else if self.bump_if("?") { + if self.is_eof() { + return Err( + self.error(open_span, ast::ErrorKind::GroupUnclosed) + ); + } + let flags = self.parse_flags()?; + let char_end = self.char(); + self.bump(); + if char_end == ')' { + // We don't allow empty flags, e.g., `(?)`. We instead + // interpret it as a repetition operator missing its argument. + if flags.items.is_empty() { + return Err(self.error( + inner_span, + ast::ErrorKind::RepetitionMissing, + )); + } + Ok(Either::Left(ast::SetFlags { + span: Span { end: self.pos(), ..open_span }, + flags, + })) + } else { + assert_eq!(char_end, ':'); + Ok(Either::Right(ast::Group { + span: open_span, + kind: ast::GroupKind::NonCapturing(flags), + ast: Box::new(Ast::empty(self.span())), + })) + } + } else { + let capture_index = self.next_capture_index(open_span)?; + Ok(Either::Right(ast::Group { + span: open_span, + kind: ast::GroupKind::CaptureIndex(capture_index), + ast: Box::new(Ast::empty(self.span())), + })) + } + } + + /// Parses a capture group name. Assumes that the parser is positioned at + /// the first character in the name following the opening `<` (and may + /// possibly be EOF). This advances the parser to the first character + /// following the closing `>`. 
+ /// + /// The caller must provide the capture index of the group for this name. + #[inline(never)] + fn parse_capture_name( + &self, + capture_index: u32, + ) -> Result { + if self.is_eof() { + return Err(self + .error(self.span(), ast::ErrorKind::GroupNameUnexpectedEof)); + } + let start = self.pos(); + loop { + if self.char() == '>' { + break; + } + if !is_capture_char(self.char(), self.pos() == start) { + return Err(self.error( + self.span_char(), + ast::ErrorKind::GroupNameInvalid, + )); + } + if !self.bump() { + break; + } + } + let end = self.pos(); + if self.is_eof() { + return Err(self + .error(self.span(), ast::ErrorKind::GroupNameUnexpectedEof)); + } + assert_eq!(self.char(), '>'); + self.bump(); + let name = &self.pattern()[start.offset..end.offset]; + if name.is_empty() { + return Err(self.error( + Span::new(start, start), + ast::ErrorKind::GroupNameEmpty, + )); + } + let capname = ast::CaptureName { + span: Span::new(start, end), + name: name.to_string(), + index: capture_index, + }; + self.add_capture_name(&capname)?; + Ok(capname) + } + + /// Parse a sequence of flags starting at the current character. + /// + /// This advances the parser to the character immediately following the + /// flags, which is guaranteed to be either `:` or `)`. + /// + /// # Errors + /// + /// If any flags are duplicated, then an error is returned. + /// + /// If the negation operator is used more than once, then an error is + /// returned. + /// + /// If no flags could be found or if the negation operation is not followed + /// by any flags, then an error is returned. 
+ #[inline(never)] + fn parse_flags(&self) -> Result { + let mut flags = ast::Flags { span: self.span(), items: vec![] }; + let mut last_was_negation = None; + while self.char() != ':' && self.char() != ')' { + if self.char() == '-' { + last_was_negation = Some(self.span_char()); + let item = ast::FlagsItem { + span: self.span_char(), + kind: ast::FlagsItemKind::Negation, + }; + if let Some(i) = flags.add_item(item) { + return Err(self.error( + self.span_char(), + ast::ErrorKind::FlagRepeatedNegation { + original: flags.items[i].span, + }, + )); + } + } else { + last_was_negation = None; + let item = ast::FlagsItem { + span: self.span_char(), + kind: ast::FlagsItemKind::Flag(self.parse_flag()?), + }; + if let Some(i) = flags.add_item(item) { + return Err(self.error( + self.span_char(), + ast::ErrorKind::FlagDuplicate { + original: flags.items[i].span, + }, + )); + } + } + if !self.bump() { + return Err( + self.error(self.span(), ast::ErrorKind::FlagUnexpectedEof) + ); + } + } + if let Some(span) = last_was_negation { + return Err(self.error(span, ast::ErrorKind::FlagDanglingNegation)); + } + flags.span.end = self.pos(); + Ok(flags) + } + + /// Parse the current character as a flag. Do not advance the parser. + /// + /// # Errors + /// + /// If the flag is not recognized, then an error is returned. + #[inline(never)] + fn parse_flag(&self) -> Result { + match self.char() { + 'i' => Ok(ast::Flag::CaseInsensitive), + 'm' => Ok(ast::Flag::MultiLine), + 's' => Ok(ast::Flag::DotMatchesNewLine), + 'U' => Ok(ast::Flag::SwapGreed), + 'u' => Ok(ast::Flag::Unicode), + 'R' => Ok(ast::Flag::CRLF), + 'x' => Ok(ast::Flag::IgnoreWhitespace), + _ => { + Err(self + .error(self.span_char(), ast::ErrorKind::FlagUnrecognized)) + } + } + } + + /// Parse a primitive AST. e.g., A literal, non-set character class or + /// assertion. + /// + /// This assumes that the parser expects a primitive at the current + /// location. i.e., All other non-primitive cases have been handled. 
+ /// For example, if the parser's position is at `|`, then `|` will be + /// treated as a literal (e.g., inside a character class). + /// + /// This advances the parser to the first character immediately following + /// the primitive. + fn parse_primitive(&self) -> Result { + match self.char() { + '\\' => self.parse_escape(), + '.' => { + let ast = Primitive::Dot(self.span_char()); + self.bump(); + Ok(ast) + } + '^' => { + let ast = Primitive::Assertion(ast::Assertion { + span: self.span_char(), + kind: ast::AssertionKind::StartLine, + }); + self.bump(); + Ok(ast) + } + '$' => { + let ast = Primitive::Assertion(ast::Assertion { + span: self.span_char(), + kind: ast::AssertionKind::EndLine, + }); + self.bump(); + Ok(ast) + } + c => { + let ast = Primitive::Literal(ast::Literal { + span: self.span_char(), + kind: ast::LiteralKind::Verbatim, + c, + }); + self.bump(); + Ok(ast) + } + } + } + + /// Parse an escape sequence as a primitive AST. + /// + /// This assumes the parser is positioned at the start of the escape + /// sequence, i.e., `\`. It advances the parser to the first position + /// immediately following the escape sequence. + #[inline(never)] + fn parse_escape(&self) -> Result { + assert_eq!(self.char(), '\\'); + let start = self.pos(); + if !self.bump() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::EscapeUnexpectedEof, + )); + } + let c = self.char(); + // Put some of the more complicated routines into helpers. 
+ match c { + '0'..='7' => { + if !self.parser().octal { + return Err(self.error( + Span::new(start, self.span_char().end), + ast::ErrorKind::UnsupportedBackreference, + )); + } + let mut lit = self.parse_octal(); + lit.span.start = start; + return Ok(Primitive::Literal(lit)); + } + '8'..='9' if !self.parser().octal => { + return Err(self.error( + Span::new(start, self.span_char().end), + ast::ErrorKind::UnsupportedBackreference, + )); + } + 'x' | 'u' | 'U' => { + let mut lit = self.parse_hex()?; + lit.span.start = start; + return Ok(Primitive::Literal(lit)); + } + 'p' | 'P' => { + let mut cls = self.parse_unicode_class()?; + cls.span.start = start; + return Ok(Primitive::Unicode(cls)); + } + 'd' | 's' | 'w' | 'D' | 'S' | 'W' => { + let mut cls = self.parse_perl_class(); + cls.span.start = start; + return Ok(Primitive::Perl(cls)); + } + _ => {} + } + + // Handle all of the one letter sequences inline. + self.bump(); + let span = Span::new(start, self.pos()); + if is_meta_character(c) { + return Ok(Primitive::Literal(ast::Literal { + span, + kind: ast::LiteralKind::Meta, + c, + })); + } + if is_escapeable_character(c) { + return Ok(Primitive::Literal(ast::Literal { + span, + kind: ast::LiteralKind::Superfluous, + c, + })); + } + let special = |kind, c| { + Ok(Primitive::Literal(ast::Literal { + span, + kind: ast::LiteralKind::Special(kind), + c, + })) + }; + match c { + 'a' => special(ast::SpecialLiteralKind::Bell, '\x07'), + 'f' => special(ast::SpecialLiteralKind::FormFeed, '\x0C'), + 't' => special(ast::SpecialLiteralKind::Tab, '\t'), + 'n' => special(ast::SpecialLiteralKind::LineFeed, '\n'), + 'r' => special(ast::SpecialLiteralKind::CarriageReturn, '\r'), + 'v' => special(ast::SpecialLiteralKind::VerticalTab, '\x0B'), + 'A' => Ok(Primitive::Assertion(ast::Assertion { + span, + kind: ast::AssertionKind::StartText, + })), + 'z' => Ok(Primitive::Assertion(ast::Assertion { + span, + kind: ast::AssertionKind::EndText, + })), + 'b' => { + let mut wb = ast::Assertion { 
+ span, + kind: ast::AssertionKind::WordBoundary, + }; + // After a \b, we "try" to parse things like \b{start} for + // special word boundary assertions. + if !self.is_eof() && self.char() == '{' { + if let Some(kind) = + self.maybe_parse_special_word_boundary(start)? + { + wb.kind = kind; + wb.span.end = self.pos(); + } + } + Ok(Primitive::Assertion(wb)) + } + 'B' => Ok(Primitive::Assertion(ast::Assertion { + span, + kind: ast::AssertionKind::NotWordBoundary, + })), + '<' => Ok(Primitive::Assertion(ast::Assertion { + span, + kind: ast::AssertionKind::WordBoundaryStartAngle, + })), + '>' => Ok(Primitive::Assertion(ast::Assertion { + span, + kind: ast::AssertionKind::WordBoundaryEndAngle, + })), + _ => Err(self.error(span, ast::ErrorKind::EscapeUnrecognized)), + } + } + + /// Attempt to parse a specialty word boundary. That is, `\b{start}`, + /// `\b{end}`, `\b{start-half}` or `\b{end-half}`. + /// + /// This is similar to `maybe_parse_ascii_class` in that, in most cases, + /// if it fails it will just return `None` with no error. This is done + /// because `\b{5}` is a valid expression and we want to let that be parsed + /// by the existing counted repetition parsing code. (I thought about just + /// invoking the counted repetition code from here, but it seemed a little + /// ham-fisted.) + /// + /// Unlike `maybe_parse_ascii_class` though, this can return an error. + /// Namely, if we definitely know it isn't a counted repetition, then we + /// return an error specific to the specialty word boundaries. + /// + /// This assumes the parser is positioned at a `{` immediately following + /// a `\b`. When `None` is returned, the parser is returned to the position + /// at which it started: pointing at a `{`. + /// + /// The position given should correspond to the start of the `\b`. 
+ fn maybe_parse_special_word_boundary( + &self, + wb_start: Position, + ) -> Result> { + assert_eq!(self.char(), '{'); + + let is_valid_char = |c| match c { + 'A'..='Z' | 'a'..='z' | '-' => true, + _ => false, + }; + let start = self.pos(); + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(wb_start, self.pos()), + ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof, + )); + } + let start_contents = self.pos(); + // This is one of the critical bits: if the first non-whitespace + // character isn't in [-A-Za-z] (i.e., this can't be a special word + // boundary), then we bail and let the counted repetition parser deal + // with this. + if !is_valid_char(self.char()) { + self.parser().pos.set(start); + return Ok(None); + } + + // Now collect up our chars until we see a '}'. + let mut scratch = self.parser().scratch.borrow_mut(); + scratch.clear(); + while !self.is_eof() && is_valid_char(self.char()) { + scratch.push(self.char()); + self.bump_and_bump_space(); + } + if self.is_eof() || self.char() != '}' { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::SpecialWordBoundaryUnclosed, + )); + } + let end = self.pos(); + self.bump(); + let kind = match scratch.as_str() { + "start" => ast::AssertionKind::WordBoundaryStart, + "end" => ast::AssertionKind::WordBoundaryEnd, + "start-half" => ast::AssertionKind::WordBoundaryStartHalf, + "end-half" => ast::AssertionKind::WordBoundaryEndHalf, + _ => { + return Err(self.error( + Span::new(start_contents, end), + ast::ErrorKind::SpecialWordBoundaryUnrecognized, + )) + } + }; + Ok(Some(kind)) + } + + /// Parse an octal representation of a Unicode codepoint up to 3 digits + /// long. This expects the parser to be positioned at the first octal + /// digit and advances the parser to the first character immediately + /// following the octal number. This also assumes that parsing octal + /// escapes is enabled. + /// + /// Assuming the preconditions are met, this routine can never fail. 
+ #[inline(never)] + fn parse_octal(&self) -> ast::Literal { + assert!(self.parser().octal); + assert!('0' <= self.char() && self.char() <= '7'); + let start = self.pos(); + // Parse up to two more digits. + while self.bump() + && '0' <= self.char() + && self.char() <= '7' + && self.pos().offset - start.offset <= 2 + {} + let end = self.pos(); + let octal = &self.pattern()[start.offset..end.offset]; + // Parsing the octal should never fail since the above guarantees a + // valid number. + let codepoint = + u32::from_str_radix(octal, 8).expect("valid octal number"); + // The max value for 3 digit octal is 0777 = 511 and [0, 511] has no + // invalid Unicode scalar values. + let c = char::from_u32(codepoint).expect("Unicode scalar value"); + ast::Literal { + span: Span::new(start, end), + kind: ast::LiteralKind::Octal, + c, + } + } + + /// Parse a hex representation of a Unicode codepoint. This handles both + /// hex notations, i.e., `\xFF` and `\x{FFFF}`. This expects the parser to + /// be positioned at the `x`, `u` or `U` prefix. The parser is advanced to + /// the first character immediately following the hexadecimal literal. + #[inline(never)] + fn parse_hex(&self) -> Result { + assert!( + self.char() == 'x' || self.char() == 'u' || self.char() == 'U' + ); + + let hex_kind = match self.char() { + 'x' => ast::HexLiteralKind::X, + 'u' => ast::HexLiteralKind::UnicodeShort, + _ => ast::HexLiteralKind::UnicodeLong, + }; + if !self.bump_and_bump_space() { + return Err( + self.error(self.span(), ast::ErrorKind::EscapeUnexpectedEof) + ); + } + if self.char() == '{' { + self.parse_hex_brace(hex_kind) + } else { + self.parse_hex_digits(hex_kind) + } + } + + /// Parse an N-digit hex representation of a Unicode codepoint. This + /// expects the parser to be positioned at the first digit and will advance + /// the parser to the first character immediately following the escape + /// sequence. 
+ /// + /// The number of digits given must be 2 (for `\xNN`), 4 (for `\uNNNN`) + /// or 8 (for `\UNNNNNNNN`). + #[inline(never)] + fn parse_hex_digits( + &self, + kind: ast::HexLiteralKind, + ) -> Result { + let mut scratch = self.parser().scratch.borrow_mut(); + scratch.clear(); + + let start = self.pos(); + for i in 0..kind.digits() { + if i > 0 && !self.bump_and_bump_space() { + return Err(self + .error(self.span(), ast::ErrorKind::EscapeUnexpectedEof)); + } + if !is_hex(self.char()) { + return Err(self.error( + self.span_char(), + ast::ErrorKind::EscapeHexInvalidDigit, + )); + } + scratch.push(self.char()); + } + // The final bump just moves the parser past the literal, which may + // be EOF. + self.bump_and_bump_space(); + let end = self.pos(); + let hex = scratch.as_str(); + match u32::from_str_radix(hex, 16).ok().and_then(char::from_u32) { + None => Err(self.error( + Span::new(start, end), + ast::ErrorKind::EscapeHexInvalid, + )), + Some(c) => Ok(ast::Literal { + span: Span::new(start, end), + kind: ast::LiteralKind::HexFixed(kind), + c, + }), + } + } + + /// Parse a hex representation of any Unicode scalar value. This expects + /// the parser to be positioned at the opening brace `{` and will advance + /// the parser to the first character following the closing brace `}`. 
+ #[inline(never)] + fn parse_hex_brace( + &self, + kind: ast::HexLiteralKind, + ) -> Result { + let mut scratch = self.parser().scratch.borrow_mut(); + scratch.clear(); + + let brace_pos = self.pos(); + let start = self.span_char().end; + while self.bump_and_bump_space() && self.char() != '}' { + if !is_hex(self.char()) { + return Err(self.error( + self.span_char(), + ast::ErrorKind::EscapeHexInvalidDigit, + )); + } + scratch.push(self.char()); + } + if self.is_eof() { + return Err(self.error( + Span::new(brace_pos, self.pos()), + ast::ErrorKind::EscapeUnexpectedEof, + )); + } + let end = self.pos(); + let hex = scratch.as_str(); + assert_eq!(self.char(), '}'); + self.bump_and_bump_space(); + + if hex.is_empty() { + return Err(self.error( + Span::new(brace_pos, self.pos()), + ast::ErrorKind::EscapeHexEmpty, + )); + } + match u32::from_str_radix(hex, 16).ok().and_then(char::from_u32) { + None => Err(self.error( + Span::new(start, end), + ast::ErrorKind::EscapeHexInvalid, + )), + Some(c) => Ok(ast::Literal { + span: Span::new(start, self.pos()), + kind: ast::LiteralKind::HexBrace(kind), + c, + }), + } + } + + /// Parse a decimal number into a u32 while trimming leading and trailing + /// whitespace. + /// + /// This expects the parser to be positioned at the first position where + /// a decimal digit could occur. This will advance the parser to the byte + /// immediately following the last contiguous decimal digit. + /// + /// If no decimal digit could be found or if there was a problem parsing + /// the complete set of digits into a u32, then an error is returned. 
+ fn parse_decimal(&self) -> Result { + let mut scratch = self.parser().scratch.borrow_mut(); + scratch.clear(); + + while !self.is_eof() && self.char().is_whitespace() { + self.bump(); + } + let start = self.pos(); + while !self.is_eof() && '0' <= self.char() && self.char() <= '9' { + scratch.push(self.char()); + self.bump_and_bump_space(); + } + let span = Span::new(start, self.pos()); + while !self.is_eof() && self.char().is_whitespace() { + self.bump_and_bump_space(); + } + let digits = scratch.as_str(); + if digits.is_empty() { + return Err(self.error(span, ast::ErrorKind::DecimalEmpty)); + } + match u32::from_str_radix(digits, 10).ok() { + Some(n) => Ok(n), + None => Err(self.error(span, ast::ErrorKind::DecimalInvalid)), + } + } + + /// Parse a standard character class consisting primarily of characters or + /// character ranges, but can also contain nested character classes of + /// any type (sans `.`). + /// + /// This assumes the parser is positioned at the opening `[`. If parsing + /// is successful, then the parser is advanced to the position immediately + /// following the closing `]`. + #[inline(never)] + fn parse_set_class(&self) -> Result { + assert_eq!(self.char(), '['); + + let mut union = + ast::ClassSetUnion { span: self.span(), items: vec![] }; + loop { + self.bump_space(); + if self.is_eof() { + return Err(self.unclosed_class_error()); + } + match self.char() { + '[' => { + // If we've already parsed the opening bracket, then + // attempt to treat this as the beginning of an ASCII + // class. If ASCII class parsing fails, then the parser + // backs up to `[`. + if !self.parser().stack_class.borrow().is_empty() { + if let Some(cls) = self.maybe_parse_ascii_class() { + union.push(ast::ClassSetItem::Ascii(cls)); + continue; + } + } + union = self.push_class_open(union)?; + } + ']' => match self.pop_class(union)? 
{ + Either::Left(nested_union) => { + union = nested_union; + } + Either::Right(class) => return Ok(class), + }, + '&' if self.peek() == Some('&') => { + assert!(self.bump_if("&&")); + union = self.push_class_op( + ast::ClassSetBinaryOpKind::Intersection, + union, + ); + } + '-' if self.peek() == Some('-') => { + assert!(self.bump_if("--")); + union = self.push_class_op( + ast::ClassSetBinaryOpKind::Difference, + union, + ); + } + '~' if self.peek() == Some('~') => { + assert!(self.bump_if("~~")); + union = self.push_class_op( + ast::ClassSetBinaryOpKind::SymmetricDifference, + union, + ); + } + _ => { + union.push(self.parse_set_class_range()?); + } + } + } + } + + /// Parse a single primitive item in a character class set. The item to + /// be parsed can either be one of a simple literal character, a range + /// between two simple literal characters or a "primitive" character + /// class like \w or \p{Greek}. + /// + /// If an invalid escape is found, or if a character class is found where + /// a simple literal is expected (e.g., in a range), then an error is + /// returned. + #[inline(never)] + fn parse_set_class_range(&self) -> Result { + let prim1 = self.parse_set_class_item()?; + self.bump_space(); + if self.is_eof() { + return Err(self.unclosed_class_error()); + } + // If the next char isn't a `-`, then we don't have a range. + // There are two exceptions. If the char after a `-` is a `]`, then + // `-` is interpreted as a literal `-`. Alternatively, if the char + // after a `-` is a `-`, then `--` corresponds to a "difference" + // operation. + if self.char() != '-' + || self.peek_space() == Some(']') + || self.peek_space() == Some('-') + { + return prim1.into_class_set_item(self); + } + // OK, now we're parsing a range, so bump past the `-` and parse the + // second half of the range. 
+ if !self.bump_and_bump_space() { + return Err(self.unclosed_class_error()); + } + let prim2 = self.parse_set_class_item()?; + let range = ast::ClassSetRange { + span: Span::new(prim1.span().start, prim2.span().end), + start: prim1.into_class_literal(self)?, + end: prim2.into_class_literal(self)?, + }; + if !range.is_valid() { + return Err( + self.error(range.span, ast::ErrorKind::ClassRangeInvalid) + ); + } + Ok(ast::ClassSetItem::Range(range)) + } + + /// Parse a single item in a character class as a primitive, where the + /// primitive either consists of a verbatim literal or a single escape + /// sequence. + /// + /// This assumes the parser is positioned at the beginning of a primitive, + /// and advances the parser to the first position after the primitive if + /// successful. + /// + /// Note that it is the caller's responsibility to report an error if an + /// illegal primitive was parsed. + #[inline(never)] + fn parse_set_class_item(&self) -> Result { + if self.char() == '\\' { + self.parse_escape() + } else { + let x = Primitive::Literal(ast::Literal { + span: self.span_char(), + kind: ast::LiteralKind::Verbatim, + c: self.char(), + }); + self.bump(); + Ok(x) + } + } + + /// Parses the opening of a character class set. This includes the opening + /// bracket along with `^` if present to indicate negation. This also + /// starts parsing the opening set of unioned items if applicable, since + /// there are special rules applied to certain characters in the opening + /// of a character class. For example, `[^]]` is the class of all + /// characters not equal to `]`. (`]` would need to be escaped in any other + /// position.) Similarly for `-`. + /// + /// In all cases, the op inside the returned `ast::ClassBracketed` is an + /// empty union. This empty union should be replaced with the actual item + /// when it is popped from the parser's stack. 
+ /// + /// This assumes the parser is positioned at the opening `[` and advances + /// the parser to the first non-special byte of the character class. + /// + /// An error is returned if EOF is found. + #[inline(never)] + fn parse_set_class_open( + &self, + ) -> Result<(ast::ClassBracketed, ast::ClassSetUnion)> { + assert_eq!(self.char(), '['); + let start = self.pos(); + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::ClassUnclosed, + )); + } + + let negated = if self.char() != '^' { + false + } else { + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::ClassUnclosed, + )); + } + true + }; + // Accept any number of `-` as literal `-`. + let mut union = + ast::ClassSetUnion { span: self.span(), items: vec![] }; + while self.char() == '-' { + union.push(ast::ClassSetItem::Literal(ast::Literal { + span: self.span_char(), + kind: ast::LiteralKind::Verbatim, + c: '-', + })); + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, start), + ast::ErrorKind::ClassUnclosed, + )); + } + } + // If `]` is the *first* char in a set, then interpret it as a literal + // `]`. That is, an empty class is impossible to write. + if union.items.is_empty() && self.char() == ']' { + union.push(ast::ClassSetItem::Literal(ast::Literal { + span: self.span_char(), + kind: ast::LiteralKind::Verbatim, + c: ']', + })); + if !self.bump_and_bump_space() { + return Err(self.error( + Span::new(start, self.pos()), + ast::ErrorKind::ClassUnclosed, + )); + } + } + let set = ast::ClassBracketed { + span: Span::new(start, self.pos()), + negated, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: Span::new(union.span.start, union.span.start), + items: vec![], + }), + }; + Ok((set, union)) + } + + /// Attempt to parse an ASCII character class, e.g., `[:alnum:]`. + /// + /// This assumes the parser is positioned at the opening `[`. 
+ /// + /// If no valid ASCII character class could be found, then this does not + /// advance the parser and `None` is returned. Otherwise, the parser is + /// advanced to the first byte following the closing `]` and the + /// corresponding ASCII class is returned. + #[inline(never)] + fn maybe_parse_ascii_class(&self) -> Option { + // ASCII character classes are interesting from a parsing perspective + // because parsing cannot fail with any interesting error. For example, + // in order to use an ASCII character class, it must be enclosed in + // double brackets, e.g., `[[:alnum:]]`. Alternatively, you might think + // of it as "ASCII character classes have the syntax `[:NAME:]` which + // can only appear within character brackets." This means that things + // like `[[:lower:]A]` are legal constructs. + // + // However, if one types an incorrect ASCII character class, e.g., + // `[[:loower:]]`, then we treat that as a normal nested character + // class containing the characters `:elorw`. One might argue that we + // should return an error instead since the repeated colons give away + // the intent to write an ASCII class. But what if the user typed + // `[[:lower]]` instead? How can we tell that was intended to be an + // ASCII class and not just a normal nested class? + // + // Reasonable people can probably disagree over this, but for better + // or worse, we implement semantics that never fails at the expense + // of better failure modes. + assert_eq!(self.char(), '['); + // If parsing fails, then we back up the parser to this starting point. 
+ let start = self.pos(); + let mut negated = false; + if !self.bump() || self.char() != ':' { + self.parser().pos.set(start); + return None; + } + if !self.bump() { + self.parser().pos.set(start); + return None; + } + if self.char() == '^' { + negated = true; + if !self.bump() { + self.parser().pos.set(start); + return None; + } + } + let name_start = self.offset(); + while self.char() != ':' && self.bump() {} + if self.is_eof() { + self.parser().pos.set(start); + return None; + } + let name = &self.pattern()[name_start..self.offset()]; + if !self.bump_if(":]") { + self.parser().pos.set(start); + return None; + } + let kind = match ast::ClassAsciiKind::from_name(name) { + Some(kind) => kind, + None => { + self.parser().pos.set(start); + return None; + } + }; + Some(ast::ClassAscii { + span: Span::new(start, self.pos()), + kind, + negated, + }) + } + + /// Parse a Unicode class in either the single character notation, `\pN` + /// or the multi-character bracketed notation, `\p{Greek}`. This assumes + /// the parser is positioned at the `p` (or `P` for negation) and will + /// advance the parser to the character immediately following the class. + /// + /// Note that this does not check whether the class name is valid or not. 
+ #[inline(never)] + fn parse_unicode_class(&self) -> Result { + assert!(self.char() == 'p' || self.char() == 'P'); + + let mut scratch = self.parser().scratch.borrow_mut(); + scratch.clear(); + + let negated = self.char() == 'P'; + if !self.bump_and_bump_space() { + return Err( + self.error(self.span(), ast::ErrorKind::EscapeUnexpectedEof) + ); + } + let (start, kind) = if self.char() == '{' { + let start = self.span_char().end; + while self.bump_and_bump_space() && self.char() != '}' { + scratch.push(self.char()); + } + if self.is_eof() { + return Err(self + .error(self.span(), ast::ErrorKind::EscapeUnexpectedEof)); + } + assert_eq!(self.char(), '}'); + self.bump(); + + let name = scratch.as_str(); + if let Some(i) = name.find("!=") { + ( + start, + ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::NotEqual, + name: name[..i].to_string(), + value: name[i + 2..].to_string(), + }, + ) + } else if let Some(i) = name.find(':') { + ( + start, + ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Colon, + name: name[..i].to_string(), + value: name[i + 1..].to_string(), + }, + ) + } else if let Some(i) = name.find('=') { + ( + start, + ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Equal, + name: name[..i].to_string(), + value: name[i + 1..].to_string(), + }, + ) + } else { + (start, ast::ClassUnicodeKind::Named(name.to_string())) + } + } else { + let start = self.pos(); + let c = self.char(); + if c == '\\' { + return Err(self.error( + self.span_char(), + ast::ErrorKind::UnicodeClassInvalid, + )); + } + self.bump_and_bump_space(); + let kind = ast::ClassUnicodeKind::OneLetter(c); + (start, kind) + }; + Ok(ast::ClassUnicode { + span: Span::new(start, self.pos()), + negated, + kind, + }) + } + + /// Parse a Perl character class, e.g., `\d` or `\W`. This assumes the + /// parser is currently at a valid character class name and will be + /// advanced to the character immediately following the class. 
+ #[inline(never)] + fn parse_perl_class(&self) -> ast::ClassPerl { + let c = self.char(); + let span = self.span_char(); + self.bump(); + let (negated, kind) = match c { + 'd' => (false, ast::ClassPerlKind::Digit), + 'D' => (true, ast::ClassPerlKind::Digit), + 's' => (false, ast::ClassPerlKind::Space), + 'S' => (true, ast::ClassPerlKind::Space), + 'w' => (false, ast::ClassPerlKind::Word), + 'W' => (true, ast::ClassPerlKind::Word), + c => panic!("expected valid Perl class but got '{c}'"), + }; + ast::ClassPerl { span, kind, negated } + } +} + +/// A type that traverses a fully parsed Ast and checks whether its depth +/// exceeds the specified nesting limit. If it does, then an error is returned. +#[derive(Debug)] +struct NestLimiter<'p, 's, P> { + /// The parser that is checking the nest limit. + p: &'p ParserI<'s, P>, + /// The current depth while walking an Ast. + depth: u32, +} + +impl<'p, 's, P: Borrow> NestLimiter<'p, 's, P> { + fn new(p: &'p ParserI<'s, P>) -> NestLimiter<'p, 's, P> { + NestLimiter { p, depth: 0 } + } + + #[inline(never)] + fn check(self, ast: &Ast) -> Result<()> { + ast::visit(ast, self) + } + + fn increment_depth(&mut self, span: &Span) -> Result<()> { + let new = self.depth.checked_add(1).ok_or_else(|| { + self.p.error( + span.clone(), + ast::ErrorKind::NestLimitExceeded(u32::MAX), + ) + })?; + let limit = self.p.parser().nest_limit; + if new > limit { + return Err(self.p.error( + span.clone(), + ast::ErrorKind::NestLimitExceeded(limit), + )); + } + self.depth = new; + Ok(()) + } + + fn decrement_depth(&mut self) { + // Assuming the correctness of the visitor, this should never drop + // below 0. 
+ self.depth = self.depth.checked_sub(1).unwrap(); + } +} + +impl<'p, 's, P: Borrow> ast::Visitor for NestLimiter<'p, 's, P> { + type Output = (); + type Err = ast::Error; + + fn finish(self) -> Result<()> { + Ok(()) + } + + fn visit_pre(&mut self, ast: &Ast) -> Result<()> { + let span = match *ast { + Ast::Empty(_) + | Ast::Flags(_) + | Ast::Literal(_) + | Ast::Dot(_) + | Ast::Assertion(_) + | Ast::ClassUnicode(_) + | Ast::ClassPerl(_) => { + // These are all base cases, so we don't increment depth. + return Ok(()); + } + Ast::ClassBracketed(ref x) => &x.span, + Ast::Repetition(ref x) => &x.span, + Ast::Group(ref x) => &x.span, + Ast::Alternation(ref x) => &x.span, + Ast::Concat(ref x) => &x.span, + }; + self.increment_depth(span) + } + + fn visit_post(&mut self, ast: &Ast) -> Result<()> { + match *ast { + Ast::Empty(_) + | Ast::Flags(_) + | Ast::Literal(_) + | Ast::Dot(_) + | Ast::Assertion(_) + | Ast::ClassUnicode(_) + | Ast::ClassPerl(_) => { + // These are all base cases, so we don't decrement depth. + Ok(()) + } + Ast::ClassBracketed(_) + | Ast::Repetition(_) + | Ast::Group(_) + | Ast::Alternation(_) + | Ast::Concat(_) => { + self.decrement_depth(); + Ok(()) + } + } + } + + fn visit_class_set_item_pre( + &mut self, + ast: &ast::ClassSetItem, + ) -> Result<()> { + let span = match *ast { + ast::ClassSetItem::Empty(_) + | ast::ClassSetItem::Literal(_) + | ast::ClassSetItem::Range(_) + | ast::ClassSetItem::Ascii(_) + | ast::ClassSetItem::Unicode(_) + | ast::ClassSetItem::Perl(_) => { + // These are all base cases, so we don't increment depth. 
+ return Ok(()); + } + ast::ClassSetItem::Bracketed(ref x) => &x.span, + ast::ClassSetItem::Union(ref x) => &x.span, + }; + self.increment_depth(span) + } + + fn visit_class_set_item_post( + &mut self, + ast: &ast::ClassSetItem, + ) -> Result<()> { + match *ast { + ast::ClassSetItem::Empty(_) + | ast::ClassSetItem::Literal(_) + | ast::ClassSetItem::Range(_) + | ast::ClassSetItem::Ascii(_) + | ast::ClassSetItem::Unicode(_) + | ast::ClassSetItem::Perl(_) => { + // These are all base cases, so we don't decrement depth. + Ok(()) + } + ast::ClassSetItem::Bracketed(_) | ast::ClassSetItem::Union(_) => { + self.decrement_depth(); + Ok(()) + } + } + } + + fn visit_class_set_binary_op_pre( + &mut self, + ast: &ast::ClassSetBinaryOp, + ) -> Result<()> { + self.increment_depth(&ast.span) + } + + fn visit_class_set_binary_op_post( + &mut self, + _ast: &ast::ClassSetBinaryOp, + ) -> Result<()> { + self.decrement_depth(); + Ok(()) + } +} + +/// When the result is an error, transforms the ast::ErrorKind from the source +/// Result into another one. This function is used to return clearer error +/// messages when possible. +fn specialize_err( + result: Result, + from: ast::ErrorKind, + to: ast::ErrorKind, +) -> Result { + if let Err(e) = result { + if e.kind == from { + Err(ast::Error { kind: to, pattern: e.pattern, span: e.span }) + } else { + Err(e) + } + } else { + result + } +} + +#[cfg(test)] +mod tests { + use core::ops::Range; + + use alloc::format; + + use super::*; + + // Our own assert_eq, which has slightly better formatting (but honestly + // still kind of crappy). + macro_rules! assert_eq { + ($left:expr, $right:expr) => {{ + match (&$left, &$right) { + (left_val, right_val) => { + if !(*left_val == *right_val) { + panic!( + "assertion failed: `(left == right)`\n\n\ + left: `{:?}`\nright: `{:?}`\n\n", + left_val, right_val + ) + } + } + } + }}; + } + + // We create these errors to compare with real ast::Errors in the tests. 
+ // We define equality between TestError and ast::Error to disregard the + // pattern string in ast::Error, which is annoying to provide in tests. + #[derive(Clone, Debug)] + struct TestError { + span: Span, + kind: ast::ErrorKind, + } + + impl PartialEq for TestError { + fn eq(&self, other: &ast::Error) -> bool { + self.span == other.span && self.kind == other.kind + } + } + + impl PartialEq for ast::Error { + fn eq(&self, other: &TestError) -> bool { + self.span == other.span && self.kind == other.kind + } + } + + fn s(str: &str) -> String { + str.to_string() + } + + fn parser(pattern: &str) -> ParserI<'_, Parser> { + ParserI::new(Parser::new(), pattern) + } + + fn parser_octal(pattern: &str) -> ParserI<'_, Parser> { + let parser = ParserBuilder::new().octal(true).build(); + ParserI::new(parser, pattern) + } + + fn parser_empty_min_range(pattern: &str) -> ParserI<'_, Parser> { + let parser = ParserBuilder::new().empty_min_range(true).build(); + ParserI::new(parser, pattern) + } + + fn parser_nest_limit( + pattern: &str, + nest_limit: u32, + ) -> ParserI<'_, Parser> { + let p = ParserBuilder::new().nest_limit(nest_limit).build(); + ParserI::new(p, pattern) + } + + fn parser_ignore_whitespace(pattern: &str) -> ParserI<'_, Parser> { + let p = ParserBuilder::new().ignore_whitespace(true).build(); + ParserI::new(p, pattern) + } + + /// Short alias for creating a new span. + fn nspan(start: Position, end: Position) -> Span { + Span::new(start, end) + } + + /// Short alias for creating a new position. + fn npos(offset: usize, line: usize, column: usize) -> Position { + Position::new(offset, line, column) + } + + /// Create a new span from the given offset range. This assumes a single + /// line and sets the columns based on the offsets. i.e., This only works + /// out of the box for ASCII, which is fine for most tests. 
+ fn span(range: Range) -> Span { + let start = Position::new(range.start, 1, range.start + 1); + let end = Position::new(range.end, 1, range.end + 1); + Span::new(start, end) + } + + /// Create a new span for the corresponding byte range in the given string. + fn span_range(subject: &str, range: Range) -> Span { + let start = Position { + offset: range.start, + line: 1 + subject[..range.start].matches('\n').count(), + column: 1 + subject[..range.start] + .chars() + .rev() + .position(|c| c == '\n') + .unwrap_or(subject[..range.start].chars().count()), + }; + let end = Position { + offset: range.end, + line: 1 + subject[..range.end].matches('\n').count(), + column: 1 + subject[..range.end] + .chars() + .rev() + .position(|c| c == '\n') + .unwrap_or(subject[..range.end].chars().count()), + }; + Span::new(start, end) + } + + /// Create a verbatim literal starting at the given position. + fn lit(c: char, start: usize) -> Ast { + lit_with(c, span(start..start + c.len_utf8())) + } + + /// Create a meta literal starting at the given position. + fn meta_lit(c: char, span: Span) -> Ast { + Ast::literal(ast::Literal { span, kind: ast::LiteralKind::Meta, c }) + } + + /// Create a verbatim literal with the given span. + fn lit_with(c: char, span: Span) -> Ast { + Ast::literal(ast::Literal { + span, + kind: ast::LiteralKind::Verbatim, + c, + }) + } + + /// Create a concatenation with the given range. + fn concat(range: Range, asts: Vec) -> Ast { + concat_with(span(range), asts) + } + + /// Create a concatenation with the given span. + fn concat_with(span: Span, asts: Vec) -> Ast { + Ast::concat(ast::Concat { span, asts }) + } + + /// Create an alternation with the given span. + fn alt(range: Range, asts: Vec) -> Ast { + Ast::alternation(ast::Alternation { span: span(range), asts }) + } + + /// Create a capturing group with the given span. 
+ fn group(range: Range, index: u32, ast: Ast) -> Ast { + Ast::group(ast::Group { + span: span(range), + kind: ast::GroupKind::CaptureIndex(index), + ast: Box::new(ast), + }) + } + + /// Create an ast::SetFlags. + /// + /// The given pattern should be the full pattern string. The range given + /// should correspond to the byte offsets where the flag set occurs. + /// + /// If negated is true, then the set is interpreted as beginning with a + /// negation. + fn flag_set( + pat: &str, + range: Range, + flag: ast::Flag, + negated: bool, + ) -> Ast { + let mut items = vec![ast::FlagsItem { + span: span_range(pat, (range.end - 2)..(range.end - 1)), + kind: ast::FlagsItemKind::Flag(flag), + }]; + if negated { + items.insert( + 0, + ast::FlagsItem { + span: span_range(pat, (range.start + 2)..(range.end - 2)), + kind: ast::FlagsItemKind::Negation, + }, + ); + } + Ast::flags(ast::SetFlags { + span: span_range(pat, range.clone()), + flags: ast::Flags { + span: span_range(pat, (range.start + 2)..(range.end - 1)), + items, + }, + }) + } + + #[test] + fn parse_nest_limit() { + // A nest limit of 0 still allows some types of regexes. + assert_eq!( + parser_nest_limit("", 0).parse(), + Ok(Ast::empty(span(0..0))) + ); + assert_eq!(parser_nest_limit("a", 0).parse(), Ok(lit('a', 0))); + + // Test repetition operations, which require one level of nesting. 
+ assert_eq!( + parser_nest_limit("a+", 0).parse().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::NestLimitExceeded(0), + } + ); + assert_eq!( + parser_nest_limit("a+", 1).parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::OneOrMore, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser_nest_limit("(a)+", 1).parse().unwrap_err(), + TestError { + span: span(0..3), + kind: ast::ErrorKind::NestLimitExceeded(1), + } + ); + assert_eq!( + parser_nest_limit("a+*", 1).parse().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::NestLimitExceeded(1), + } + ); + assert_eq!( + parser_nest_limit("a+*", 2).parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..3), + op: ast::RepetitionOp { + span: span(2..3), + kind: ast::RepetitionKind::ZeroOrMore, + }, + greedy: true, + ast: Box::new(Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::OneOrMore, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })), + })) + ); + + // Test concatenations. A concatenation requires one level of nesting. + assert_eq!( + parser_nest_limit("ab", 0).parse().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::NestLimitExceeded(0), + } + ); + assert_eq!( + parser_nest_limit("ab", 1).parse(), + Ok(concat(0..2, vec![lit('a', 0), lit('b', 1)])) + ); + assert_eq!( + parser_nest_limit("abc", 1).parse(), + Ok(concat(0..3, vec![lit('a', 0), lit('b', 1), lit('c', 2)])) + ); + + // Test alternations. An alternation requires one level of nesting. 
+ assert_eq!( + parser_nest_limit("a|b", 0).parse().unwrap_err(), + TestError { + span: span(0..3), + kind: ast::ErrorKind::NestLimitExceeded(0), + } + ); + assert_eq!( + parser_nest_limit("a|b", 1).parse(), + Ok(alt(0..3, vec![lit('a', 0), lit('b', 2)])) + ); + assert_eq!( + parser_nest_limit("a|b|c", 1).parse(), + Ok(alt(0..5, vec![lit('a', 0), lit('b', 2), lit('c', 4)])) + ); + + // Test character classes. Classes form their own mini-recursive + // syntax! + assert_eq!( + parser_nest_limit("[a]", 0).parse().unwrap_err(), + TestError { + span: span(0..3), + kind: ast::ErrorKind::NestLimitExceeded(0), + } + ); + assert_eq!( + parser_nest_limit("[a]", 1).parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..3), + negated: false, + kind: ast::ClassSet::Item(ast::ClassSetItem::Literal( + ast::Literal { + span: span(1..2), + kind: ast::LiteralKind::Verbatim, + c: 'a', + } + )), + })) + ); + assert_eq!( + parser_nest_limit("[ab]", 1).parse().unwrap_err(), + TestError { + span: span(1..3), + kind: ast::ErrorKind::NestLimitExceeded(1), + } + ); + assert_eq!( + parser_nest_limit("[ab[cd]]", 2).parse().unwrap_err(), + TestError { + span: span(3..7), + kind: ast::ErrorKind::NestLimitExceeded(2), + } + ); + assert_eq!( + parser_nest_limit("[ab[cd]]", 3).parse().unwrap_err(), + TestError { + span: span(4..6), + kind: ast::ErrorKind::NestLimitExceeded(3), + } + ); + assert_eq!( + parser_nest_limit("[a--b]", 1).parse().unwrap_err(), + TestError { + span: span(1..5), + kind: ast::ErrorKind::NestLimitExceeded(1), + } + ); + assert_eq!( + parser_nest_limit("[a--bc]", 2).parse().unwrap_err(), + TestError { + span: span(4..6), + kind: ast::ErrorKind::NestLimitExceeded(2), + } + ); + } + + #[test] + fn parse_comments() { + let pat = "(?x) +# This is comment 1. +foo # This is comment 2. + # This is comment 3. 
+bar +# This is comment 4."; + let astc = parser(pat).parse_with_comments().unwrap(); + assert_eq!( + astc.ast, + concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + lit_with('f', span_range(pat, 26..27)), + lit_with('o', span_range(pat, 27..28)), + lit_with('o', span_range(pat, 28..29)), + lit_with('b', span_range(pat, 74..75)), + lit_with('a', span_range(pat, 75..76)), + lit_with('r', span_range(pat, 76..77)), + ] + ) + ); + assert_eq!( + astc.comments, + vec![ + ast::Comment { + span: span_range(pat, 5..26), + comment: s(" This is comment 1."), + }, + ast::Comment { + span: span_range(pat, 30..51), + comment: s(" This is comment 2."), + }, + ast::Comment { + span: span_range(pat, 53..74), + comment: s(" This is comment 3."), + }, + ast::Comment { + span: span_range(pat, 78..98), + comment: s(" This is comment 4."), + }, + ] + ); + } + + #[test] + fn parse_holistic() { + assert_eq!(parser("]").parse(), Ok(lit(']', 0))); + assert_eq!( + parser(r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~").parse(), + Ok(concat( + 0..36, + vec![ + meta_lit('\\', span(0..2)), + meta_lit('.', span(2..4)), + meta_lit('+', span(4..6)), + meta_lit('*', span(6..8)), + meta_lit('?', span(8..10)), + meta_lit('(', span(10..12)), + meta_lit(')', span(12..14)), + meta_lit('|', span(14..16)), + meta_lit('[', span(16..18)), + meta_lit(']', span(18..20)), + meta_lit('{', span(20..22)), + meta_lit('}', span(22..24)), + meta_lit('^', span(24..26)), + meta_lit('$', span(26..28)), + meta_lit('#', span(28..30)), + meta_lit('&', span(30..32)), + meta_lit('-', span(32..34)), + meta_lit('~', span(34..36)), + ] + )) + ); + } + + #[test] + fn parse_ignore_whitespace() { + // Test that basic whitespace insensitivity works. 
+ let pat = "(?x)a b"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + nspan(npos(0, 1, 1), npos(7, 1, 8)), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))), + lit_with('b', nspan(npos(6, 1, 7), npos(7, 1, 8))), + ] + )) + ); + + // Test that we can toggle whitespace insensitivity. + let pat = "(?x)a b(?-x)a b"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + nspan(npos(0, 1, 1), npos(15, 1, 16)), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))), + lit_with('b', nspan(npos(6, 1, 7), npos(7, 1, 8))), + flag_set(pat, 7..12, ast::Flag::IgnoreWhitespace, true), + lit_with('a', nspan(npos(12, 1, 13), npos(13, 1, 14))), + lit_with(' ', nspan(npos(13, 1, 14), npos(14, 1, 15))), + lit_with('b', nspan(npos(14, 1, 15), npos(15, 1, 16))), + ] + )) + ); + + // Test that nesting whitespace insensitive flags works. + let pat = "a (?x:a )a "; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..11), + vec![ + lit_with('a', span_range(pat, 0..1)), + lit_with(' ', span_range(pat, 1..2)), + Ast::group(ast::Group { + span: span_range(pat, 2..9), + kind: ast::GroupKind::NonCapturing(ast::Flags { + span: span_range(pat, 4..5), + items: vec![ast::FlagsItem { + span: span_range(pat, 4..5), + kind: ast::FlagsItemKind::Flag( + ast::Flag::IgnoreWhitespace + ), + },], + }), + ast: Box::new(lit_with('a', span_range(pat, 6..7))), + }), + lit_with('a', span_range(pat, 9..10)), + lit_with(' ', span_range(pat, 10..11)), + ] + )) + ); + + // Test that whitespace after an opening paren is insignificant. 
+ let pat = "(?x)( ?P a )"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + Ast::group(ast::Group { + span: span_range(pat, 4..pat.len()), + kind: ast::GroupKind::CaptureName { + starts_with_p: true, + name: ast::CaptureName { + span: span_range(pat, 9..12), + name: s("foo"), + index: 1, + } + }, + ast: Box::new(lit_with('a', span_range(pat, 14..15))), + }), + ] + )) + ); + let pat = "(?x)( a )"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + Ast::group(ast::Group { + span: span_range(pat, 4..pat.len()), + kind: ast::GroupKind::CaptureIndex(1), + ast: Box::new(lit_with('a', span_range(pat, 7..8))), + }), + ] + )) + ); + let pat = "(?x)( ?: a )"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + Ast::group(ast::Group { + span: span_range(pat, 4..pat.len()), + kind: ast::GroupKind::NonCapturing(ast::Flags { + span: span_range(pat, 8..8), + items: vec![], + }), + ast: Box::new(lit_with('a', span_range(pat, 11..12))), + }), + ] + )) + ); + let pat = r"(?x)\x { 53 }"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + Ast::literal(ast::Literal { + span: span(4..13), + kind: ast::LiteralKind::HexBrace( + ast::HexLiteralKind::X + ), + c: 'S', + }), + ] + )) + ); + + // Test that whitespace after an escape is OK. 
+ let pat = r"(?x)\ "; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + flag_set(pat, 0..4, ast::Flag::IgnoreWhitespace, false), + Ast::literal(ast::Literal { + span: span_range(pat, 4..6), + kind: ast::LiteralKind::Superfluous, + c: ' ', + }), + ] + )) + ); + } + + #[test] + fn parse_newlines() { + let pat = ".\n."; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..3), + vec![ + Ast::dot(span_range(pat, 0..1)), + lit_with('\n', span_range(pat, 1..2)), + Ast::dot(span_range(pat, 2..3)), + ] + )) + ); + + let pat = "foobar\nbaz\nquux\n"; + assert_eq!( + parser(pat).parse(), + Ok(concat_with( + span_range(pat, 0..pat.len()), + vec![ + lit_with('f', nspan(npos(0, 1, 1), npos(1, 1, 2))), + lit_with('o', nspan(npos(1, 1, 2), npos(2, 1, 3))), + lit_with('o', nspan(npos(2, 1, 3), npos(3, 1, 4))), + lit_with('b', nspan(npos(3, 1, 4), npos(4, 1, 5))), + lit_with('a', nspan(npos(4, 1, 5), npos(5, 1, 6))), + lit_with('r', nspan(npos(5, 1, 6), npos(6, 1, 7))), + lit_with('\n', nspan(npos(6, 1, 7), npos(7, 2, 1))), + lit_with('b', nspan(npos(7, 2, 1), npos(8, 2, 2))), + lit_with('a', nspan(npos(8, 2, 2), npos(9, 2, 3))), + lit_with('z', nspan(npos(9, 2, 3), npos(10, 2, 4))), + lit_with('\n', nspan(npos(10, 2, 4), npos(11, 3, 1))), + lit_with('q', nspan(npos(11, 3, 1), npos(12, 3, 2))), + lit_with('u', nspan(npos(12, 3, 2), npos(13, 3, 3))), + lit_with('u', nspan(npos(13, 3, 3), npos(14, 3, 4))), + lit_with('x', nspan(npos(14, 3, 4), npos(15, 3, 5))), + lit_with('\n', nspan(npos(15, 3, 5), npos(16, 4, 1))), + ] + )) + ); + } + + #[test] + fn parse_uncounted_repetition() { + assert_eq!( + parser(r"a*").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::ZeroOrMore, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a+").parse(), + Ok(Ast::repetition(ast::Repetition { + span: 
span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::OneOrMore, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + + assert_eq!( + parser(r"a?").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a??").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..3), + op: ast::RepetitionOp { + span: span(1..3), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: false, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a?").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a?b").parse(), + Ok(concat( + 0..3, + vec![ + Ast::repetition(ast::Repetition { + span: span(0..2), + op: ast::RepetitionOp { + span: span(1..2), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: true, + ast: Box::new(lit('a', 0)), + }), + lit('b', 2), + ] + )) + ); + assert_eq!( + parser(r"a??b").parse(), + Ok(concat( + 0..4, + vec![ + Ast::repetition(ast::Repetition { + span: span(0..3), + op: ast::RepetitionOp { + span: span(1..3), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: false, + ast: Box::new(lit('a', 0)), + }), + lit('b', 3), + ] + )) + ); + assert_eq!( + parser(r"ab?").parse(), + Ok(concat( + 0..3, + vec![ + lit('a', 0), + Ast::repetition(ast::Repetition { + span: span(1..3), + op: ast::RepetitionOp { + span: span(2..3), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: true, + ast: Box::new(lit('b', 1)), + }), + ] + )) + ); + assert_eq!( + parser(r"(ab)?").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..5), + op: ast::RepetitionOp { + span: span(4..5), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: 
true, + ast: Box::new(group( + 0..4, + 1, + concat(1..3, vec![lit('a', 1), lit('b', 2),]) + )), + })) + ); + assert_eq!( + parser(r"|a?").parse(), + Ok(alt( + 0..3, + vec![ + Ast::empty(span(0..0)), + Ast::repetition(ast::Repetition { + span: span(1..3), + op: ast::RepetitionOp { + span: span(2..3), + kind: ast::RepetitionKind::ZeroOrOne, + }, + greedy: true, + ast: Box::new(lit('a', 1)), + }), + ] + )) + ); + + assert_eq!( + parser(r"*").parse().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"(?i)*").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"(*)").parse().unwrap_err(), + TestError { + span: span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"(?:?)").parse().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"+").parse().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"?").parse().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"(?)").parse().unwrap_err(), + TestError { + span: span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"|*").parse().unwrap_err(), + TestError { + span: span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"|+").parse().unwrap_err(), + TestError { + span: span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"|?").parse().unwrap_err(), + TestError { + span: span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + } + + #[test] + fn parse_counted_repetition() { + assert_eq!( + parser(r"a{5}").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..4), + op: ast::RepetitionOp { + span: span(1..4), + 
kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Exactly(5) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a{5,}").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..5), + op: ast::RepetitionOp { + span: span(1..5), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::AtLeast(5) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a{5,9}").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..6), + op: ast::RepetitionOp { + span: span(1..6), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Bounded(5, 9) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a{5}?").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..5), + op: ast::RepetitionOp { + span: span(1..5), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Exactly(5) + ), + }, + greedy: false, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"ab{5}").parse(), + Ok(concat( + 0..5, + vec![ + lit('a', 0), + Ast::repetition(ast::Repetition { + span: span(1..5), + op: ast::RepetitionOp { + span: span(2..5), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Exactly(5) + ), + }, + greedy: true, + ast: Box::new(lit('b', 1)), + }), + ] + )) + ); + assert_eq!( + parser(r"ab{5}c").parse(), + Ok(concat( + 0..6, + vec![ + lit('a', 0), + Ast::repetition(ast::Repetition { + span: span(1..5), + op: ast::RepetitionOp { + span: span(2..5), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Exactly(5) + ), + }, + greedy: true, + ast: Box::new(lit('b', 1)), + }), + lit('c', 5), + ] + )) + ); + + assert_eq!( + parser(r"a{ 5 }").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..6), + op: ast::RepetitionOp { + span: span(1..6), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Exactly(5) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"a{ 5 
, 9 }").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..10), + op: ast::RepetitionOp { + span: span(1..10), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Bounded(5, 9) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser_empty_min_range(r"a{,9}").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..5), + op: ast::RepetitionOp { + span: span(1..5), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Bounded(0, 9) + ), + }, + greedy: true, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser_ignore_whitespace(r"a{5,9} ?").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..8), + op: ast::RepetitionOp { + span: span(1..8), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Bounded(5, 9) + ), + }, + greedy: false, + ast: Box::new(lit('a', 0)), + })) + ); + assert_eq!( + parser(r"\b{5,9}").parse(), + Ok(Ast::repetition(ast::Repetition { + span: span(0..7), + op: ast::RepetitionOp { + span: span(2..7), + kind: ast::RepetitionKind::Range( + ast::RepetitionRange::Bounded(5, 9) + ), + }, + greedy: true, + ast: Box::new(Ast::assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::WordBoundary, + })), + })) + ); + + assert_eq!( + parser(r"(?i){0}").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"(?m){1,1}").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"a{]}").parse().unwrap_err(), + TestError { + span: span(2..2), + kind: ast::ErrorKind::RepetitionCountDecimalEmpty, + } + ); + assert_eq!( + parser(r"a{1,]}").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::RepetitionCountDecimalEmpty, + } + ); + assert_eq!( + parser(r"a{").parse().unwrap_err(), + TestError { + span: span(1..2), + kind: ast::ErrorKind::RepetitionCountUnclosed, + } + ); + 
assert_eq!( + parser(r"a{}").parse().unwrap_err(), + TestError { + span: span(2..2), + kind: ast::ErrorKind::RepetitionCountDecimalEmpty, + } + ); + assert_eq!( + parser(r"a{a").parse().unwrap_err(), + TestError { + span: span(2..2), + kind: ast::ErrorKind::RepetitionCountDecimalEmpty, + } + ); + assert_eq!( + parser(r"a{9999999999}").parse().unwrap_err(), + TestError { + span: span(2..12), + kind: ast::ErrorKind::DecimalInvalid, + } + ); + assert_eq!( + parser(r"a{9").parse().unwrap_err(), + TestError { + span: span(1..3), + kind: ast::ErrorKind::RepetitionCountUnclosed, + } + ); + assert_eq!( + parser(r"a{9,a").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::RepetitionCountDecimalEmpty, + } + ); + assert_eq!( + parser(r"a{9,9999999999}").parse().unwrap_err(), + TestError { + span: span(4..14), + kind: ast::ErrorKind::DecimalInvalid, + } + ); + assert_eq!( + parser(r"a{9,").parse().unwrap_err(), + TestError { + span: span(1..4), + kind: ast::ErrorKind::RepetitionCountUnclosed, + } + ); + assert_eq!( + parser(r"a{9,11").parse().unwrap_err(), + TestError { + span: span(1..6), + kind: ast::ErrorKind::RepetitionCountUnclosed, + } + ); + assert_eq!( + parser(r"a{2,1}").parse().unwrap_err(), + TestError { + span: span(1..6), + kind: ast::ErrorKind::RepetitionCountInvalid, + } + ); + assert_eq!( + parser(r"{5}").parse().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + assert_eq!( + parser(r"|{5}").parse().unwrap_err(), + TestError { + span: span(1..1), + kind: ast::ErrorKind::RepetitionMissing, + } + ); + } + + #[test] + fn parse_alternate() { + assert_eq!( + parser(r"a|b").parse(), + Ok(Ast::alternation(ast::Alternation { + span: span(0..3), + asts: vec![lit('a', 0), lit('b', 2)], + })) + ); + assert_eq!( + parser(r"(a|b)").parse(), + Ok(group( + 0..5, + 1, + Ast::alternation(ast::Alternation { + span: span(1..4), + asts: vec![lit('a', 1), lit('b', 3)], + }) + )) + ); + + assert_eq!( + 
parser(r"a|b|c").parse(), + Ok(Ast::alternation(ast::Alternation { + span: span(0..5), + asts: vec![lit('a', 0), lit('b', 2), lit('c', 4)], + })) + ); + assert_eq!( + parser(r"ax|by|cz").parse(), + Ok(Ast::alternation(ast::Alternation { + span: span(0..8), + asts: vec![ + concat(0..2, vec![lit('a', 0), lit('x', 1)]), + concat(3..5, vec![lit('b', 3), lit('y', 4)]), + concat(6..8, vec![lit('c', 6), lit('z', 7)]), + ], + })) + ); + assert_eq!( + parser(r"(ax|by|cz)").parse(), + Ok(group( + 0..10, + 1, + Ast::alternation(ast::Alternation { + span: span(1..9), + asts: vec![ + concat(1..3, vec![lit('a', 1), lit('x', 2)]), + concat(4..6, vec![lit('b', 4), lit('y', 5)]), + concat(7..9, vec![lit('c', 7), lit('z', 8)]), + ], + }) + )) + ); + assert_eq!( + parser(r"(ax|(by|(cz)))").parse(), + Ok(group( + 0..14, + 1, + alt( + 1..13, + vec![ + concat(1..3, vec![lit('a', 1), lit('x', 2)]), + group( + 4..13, + 2, + alt( + 5..12, + vec![ + concat( + 5..7, + vec![lit('b', 5), lit('y', 6)] + ), + group( + 8..12, + 3, + concat( + 9..11, + vec![lit('c', 9), lit('z', 10),] + ) + ), + ] + ) + ), + ] + ) + )) + ); + + assert_eq!( + parser(r"|").parse(), + Ok(alt( + 0..1, + vec![Ast::empty(span(0..0)), Ast::empty(span(1..1)),] + )) + ); + assert_eq!( + parser(r"||").parse(), + Ok(alt( + 0..2, + vec![ + Ast::empty(span(0..0)), + Ast::empty(span(1..1)), + Ast::empty(span(2..2)), + ] + )) + ); + assert_eq!( + parser(r"a|").parse(), + Ok(alt(0..2, vec![lit('a', 0), Ast::empty(span(2..2)),])) + ); + assert_eq!( + parser(r"|a").parse(), + Ok(alt(0..2, vec![Ast::empty(span(0..0)), lit('a', 1),])) + ); + + assert_eq!( + parser(r"(|)").parse(), + Ok(group( + 0..3, + 1, + alt( + 1..2, + vec![Ast::empty(span(1..1)), Ast::empty(span(2..2)),] + ) + )) + ); + assert_eq!( + parser(r"(a|)").parse(), + Ok(group( + 0..4, + 1, + alt(1..3, vec![lit('a', 1), Ast::empty(span(3..3)),]) + )) + ); + assert_eq!( + parser(r"(|a)").parse(), + Ok(group( + 0..4, + 1, + alt(1..3, vec![Ast::empty(span(1..1)), lit('a', 
2),]) + )) + ); + + assert_eq!( + parser(r"a|b)").parse().unwrap_err(), + TestError { + span: span(3..4), + kind: ast::ErrorKind::GroupUnopened, + } + ); + assert_eq!( + parser(r"(a|b").parse().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::GroupUnclosed, + } + ); + } + + #[test] + fn parse_unsupported_lookaround() { + assert_eq!( + parser(r"(?=a)").parse().unwrap_err(), + TestError { + span: span(0..3), + kind: ast::ErrorKind::UnsupportedLookAround, + } + ); + assert_eq!( + parser(r"(?!a)").parse().unwrap_err(), + TestError { + span: span(0..3), + kind: ast::ErrorKind::UnsupportedLookAround, + } + ); + assert_eq!( + parser(r"(?<=a)").parse().unwrap_err(), + TestError { + span: span(0..4), + kind: ast::ErrorKind::UnsupportedLookAround, + } + ); + assert_eq!( + parser(r"(?z)").parse(), + Ok(Ast::group(ast::Group { + span: span(0..7), + kind: ast::GroupKind::CaptureName { + starts_with_p: false, + name: ast::CaptureName { + span: span(3..4), + name: s("a"), + index: 1, + } + }, + ast: Box::new(lit('z', 5)), + })) + ); + assert_eq!( + parser("(?Pz)").parse(), + Ok(Ast::group(ast::Group { + span: span(0..8), + kind: ast::GroupKind::CaptureName { + starts_with_p: true, + name: ast::CaptureName { + span: span(4..5), + name: s("a"), + index: 1, + } + }, + ast: Box::new(lit('z', 6)), + })) + ); + assert_eq!( + parser("(?Pz)").parse(), + Ok(Ast::group(ast::Group { + span: span(0..10), + kind: ast::GroupKind::CaptureName { + starts_with_p: true, + name: ast::CaptureName { + span: span(4..7), + name: s("abc"), + index: 1, + } + }, + ast: Box::new(lit('z', 8)), + })) + ); + + assert_eq!( + parser("(?Pz)").parse(), + Ok(Ast::group(ast::Group { + span: span(0..10), + kind: ast::GroupKind::CaptureName { + starts_with_p: true, + name: ast::CaptureName { + span: span(4..7), + name: s("a_1"), + index: 1, + } + }, + ast: Box::new(lit('z', 8)), + })) + ); + + assert_eq!( + parser("(?Pz)").parse(), + Ok(Ast::group(ast::Group { + span: span(0..10), + kind: 
ast::GroupKind::CaptureName { + starts_with_p: true, + name: ast::CaptureName { + span: span(4..7), + name: s("a.1"), + index: 1, + } + }, + ast: Box::new(lit('z', 8)), + })) + ); + + assert_eq!( + parser("(?Pz)").parse(), + Ok(Ast::group(ast::Group { + span: span(0..11), + kind: ast::GroupKind::CaptureName { + starts_with_p: true, + name: ast::CaptureName { + span: span(4..8), + name: s("a[1]"), + index: 1, + } + }, + ast: Box::new(lit('z', 9)), + })) + ); + + assert_eq!( + parser("(?P)").parse(), + Ok(Ast::group(ast::Group { + span: Span::new( + Position::new(0, 1, 1), + Position::new(9, 1, 9), + ), + kind: ast::GroupKind::CaptureName { + starts_with_p: true, + name: ast::CaptureName { + span: Span::new( + Position::new(4, 1, 5), + Position::new(7, 1, 7), + ), + name: s("a¾"), + index: 1, + } + }, + ast: Box::new(Ast::empty(Span::new( + Position::new(8, 1, 8), + Position::new(8, 1, 8), + ))), + })) + ); + assert_eq!( + parser("(?P<名字>)").parse(), + Ok(Ast::group(ast::Group { + span: Span::new( + Position::new(0, 1, 1), + Position::new(12, 1, 9), + ), + kind: ast::GroupKind::CaptureName { + starts_with_p: true, + name: ast::CaptureName { + span: Span::new( + Position::new(4, 1, 5), + Position::new(10, 1, 7), + ), + name: s("名字"), + index: 1, + } + }, + ast: Box::new(Ast::empty(Span::new( + Position::new(11, 1, 8), + Position::new(11, 1, 8), + ))), + })) + ); + + assert_eq!( + parser("(?P<").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::GroupNameUnexpectedEof, + } + ); + assert_eq!( + parser("(?P<>z)").parse().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::GroupNameEmpty, + } + ); + assert_eq!( + parser("(?Py)(?Pz)").parse().unwrap_err(), + TestError { + span: span(12..13), + kind: ast::ErrorKind::GroupNameDuplicate { + original: span(4..5), + }, + } + ); + assert_eq!( + parser("(?P<5>)").parse().unwrap_err(), + TestError { + span: span(4..5), + kind: ast::ErrorKind::GroupNameInvalid, + } + ); + assert_eq!( 
+ parser("(?P<5a>)").parse().unwrap_err(), + TestError { + span: span(4..5), + kind: ast::ErrorKind::GroupNameInvalid, + } + ); + assert_eq!( + parser("(?P<¾>)").parse().unwrap_err(), + TestError { + span: Span::new( + Position::new(4, 1, 5), + Position::new(6, 1, 6), + ), + kind: ast::ErrorKind::GroupNameInvalid, + } + ); + assert_eq!( + parser("(?P<¾a>)").parse().unwrap_err(), + TestError { + span: Span::new( + Position::new(4, 1, 5), + Position::new(6, 1, 6), + ), + kind: ast::ErrorKind::GroupNameInvalid, + } + ); + assert_eq!( + parser("(?P<☃>)").parse().unwrap_err(), + TestError { + span: Span::new( + Position::new(4, 1, 5), + Position::new(7, 1, 6), + ), + kind: ast::ErrorKind::GroupNameInvalid, + } + ); + assert_eq!( + parser("(?P)").parse().unwrap_err(), + TestError { + span: Span::new( + Position::new(5, 1, 6), + Position::new(8, 1, 7), + ), + kind: ast::ErrorKind::GroupNameInvalid, + } + ); + } + + #[test] + fn parse_flags() { + assert_eq!( + parser("i:").parse_flags(), + Ok(ast::Flags { + span: span(0..1), + items: vec![ast::FlagsItem { + span: span(0..1), + kind: ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive), + }], + }) + ); + assert_eq!( + parser("i)").parse_flags(), + Ok(ast::Flags { + span: span(0..1), + items: vec![ast::FlagsItem { + span: span(0..1), + kind: ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive), + }], + }) + ); + + assert_eq!( + parser("isU:").parse_flags(), + Ok(ast::Flags { + span: span(0..3), + items: vec![ + ast::FlagsItem { + span: span(0..1), + kind: ast::FlagsItemKind::Flag( + ast::Flag::CaseInsensitive + ), + }, + ast::FlagsItem { + span: span(1..2), + kind: ast::FlagsItemKind::Flag( + ast::Flag::DotMatchesNewLine + ), + }, + ast::FlagsItem { + span: span(2..3), + kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed), + }, + ], + }) + ); + + assert_eq!( + parser("-isU:").parse_flags(), + Ok(ast::Flags { + span: span(0..4), + items: vec![ + ast::FlagsItem { + span: span(0..1), + kind: ast::FlagsItemKind::Negation, + }, 
+ ast::FlagsItem { + span: span(1..2), + kind: ast::FlagsItemKind::Flag( + ast::Flag::CaseInsensitive + ), + }, + ast::FlagsItem { + span: span(2..3), + kind: ast::FlagsItemKind::Flag( + ast::Flag::DotMatchesNewLine + ), + }, + ast::FlagsItem { + span: span(3..4), + kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed), + }, + ], + }) + ); + assert_eq!( + parser("i-sU:").parse_flags(), + Ok(ast::Flags { + span: span(0..4), + items: vec![ + ast::FlagsItem { + span: span(0..1), + kind: ast::FlagsItemKind::Flag( + ast::Flag::CaseInsensitive + ), + }, + ast::FlagsItem { + span: span(1..2), + kind: ast::FlagsItemKind::Negation, + }, + ast::FlagsItem { + span: span(2..3), + kind: ast::FlagsItemKind::Flag( + ast::Flag::DotMatchesNewLine + ), + }, + ast::FlagsItem { + span: span(3..4), + kind: ast::FlagsItemKind::Flag(ast::Flag::SwapGreed), + }, + ], + }) + ); + assert_eq!( + parser("i-sR:").parse_flags(), + Ok(ast::Flags { + span: span(0..4), + items: vec![ + ast::FlagsItem { + span: span(0..1), + kind: ast::FlagsItemKind::Flag( + ast::Flag::CaseInsensitive + ), + }, + ast::FlagsItem { + span: span(1..2), + kind: ast::FlagsItemKind::Negation, + }, + ast::FlagsItem { + span: span(2..3), + kind: ast::FlagsItemKind::Flag( + ast::Flag::DotMatchesNewLine + ), + }, + ast::FlagsItem { + span: span(3..4), + kind: ast::FlagsItemKind::Flag(ast::Flag::CRLF), + }, + ], + }) + ); + + assert_eq!( + parser("isU").parse_flags().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::FlagUnexpectedEof, + } + ); + assert_eq!( + parser("isUa:").parse_flags().unwrap_err(), + TestError { + span: span(3..4), + kind: ast::ErrorKind::FlagUnrecognized, + } + ); + assert_eq!( + parser("isUi:").parse_flags().unwrap_err(), + TestError { + span: span(3..4), + kind: ast::ErrorKind::FlagDuplicate { original: span(0..1) }, + } + ); + assert_eq!( + parser("i-sU-i:").parse_flags().unwrap_err(), + TestError { + span: span(4..5), + kind: ast::ErrorKind::FlagRepeatedNegation { + original: 
span(1..2), + }, + } + ); + assert_eq!( + parser("-)").parse_flags().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::FlagDanglingNegation, + } + ); + assert_eq!( + parser("i-)").parse_flags().unwrap_err(), + TestError { + span: span(1..2), + kind: ast::ErrorKind::FlagDanglingNegation, + } + ); + assert_eq!( + parser("iU-)").parse_flags().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::FlagDanglingNegation, + } + ); + } + + #[test] + fn parse_flag() { + assert_eq!(parser("i").parse_flag(), Ok(ast::Flag::CaseInsensitive)); + assert_eq!(parser("m").parse_flag(), Ok(ast::Flag::MultiLine)); + assert_eq!(parser("s").parse_flag(), Ok(ast::Flag::DotMatchesNewLine)); + assert_eq!(parser("U").parse_flag(), Ok(ast::Flag::SwapGreed)); + assert_eq!(parser("u").parse_flag(), Ok(ast::Flag::Unicode)); + assert_eq!(parser("R").parse_flag(), Ok(ast::Flag::CRLF)); + assert_eq!(parser("x").parse_flag(), Ok(ast::Flag::IgnoreWhitespace)); + + assert_eq!( + parser("a").parse_flag().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::FlagUnrecognized, + } + ); + assert_eq!( + parser("☃").parse_flag().unwrap_err(), + TestError { + span: span_range("☃", 0..3), + kind: ast::ErrorKind::FlagUnrecognized, + } + ); + } + + #[test] + fn parse_primitive_non_escape() { + assert_eq!( + parser(r".").parse_primitive(), + Ok(Primitive::Dot(span(0..1))) + ); + assert_eq!( + parser(r"^").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..1), + kind: ast::AssertionKind::StartLine, + })) + ); + assert_eq!( + parser(r"$").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..1), + kind: ast::AssertionKind::EndLine, + })) + ); + + assert_eq!( + parser(r"a").parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..1), + kind: ast::LiteralKind::Verbatim, + c: 'a', + })) + ); + assert_eq!( + parser(r"|").parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: 
span(0..1), + kind: ast::LiteralKind::Verbatim, + c: '|', + })) + ); + assert_eq!( + parser(r"☃").parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: span_range("☃", 0..3), + kind: ast::LiteralKind::Verbatim, + c: '☃', + })) + ); + } + + #[test] + fn parse_escape() { + assert_eq!( + parser(r"\|").parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..2), + kind: ast::LiteralKind::Meta, + c: '|', + })) + ); + let specials = &[ + (r"\a", '\x07', ast::SpecialLiteralKind::Bell), + (r"\f", '\x0C', ast::SpecialLiteralKind::FormFeed), + (r"\t", '\t', ast::SpecialLiteralKind::Tab), + (r"\n", '\n', ast::SpecialLiteralKind::LineFeed), + (r"\r", '\r', ast::SpecialLiteralKind::CarriageReturn), + (r"\v", '\x0B', ast::SpecialLiteralKind::VerticalTab), + ]; + for &(pat, c, ref kind) in specials { + assert_eq!( + parser(pat).parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..2), + kind: ast::LiteralKind::Special(kind.clone()), + c, + })) + ); + } + assert_eq!( + parser(r"\A").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::StartText, + })) + ); + assert_eq!( + parser(r"\z").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::EndText, + })) + ); + assert_eq!( + parser(r"\b").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::WordBoundary, + })) + ); + assert_eq!( + parser(r"\b{start}").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..9), + kind: ast::AssertionKind::WordBoundaryStart, + })) + ); + assert_eq!( + parser(r"\b{end}").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..7), + kind: ast::AssertionKind::WordBoundaryEnd, + })) + ); + assert_eq!( + parser(r"\b{start-half}").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..14), + kind: 
ast::AssertionKind::WordBoundaryStartHalf, + })) + ); + assert_eq!( + parser(r"\b{end-half}").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..12), + kind: ast::AssertionKind::WordBoundaryEndHalf, + })) + ); + assert_eq!( + parser(r"\<").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::WordBoundaryStartAngle, + })) + ); + assert_eq!( + parser(r"\>").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::WordBoundaryEndAngle, + })) + ); + assert_eq!( + parser(r"\B").parse_primitive(), + Ok(Primitive::Assertion(ast::Assertion { + span: span(0..2), + kind: ast::AssertionKind::NotWordBoundary, + })) + ); + + // We also support superfluous escapes in most cases now too. + for c in ['!', '@', '%', '"', '\'', '/', ' '] { + let pat = format!(r"\{c}"); + assert_eq!( + parser(&pat).parse_primitive(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..2), + kind: ast::LiteralKind::Superfluous, + c, + })) + ); + } + + // Some superfluous escapes, namely [0-9A-Za-z], are still banned. This + // gives flexibility for future evolution. + assert_eq!( + parser(r"\e").parse_escape().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::EscapeUnrecognized, + } + ); + assert_eq!( + parser(r"\y").parse_escape().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::EscapeUnrecognized, + } + ); + + // Starting a special word boundary without any non-whitespace chars + // after the brace makes it ambiguous whether the user meant to write + // a counted repetition (probably not?) or an actual special word + // boundary assertion. 
+ assert_eq!( + parser(r"\b{").parse_escape().unwrap_err(), + TestError { + span: span(0..3), + kind: ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof, + } + ); + assert_eq!( + parser_ignore_whitespace(r"\b{ ").parse_escape().unwrap_err(), + TestError { + span: span(0..4), + kind: ast::ErrorKind::SpecialWordOrRepetitionUnexpectedEof, + } + ); + // When 'x' is not enabled, the space is seen as a non-[-A-Za-z] char, + // and thus causes the parser to treat it as a counted repetition. + assert_eq!( + parser(r"\b{ ").parse().unwrap_err(), + TestError { + span: span(2..4), + kind: ast::ErrorKind::RepetitionCountUnclosed, + } + ); + // In this case, we got some valid chars that makes it look like the + // user is writing one of the special word boundary assertions, but + // we forget to close the brace. + assert_eq!( + parser(r"\b{foo").parse_escape().unwrap_err(), + TestError { + span: span(2..6), + kind: ast::ErrorKind::SpecialWordBoundaryUnclosed, + } + ); + // We get the same error as above, except it is provoked by seeing a + // char that we know is invalid before seeing a closing brace. + assert_eq!( + parser(r"\b{foo!}").parse_escape().unwrap_err(), + TestError { + span: span(2..6), + kind: ast::ErrorKind::SpecialWordBoundaryUnclosed, + } + ); + // And this one occurs when, syntactically, everything looks okay, but + // we don't use a valid spelling of a word boundary assertion. + assert_eq!( + parser(r"\b{foo}").parse_escape().unwrap_err(), + TestError { + span: span(3..6), + kind: ast::ErrorKind::SpecialWordBoundaryUnrecognized, + } + ); + + // An unfinished escape is illegal. 
+ assert_eq!( + parser(r"\").parse_escape().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + } + + #[test] + fn parse_unsupported_backreference() { + assert_eq!( + parser(r"\0").parse_escape().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::UnsupportedBackreference, + } + ); + assert_eq!( + parser(r"\9").parse_escape().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::UnsupportedBackreference, + } + ); + } + + #[test] + fn parse_octal() { + for i in 0..511 { + let pat = format!(r"\{i:o}"); + assert_eq!( + parser_octal(&pat).parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..pat.len()), + kind: ast::LiteralKind::Octal, + c: char::from_u32(i).unwrap(), + })) + ); + } + assert_eq!( + parser_octal(r"\778").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..3), + kind: ast::LiteralKind::Octal, + c: '?', + })) + ); + assert_eq!( + parser_octal(r"\7777").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..4), + kind: ast::LiteralKind::Octal, + c: '\u{01FF}', + })) + ); + assert_eq!( + parser_octal(r"\778").parse(), + Ok(Ast::concat(ast::Concat { + span: span(0..4), + asts: vec![ + Ast::literal(ast::Literal { + span: span(0..3), + kind: ast::LiteralKind::Octal, + c: '?', + }), + Ast::literal(ast::Literal { + span: span(3..4), + kind: ast::LiteralKind::Verbatim, + c: '8', + }), + ], + })) + ); + assert_eq!( + parser_octal(r"\7777").parse(), + Ok(Ast::concat(ast::Concat { + span: span(0..5), + asts: vec![ + Ast::literal(ast::Literal { + span: span(0..4), + kind: ast::LiteralKind::Octal, + c: '\u{01FF}', + }), + Ast::literal(ast::Literal { + span: span(4..5), + kind: ast::LiteralKind::Verbatim, + c: '7', + }), + ], + })) + ); + + assert_eq!( + parser_octal(r"\8").parse_escape().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::EscapeUnrecognized, + } + ); + } + + #[test] + fn parse_hex_two() { + 
for i in 0..256 { + let pat = format!(r"\x{i:02x}"); + assert_eq!( + parser(&pat).parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..pat.len()), + kind: ast::LiteralKind::HexFixed(ast::HexLiteralKind::X), + c: char::from_u32(i).unwrap(), + })) + ); + } + + assert_eq!( + parser(r"\xF").parse_escape().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\xG").parse_escape().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\xFG").parse_escape().unwrap_err(), + TestError { + span: span(3..4), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + } + + #[test] + fn parse_hex_four() { + for i in 0..65536 { + let c = match char::from_u32(i) { + None => continue, + Some(c) => c, + }; + let pat = format!(r"\u{i:04x}"); + assert_eq!( + parser(&pat).parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..pat.len()), + kind: ast::LiteralKind::HexFixed( + ast::HexLiteralKind::UnicodeShort + ), + c, + })) + ); + } + + assert_eq!( + parser(r"\uF").parse_escape().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\uG").parse_escape().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\uFG").parse_escape().unwrap_err(), + TestError { + span: span(3..4), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\uFFG").parse_escape().unwrap_err(), + TestError { + span: span(4..5), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\uFFFG").parse_escape().unwrap_err(), + TestError { + span: span(5..6), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\uD800").parse_escape().unwrap_err(), + TestError { + span: span(2..6), + kind: 
ast::ErrorKind::EscapeHexInvalid, + } + ); + } + + #[test] + fn parse_hex_eight() { + for i in 0..65536 { + let c = match char::from_u32(i) { + None => continue, + Some(c) => c, + }; + let pat = format!(r"\U{i:08x}"); + assert_eq!( + parser(&pat).parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..pat.len()), + kind: ast::LiteralKind::HexFixed( + ast::HexLiteralKind::UnicodeLong + ), + c, + })) + ); + } + + assert_eq!( + parser(r"\UF").parse_escape().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\UG").parse_escape().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFG").parse_escape().unwrap_err(), + TestError { + span: span(3..4), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFG").parse_escape().unwrap_err(), + TestError { + span: span(4..5), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFFG").parse_escape().unwrap_err(), + TestError { + span: span(5..6), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFFFG").parse_escape().unwrap_err(), + TestError { + span: span(6..7), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFFFFG").parse_escape().unwrap_err(), + TestError { + span: span(7..8), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFFFFFG").parse_escape().unwrap_err(), + TestError { + span: span(8..9), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\UFFFFFFFG").parse_escape().unwrap_err(), + TestError { + span: span(9..10), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + } + + #[test] + fn parse_hex_brace() { + assert_eq!( + parser(r"\u{26c4}").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..8), + kind: ast::LiteralKind::HexBrace( + 
ast::HexLiteralKind::UnicodeShort + ), + c: '⛄', + })) + ); + assert_eq!( + parser(r"\U{26c4}").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..8), + kind: ast::LiteralKind::HexBrace( + ast::HexLiteralKind::UnicodeLong + ), + c: '⛄', + })) + ); + assert_eq!( + parser(r"\x{26c4}").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..8), + kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X), + c: '⛄', + })) + ); + assert_eq!( + parser(r"\x{26C4}").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..8), + kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X), + c: '⛄', + })) + ); + assert_eq!( + parser(r"\x{10fFfF}").parse_escape(), + Ok(Primitive::Literal(ast::Literal { + span: span(0..10), + kind: ast::LiteralKind::HexBrace(ast::HexLiteralKind::X), + c: '\u{10FFFF}', + })) + ); + + assert_eq!( + parser(r"\x").parse_escape().unwrap_err(), + TestError { + span: span(2..2), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\x{").parse_escape().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\x{FF").parse_escape().unwrap_err(), + TestError { + span: span(2..5), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\x{}").parse_escape().unwrap_err(), + TestError { + span: span(2..4), + kind: ast::ErrorKind::EscapeHexEmpty, + } + ); + assert_eq!( + parser(r"\x{FGF}").parse_escape().unwrap_err(), + TestError { + span: span(4..5), + kind: ast::ErrorKind::EscapeHexInvalidDigit, + } + ); + assert_eq!( + parser(r"\x{FFFFFF}").parse_escape().unwrap_err(), + TestError { + span: span(3..9), + kind: ast::ErrorKind::EscapeHexInvalid, + } + ); + assert_eq!( + parser(r"\x{D800}").parse_escape().unwrap_err(), + TestError { + span: span(3..7), + kind: ast::ErrorKind::EscapeHexInvalid, + } + ); + assert_eq!( + parser(r"\x{FFFFFFFFF}").parse_escape().unwrap_err(), + TestError { + span: 
span(3..12), + kind: ast::ErrorKind::EscapeHexInvalid, + } + ); + } + + #[test] + fn parse_decimal() { + assert_eq!(parser("123").parse_decimal(), Ok(123)); + assert_eq!(parser("0").parse_decimal(), Ok(0)); + assert_eq!(parser("01").parse_decimal(), Ok(1)); + + assert_eq!( + parser("-1").parse_decimal().unwrap_err(), + TestError { span: span(0..0), kind: ast::ErrorKind::DecimalEmpty } + ); + assert_eq!( + parser("").parse_decimal().unwrap_err(), + TestError { span: span(0..0), kind: ast::ErrorKind::DecimalEmpty } + ); + assert_eq!( + parser("9999999999").parse_decimal().unwrap_err(), + TestError { + span: span(0..10), + kind: ast::ErrorKind::DecimalInvalid, + } + ); + } + + #[test] + fn parse_set_class() { + fn union(span: Span, items: Vec) -> ast::ClassSet { + ast::ClassSet::union(ast::ClassSetUnion { span, items }) + } + + fn intersection( + span: Span, + lhs: ast::ClassSet, + rhs: ast::ClassSet, + ) -> ast::ClassSet { + ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { + span, + kind: ast::ClassSetBinaryOpKind::Intersection, + lhs: Box::new(lhs), + rhs: Box::new(rhs), + }) + } + + fn difference( + span: Span, + lhs: ast::ClassSet, + rhs: ast::ClassSet, + ) -> ast::ClassSet { + ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { + span, + kind: ast::ClassSetBinaryOpKind::Difference, + lhs: Box::new(lhs), + rhs: Box::new(rhs), + }) + } + + fn symdifference( + span: Span, + lhs: ast::ClassSet, + rhs: ast::ClassSet, + ) -> ast::ClassSet { + ast::ClassSet::BinaryOp(ast::ClassSetBinaryOp { + span, + kind: ast::ClassSetBinaryOpKind::SymmetricDifference, + lhs: Box::new(lhs), + rhs: Box::new(rhs), + }) + } + + fn itemset(item: ast::ClassSetItem) -> ast::ClassSet { + ast::ClassSet::Item(item) + } + + fn item_ascii(cls: ast::ClassAscii) -> ast::ClassSetItem { + ast::ClassSetItem::Ascii(cls) + } + + fn item_unicode(cls: ast::ClassUnicode) -> ast::ClassSetItem { + ast::ClassSetItem::Unicode(cls) + } + + fn item_perl(cls: ast::ClassPerl) -> ast::ClassSetItem { + 
ast::ClassSetItem::Perl(cls) + } + + fn item_bracket(cls: ast::ClassBracketed) -> ast::ClassSetItem { + ast::ClassSetItem::Bracketed(Box::new(cls)) + } + + fn lit(span: Span, c: char) -> ast::ClassSetItem { + ast::ClassSetItem::Literal(ast::Literal { + span, + kind: ast::LiteralKind::Verbatim, + c, + }) + } + + fn empty(span: Span) -> ast::ClassSetItem { + ast::ClassSetItem::Empty(span) + } + + fn range(span: Span, start: char, end: char) -> ast::ClassSetItem { + let pos1 = Position { + offset: span.start.offset + start.len_utf8(), + column: span.start.column + 1, + ..span.start + }; + let pos2 = Position { + offset: span.end.offset - end.len_utf8(), + column: span.end.column - 1, + ..span.end + }; + ast::ClassSetItem::Range(ast::ClassSetRange { + span, + start: ast::Literal { + span: Span { end: pos1, ..span }, + kind: ast::LiteralKind::Verbatim, + c: start, + }, + end: ast::Literal { + span: Span { start: pos2, ..span }, + kind: ast::LiteralKind::Verbatim, + c: end, + }, + }) + } + + fn alnum(span: Span, negated: bool) -> ast::ClassAscii { + ast::ClassAscii { span, kind: ast::ClassAsciiKind::Alnum, negated } + } + + fn lower(span: Span, negated: bool) -> ast::ClassAscii { + ast::ClassAscii { span, kind: ast::ClassAsciiKind::Lower, negated } + } + + assert_eq!( + parser("[[:alnum:]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..11), + negated: false, + kind: itemset(item_ascii(alnum(span(1..10), false))), + })) + ); + assert_eq!( + parser("[[[:alnum:]]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..13), + negated: false, + kind: itemset(item_bracket(ast::ClassBracketed { + span: span(1..12), + negated: false, + kind: itemset(item_ascii(alnum(span(2..11), false))), + })), + })) + ); + assert_eq!( + parser("[[:alnum:]&&[:lower:]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..22), + negated: false, + kind: intersection( + span(1..21), + itemset(item_ascii(alnum(span(1..10), 
false))), + itemset(item_ascii(lower(span(12..21), false))), + ), + })) + ); + assert_eq!( + parser("[[:alnum:]--[:lower:]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..22), + negated: false, + kind: difference( + span(1..21), + itemset(item_ascii(alnum(span(1..10), false))), + itemset(item_ascii(lower(span(12..21), false))), + ), + })) + ); + assert_eq!( + parser("[[:alnum:]~~[:lower:]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..22), + negated: false, + kind: symdifference( + span(1..21), + itemset(item_ascii(alnum(span(1..10), false))), + itemset(item_ascii(lower(span(12..21), false))), + ), + })) + ); + + assert_eq!( + parser("[a]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..3), + negated: false, + kind: itemset(lit(span(1..2), 'a')), + })) + ); + assert_eq!( + parser(r"[a\]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..5), + negated: false, + kind: union( + span(1..4), + vec![ + lit(span(1..2), 'a'), + ast::ClassSetItem::Literal(ast::Literal { + span: span(2..4), + kind: ast::LiteralKind::Meta, + c: ']', + }), + ] + ), + })) + ); + assert_eq!( + parser(r"[a\-z]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..6), + negated: false, + kind: union( + span(1..5), + vec![ + lit(span(1..2), 'a'), + ast::ClassSetItem::Literal(ast::Literal { + span: span(2..4), + kind: ast::LiteralKind::Meta, + c: '-', + }), + lit(span(4..5), 'z'), + ] + ), + })) + ); + assert_eq!( + parser("[ab]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: union( + span(1..3), + vec![lit(span(1..2), 'a'), lit(span(2..3), 'b'),] + ), + })) + ); + assert_eq!( + parser("[a-]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: union( + span(1..3), + vec![lit(span(1..2), 'a'), lit(span(2..3), '-'),] + ), + })) + ); + assert_eq!( + 
parser("[-a]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: union( + span(1..3), + vec![lit(span(1..2), '-'), lit(span(2..3), 'a'),] + ), + })) + ); + assert_eq!( + parser(r"[\pL]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..5), + negated: false, + kind: itemset(item_unicode(ast::ClassUnicode { + span: span(1..4), + negated: false, + kind: ast::ClassUnicodeKind::OneLetter('L'), + })), + })) + ); + assert_eq!( + parser(r"[\w]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: itemset(item_perl(ast::ClassPerl { + span: span(1..3), + kind: ast::ClassPerlKind::Word, + negated: false, + })), + })) + ); + assert_eq!( + parser(r"[a\wz]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..6), + negated: false, + kind: union( + span(1..5), + vec![ + lit(span(1..2), 'a'), + item_perl(ast::ClassPerl { + span: span(2..4), + kind: ast::ClassPerlKind::Word, + negated: false, + }), + lit(span(4..5), 'z'), + ] + ), + })) + ); + + assert_eq!( + parser("[a-z]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..5), + negated: false, + kind: itemset(range(span(1..4), 'a', 'z')), + })) + ); + assert_eq!( + parser("[a-cx-z]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..8), + negated: false, + kind: union( + span(1..7), + vec![ + range(span(1..4), 'a', 'c'), + range(span(4..7), 'x', 'z'), + ] + ), + })) + ); + assert_eq!( + parser(r"[\w&&a-cx-z]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..12), + negated: false, + kind: intersection( + span(1..11), + itemset(item_perl(ast::ClassPerl { + span: span(1..3), + kind: ast::ClassPerlKind::Word, + negated: false, + })), + union( + span(5..11), + vec![ + range(span(5..8), 'a', 'c'), + range(span(8..11), 'x', 'z'), + ] + ), + ), + })) + ); + assert_eq!( + parser(r"[a-cx-z&&\w]").parse(), + 
Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..12), + negated: false, + kind: intersection( + span(1..11), + union( + span(1..7), + vec![ + range(span(1..4), 'a', 'c'), + range(span(4..7), 'x', 'z'), + ] + ), + itemset(item_perl(ast::ClassPerl { + span: span(9..11), + kind: ast::ClassPerlKind::Word, + negated: false, + })), + ), + })) + ); + assert_eq!( + parser(r"[a--b--c]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..9), + negated: false, + kind: difference( + span(1..8), + difference( + span(1..5), + itemset(lit(span(1..2), 'a')), + itemset(lit(span(4..5), 'b')), + ), + itemset(lit(span(7..8), 'c')), + ), + })) + ); + assert_eq!( + parser(r"[a~~b~~c]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..9), + negated: false, + kind: symdifference( + span(1..8), + symdifference( + span(1..5), + itemset(lit(span(1..2), 'a')), + itemset(lit(span(4..5), 'b')), + ), + itemset(lit(span(7..8), 'c')), + ), + })) + ); + assert_eq!( + parser(r"[\^&&^]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..7), + negated: false, + kind: intersection( + span(1..6), + itemset(ast::ClassSetItem::Literal(ast::Literal { + span: span(1..3), + kind: ast::LiteralKind::Meta, + c: '^', + })), + itemset(lit(span(5..6), '^')), + ), + })) + ); + assert_eq!( + parser(r"[\&&&&]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..7), + negated: false, + kind: intersection( + span(1..6), + itemset(ast::ClassSetItem::Literal(ast::Literal { + span: span(1..3), + kind: ast::LiteralKind::Meta, + c: '&', + })), + itemset(lit(span(5..6), '&')), + ), + })) + ); + assert_eq!( + parser(r"[&&&&]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..6), + negated: false, + kind: intersection( + span(1..5), + intersection( + span(1..3), + itemset(empty(span(1..1))), + itemset(empty(span(3..3))), + ), + itemset(empty(span(5..5))), + ), + })) + ); + + let pat = "[☃-⛄]"; + 
assert_eq!( + parser(pat).parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span_range(pat, 0..9), + negated: false, + kind: itemset(ast::ClassSetItem::Range(ast::ClassSetRange { + span: span_range(pat, 1..8), + start: ast::Literal { + span: span_range(pat, 1..4), + kind: ast::LiteralKind::Verbatim, + c: '☃', + }, + end: ast::Literal { + span: span_range(pat, 5..8), + kind: ast::LiteralKind::Verbatim, + c: '⛄', + }, + })), + })) + ); + + assert_eq!( + parser(r"[]]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..3), + negated: false, + kind: itemset(lit(span(1..2), ']')), + })) + ); + assert_eq!( + parser(r"[]\[]").parse(), + Ok(Ast::class_bracketed(ast::ClassBracketed { + span: span(0..5), + negated: false, + kind: union( + span(1..4), + vec![ + lit(span(1..2), ']'), + ast::ClassSetItem::Literal(ast::Literal { + span: span(2..4), + kind: ast::LiteralKind::Meta, + c: '[', + }), + ] + ), + })) + ); + assert_eq!( + parser(r"[\[]]").parse(), + Ok(concat( + 0..5, + vec![ + Ast::class_bracketed(ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: itemset(ast::ClassSetItem::Literal( + ast::Literal { + span: span(1..3), + kind: ast::LiteralKind::Meta, + c: '[', + } + )), + }), + Ast::literal(ast::Literal { + span: span(4..5), + kind: ast::LiteralKind::Verbatim, + c: ']', + }), + ] + )) + ); + + assert_eq!( + parser("[").parse().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[[").parse().unwrap_err(), + TestError { + span: span(1..2), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[[-]").parse().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[[[:alnum:]").parse().unwrap_err(), + TestError { + span: span(1..2), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser(r"[\b]").parse().unwrap_err(), + TestError { + span: span(1..3), + 
kind: ast::ErrorKind::ClassEscapeInvalid, + } + ); + assert_eq!( + parser(r"[\w-a]").parse().unwrap_err(), + TestError { + span: span(1..3), + kind: ast::ErrorKind::ClassRangeLiteral, + } + ); + assert_eq!( + parser(r"[a-\w]").parse().unwrap_err(), + TestError { + span: span(3..5), + kind: ast::ErrorKind::ClassRangeLiteral, + } + ); + assert_eq!( + parser(r"[z-a]").parse().unwrap_err(), + TestError { + span: span(1..4), + kind: ast::ErrorKind::ClassRangeInvalid, + } + ); + + assert_eq!( + parser_ignore_whitespace("[a ").parse().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser_ignore_whitespace("[a- ").parse().unwrap_err(), + TestError { + span: span(0..1), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + } + + #[test] + fn parse_set_class_open() { + assert_eq!(parser("[a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..1), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(1..1), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { span: span(1..1), items: vec![] }; + Ok((set, union)) + }); + assert_eq!( + parser_ignore_whitespace("[ a]").parse_set_class_open(), + { + let set = ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(4..4), + items: vec![], + }), + }; + let union = + ast::ClassSetUnion { span: span(4..4), items: vec![] }; + Ok((set, union)) + } + ); + assert_eq!(parser("[^a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..2), + negated: true, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(2..2), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { span: span(2..2), items: vec![] }; + Ok((set, union)) + }); + assert_eq!( + parser_ignore_whitespace("[ ^ a]").parse_set_class_open(), + { + let set = ast::ClassBracketed { + span: span(0..4), + negated: true, + kind: 
ast::ClassSet::union(ast::ClassSetUnion { + span: span(4..4), + items: vec![], + }), + }; + let union = + ast::ClassSetUnion { span: span(4..4), items: vec![] }; + Ok((set, union)) + } + ); + assert_eq!(parser("[-a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..2), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(1..1), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(1..2), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(1..2), + kind: ast::LiteralKind::Verbatim, + c: '-', + })], + }; + Ok((set, union)) + }); + assert_eq!( + parser_ignore_whitespace("[ - a]").parse_set_class_open(), + { + let set = ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(2..2), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(2..3), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(2..3), + kind: ast::LiteralKind::Verbatim, + c: '-', + })], + }; + Ok((set, union)) + } + ); + assert_eq!(parser("[^-a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..3), + negated: true, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(2..2), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(2..3), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(2..3), + kind: ast::LiteralKind::Verbatim, + c: '-', + })], + }; + Ok((set, union)) + }); + assert_eq!(parser("[--a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..3), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(1..1), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(1..3), + items: vec![ + ast::ClassSetItem::Literal(ast::Literal { + span: span(1..2), + kind: ast::LiteralKind::Verbatim, + c: '-', + }), + ast::ClassSetItem::Literal(ast::Literal { + span: 
span(2..3), + kind: ast::LiteralKind::Verbatim, + c: '-', + }), + ], + }; + Ok((set, union)) + }); + assert_eq!(parser("[]a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..2), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(1..1), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(1..2), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(1..2), + kind: ast::LiteralKind::Verbatim, + c: ']', + })], + }; + Ok((set, union)) + }); + assert_eq!( + parser_ignore_whitespace("[ ] a]").parse_set_class_open(), + { + let set = ast::ClassBracketed { + span: span(0..4), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(2..2), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(2..3), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(2..3), + kind: ast::LiteralKind::Verbatim, + c: ']', + })], + }; + Ok((set, union)) + } + ); + assert_eq!(parser("[^]a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..3), + negated: true, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(2..2), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(2..3), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(2..3), + kind: ast::LiteralKind::Verbatim, + c: ']', + })], + }; + Ok((set, union)) + }); + assert_eq!(parser("[-]a]").parse_set_class_open(), { + let set = ast::ClassBracketed { + span: span(0..2), + negated: false, + kind: ast::ClassSet::union(ast::ClassSetUnion { + span: span(1..1), + items: vec![], + }), + }; + let union = ast::ClassSetUnion { + span: span(1..2), + items: vec![ast::ClassSetItem::Literal(ast::Literal { + span: span(1..2), + kind: ast::LiteralKind::Verbatim, + c: '-', + })], + }; + Ok((set, union)) + }); + + assert_eq!( + parser("[").parse_set_class_open().unwrap_err(), + TestError { + span: span(0..1), + kind: 
ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser_ignore_whitespace("[ ") + .parse_set_class_open() + .unwrap_err(), + TestError { + span: span(0..5), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[^").parse_set_class_open().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[]").parse_set_class_open().unwrap_err(), + TestError { + span: span(0..2), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[-").parse_set_class_open().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + assert_eq!( + parser("[--").parse_set_class_open().unwrap_err(), + TestError { + span: span(0..0), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + + // See: https://github.com/rust-lang/regex/issues/792 + assert_eq!( + parser("(?x)[-#]").parse_with_comments().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::ClassUnclosed, + } + ); + } + + #[test] + fn maybe_parse_ascii_class() { + assert_eq!( + parser(r"[:alnum:]").maybe_parse_ascii_class(), + Some(ast::ClassAscii { + span: span(0..9), + kind: ast::ClassAsciiKind::Alnum, + negated: false, + }) + ); + assert_eq!( + parser(r"[:alnum:]A").maybe_parse_ascii_class(), + Some(ast::ClassAscii { + span: span(0..9), + kind: ast::ClassAsciiKind::Alnum, + negated: false, + }) + ); + assert_eq!( + parser(r"[:^alnum:]").maybe_parse_ascii_class(), + Some(ast::ClassAscii { + span: span(0..10), + kind: ast::ClassAsciiKind::Alnum, + negated: true, + }) + ); + + let p = parser(r"[:"); + assert_eq!(p.maybe_parse_ascii_class(), None); + assert_eq!(p.offset(), 0); + + let p = parser(r"[:^"); + assert_eq!(p.maybe_parse_ascii_class(), None); + assert_eq!(p.offset(), 0); + + let p = parser(r"[^:alnum:]"); + assert_eq!(p.maybe_parse_ascii_class(), None); + assert_eq!(p.offset(), 0); + + let p = parser(r"[:alnnum:]"); + assert_eq!(p.maybe_parse_ascii_class(), None); + 
assert_eq!(p.offset(), 0); + + let p = parser(r"[:alnum]"); + assert_eq!(p.maybe_parse_ascii_class(), None); + assert_eq!(p.offset(), 0); + + let p = parser(r"[:alnum:"); + assert_eq!(p.maybe_parse_ascii_class(), None); + assert_eq!(p.offset(), 0); + } + + #[test] + fn parse_unicode_class() { + assert_eq!( + parser(r"\pN").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..3), + negated: false, + kind: ast::ClassUnicodeKind::OneLetter('N'), + })) + ); + assert_eq!( + parser(r"\PN").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..3), + negated: true, + kind: ast::ClassUnicodeKind::OneLetter('N'), + })) + ); + assert_eq!( + parser(r"\p{N}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..5), + negated: false, + kind: ast::ClassUnicodeKind::Named(s("N")), + })) + ); + assert_eq!( + parser(r"\P{N}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..5), + negated: true, + kind: ast::ClassUnicodeKind::Named(s("N")), + })) + ); + assert_eq!( + parser(r"\p{Greek}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..9), + negated: false, + kind: ast::ClassUnicodeKind::Named(s("Greek")), + })) + ); + + assert_eq!( + parser(r"\p{scx:Katakana}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..16), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Colon, + name: s("scx"), + value: s("Katakana"), + }, + })) + ); + assert_eq!( + parser(r"\p{scx=Katakana}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..16), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Equal, + name: s("scx"), + value: s("Katakana"), + }, + })) + ); + assert_eq!( + parser(r"\p{scx!=Katakana}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..17), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: 
ast::ClassUnicodeOpKind::NotEqual, + name: s("scx"), + value: s("Katakana"), + }, + })) + ); + + assert_eq!( + parser(r"\p{:}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..5), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Colon, + name: s(""), + value: s(""), + }, + })) + ); + assert_eq!( + parser(r"\p{=}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..5), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::Equal, + name: s(""), + value: s(""), + }, + })) + ); + assert_eq!( + parser(r"\p{!=}").parse_escape(), + Ok(Primitive::Unicode(ast::ClassUnicode { + span: span(0..6), + negated: false, + kind: ast::ClassUnicodeKind::NamedValue { + op: ast::ClassUnicodeOpKind::NotEqual, + name: s(""), + value: s(""), + }, + })) + ); + + assert_eq!( + parser(r"\p").parse_escape().unwrap_err(), + TestError { + span: span(2..2), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\p{").parse_escape().unwrap_err(), + TestError { + span: span(3..3), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\p{N").parse_escape().unwrap_err(), + TestError { + span: span(4..4), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + assert_eq!( + parser(r"\p{Greek").parse_escape().unwrap_err(), + TestError { + span: span(8..8), + kind: ast::ErrorKind::EscapeUnexpectedEof, + } + ); + + assert_eq!( + parser(r"\pNz").parse(), + Ok(Ast::concat(ast::Concat { + span: span(0..4), + asts: vec![ + Ast::class_unicode(ast::ClassUnicode { + span: span(0..3), + negated: false, + kind: ast::ClassUnicodeKind::OneLetter('N'), + }), + Ast::literal(ast::Literal { + span: span(3..4), + kind: ast::LiteralKind::Verbatim, + c: 'z', + }), + ], + })) + ); + assert_eq!( + parser(r"\p{Greek}z").parse(), + Ok(Ast::concat(ast::Concat { + span: span(0..10), + asts: vec![ + Ast::class_unicode(ast::ClassUnicode { + span: 
span(0..9), + negated: false, + kind: ast::ClassUnicodeKind::Named(s("Greek")), + }), + Ast::literal(ast::Literal { + span: span(9..10), + kind: ast::LiteralKind::Verbatim, + c: 'z', + }), + ], + })) + ); + assert_eq!( + parser(r"\p\{").parse().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::UnicodeClassInvalid, + } + ); + assert_eq!( + parser(r"\P\{").parse().unwrap_err(), + TestError { + span: span(2..3), + kind: ast::ErrorKind::UnicodeClassInvalid, + } + ); + } + + #[test] + fn parse_perl_class() { + assert_eq!( + parser(r"\d").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Digit, + negated: false, + })) + ); + assert_eq!( + parser(r"\D").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Digit, + negated: true, + })) + ); + assert_eq!( + parser(r"\s").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Space, + negated: false, + })) + ); + assert_eq!( + parser(r"\S").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Space, + negated: true, + })) + ); + assert_eq!( + parser(r"\w").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Word, + negated: false, + })) + ); + assert_eq!( + parser(r"\W").parse_escape(), + Ok(Primitive::Perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Word, + negated: true, + })) + ); + + assert_eq!( + parser(r"\d").parse(), + Ok(Ast::class_perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Digit, + negated: false, + })) + ); + assert_eq!( + parser(r"\dz").parse(), + Ok(Ast::concat(ast::Concat { + span: span(0..3), + asts: vec![ + Ast::class_perl(ast::ClassPerl { + span: span(0..2), + kind: ast::ClassPerlKind::Digit, + negated: false, + }), + Ast::literal(ast::Literal { + span: span(2..3), + kind: 
ast::LiteralKind::Verbatim, + c: 'z', + }), + ], + })) + ); + } + + // This tests a bug fix where the nest limit checker wasn't decrementing + // its depth during post-traversal, which causes long regexes to trip + // the default limit too aggressively. + #[test] + fn regression_454_nest_too_big() { + let pattern = r#" + 2(?: + [45]\d{3}| + 7(?: + 1[0-267]| + 2[0-289]| + 3[0-29]| + 4[01]| + 5[1-3]| + 6[013]| + 7[0178]| + 91 + )| + 8(?: + 0[125]| + [139][1-6]| + 2[0157-9]| + 41| + 6[1-35]| + 7[1-5]| + 8[1-8]| + 90 + )| + 9(?: + 0[0-2]| + 1[0-4]| + 2[568]| + 3[3-6]| + 5[5-7]| + 6[0167]| + 7[15]| + 8[0146-9] + ) + )\d{4} + "#; + assert!(parser_nest_limit(pattern, 50).parse().is_ok()); + } + + // This tests that we treat a trailing `-` in a character class as a + // literal `-` even when whitespace mode is enabled and there is whitespace + // after the trailing `-`. + #[test] + fn regression_455_trailing_dash_ignore_whitespace() { + assert!(parser("(?x)[ / - ]").parse().is_ok()); + assert!(parser("(?x)[ a - ]").parse().is_ok()); + assert!(parser( + "(?x)[ + a + - ] + " + ) + .parse() + .is_ok()); + assert!(parser( + "(?x)[ + a # wat + - ] + " + ) + .parse() + .is_ok()); + + assert!(parser("(?x)[ / -").parse().is_err()); + assert!(parser("(?x)[ / - ").parse().is_err()); + assert!(parser( + "(?x)[ + / - + " + ) + .parse() + .is_err()); + assert!(parser( + "(?x)[ + / - # wat + " + ) + .parse() + .is_err()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/print.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/print.rs new file mode 100644 index 0000000000000000000000000000000000000000..556d91f4a0087a05860fbc4b84b7de92980b631c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/print.rs @@ -0,0 +1,577 @@ +/*! +This module provides a regular expression printer for `Ast`. 
+*/ + +use core::fmt; + +use crate::ast::{ + self, + visitor::{self, Visitor}, + Ast, +}; + +/// A builder for constructing a printer. +/// +/// Note that since a printer doesn't have any configuration knobs, this type +/// remains unexported. +#[derive(Clone, Debug)] +struct PrinterBuilder { + _priv: (), +} + +impl Default for PrinterBuilder { + fn default() -> PrinterBuilder { + PrinterBuilder::new() + } +} + +impl PrinterBuilder { + fn new() -> PrinterBuilder { + PrinterBuilder { _priv: () } + } + + fn build(&self) -> Printer { + Printer { _priv: () } + } +} + +/// A printer for a regular expression abstract syntax tree. +/// +/// A printer converts an abstract syntax tree (AST) to a regular expression +/// pattern string. This particular printer uses constant stack space and heap +/// space proportional to the size of the AST. +/// +/// This printer will not necessarily preserve the original formatting of the +/// regular expression pattern string. For example, all whitespace and comments +/// are ignored. +#[derive(Debug)] +pub struct Printer { + _priv: (), +} + +impl Printer { + /// Create a new printer. + pub fn new() -> Printer { + PrinterBuilder::new().build() + } + + /// Print the given `Ast` to the given writer. The writer must implement + /// `fmt::Write`. Typical implementations of `fmt::Write` that can be used + /// here are a `fmt::Formatter` (which is available in `fmt::Display` + /// implementations) or a `&mut String`. 
+ pub fn print(&mut self, ast: &Ast, wtr: W) -> fmt::Result { + visitor::visit(ast, Writer { wtr }) + } +} + +#[derive(Debug)] +struct Writer { + wtr: W, +} + +impl Visitor for Writer { + type Output = (); + type Err = fmt::Error; + + fn finish(self) -> fmt::Result { + Ok(()) + } + + fn visit_pre(&mut self, ast: &Ast) -> fmt::Result { + match *ast { + Ast::Group(ref x) => self.fmt_group_pre(x), + Ast::ClassBracketed(ref x) => self.fmt_class_bracketed_pre(x), + _ => Ok(()), + } + } + + fn visit_post(&mut self, ast: &Ast) -> fmt::Result { + match *ast { + Ast::Empty(_) => Ok(()), + Ast::Flags(ref x) => self.fmt_set_flags(x), + Ast::Literal(ref x) => self.fmt_literal(x), + Ast::Dot(_) => self.wtr.write_str("."), + Ast::Assertion(ref x) => self.fmt_assertion(x), + Ast::ClassPerl(ref x) => self.fmt_class_perl(x), + Ast::ClassUnicode(ref x) => self.fmt_class_unicode(x), + Ast::ClassBracketed(ref x) => self.fmt_class_bracketed_post(x), + Ast::Repetition(ref x) => self.fmt_repetition(x), + Ast::Group(ref x) => self.fmt_group_post(x), + Ast::Alternation(_) => Ok(()), + Ast::Concat(_) => Ok(()), + } + } + + fn visit_alternation_in(&mut self) -> fmt::Result { + self.wtr.write_str("|") + } + + fn visit_class_set_item_pre( + &mut self, + ast: &ast::ClassSetItem, + ) -> Result<(), Self::Err> { + match *ast { + ast::ClassSetItem::Bracketed(ref x) => { + self.fmt_class_bracketed_pre(x) + } + _ => Ok(()), + } + } + + fn visit_class_set_item_post( + &mut self, + ast: &ast::ClassSetItem, + ) -> Result<(), Self::Err> { + use crate::ast::ClassSetItem::*; + + match *ast { + Empty(_) => Ok(()), + Literal(ref x) => self.fmt_literal(x), + Range(ref x) => { + self.fmt_literal(&x.start)?; + self.wtr.write_str("-")?; + self.fmt_literal(&x.end)?; + Ok(()) + } + Ascii(ref x) => self.fmt_class_ascii(x), + Unicode(ref x) => self.fmt_class_unicode(x), + Perl(ref x) => self.fmt_class_perl(x), + Bracketed(ref x) => self.fmt_class_bracketed_post(x), + Union(_) => Ok(()), + } + } + + fn 
visit_class_set_binary_op_in( + &mut self, + ast: &ast::ClassSetBinaryOp, + ) -> Result<(), Self::Err> { + self.fmt_class_set_binary_op_kind(&ast.kind) + } +} + +impl Writer { + fn fmt_group_pre(&mut self, ast: &ast::Group) -> fmt::Result { + use crate::ast::GroupKind::*; + match ast.kind { + CaptureIndex(_) => self.wtr.write_str("("), + CaptureName { ref name, starts_with_p } => { + let start = if starts_with_p { "(?P<" } else { "(?<" }; + self.wtr.write_str(start)?; + self.wtr.write_str(&name.name)?; + self.wtr.write_str(">")?; + Ok(()) + } + NonCapturing(ref flags) => { + self.wtr.write_str("(?")?; + self.fmt_flags(flags)?; + self.wtr.write_str(":")?; + Ok(()) + } + } + } + + fn fmt_group_post(&mut self, _ast: &ast::Group) -> fmt::Result { + self.wtr.write_str(")") + } + + fn fmt_repetition(&mut self, ast: &ast::Repetition) -> fmt::Result { + use crate::ast::RepetitionKind::*; + match ast.op.kind { + ZeroOrOne if ast.greedy => self.wtr.write_str("?"), + ZeroOrOne => self.wtr.write_str("??"), + ZeroOrMore if ast.greedy => self.wtr.write_str("*"), + ZeroOrMore => self.wtr.write_str("*?"), + OneOrMore if ast.greedy => self.wtr.write_str("+"), + OneOrMore => self.wtr.write_str("+?"), + Range(ref x) => { + self.fmt_repetition_range(x)?; + if !ast.greedy { + self.wtr.write_str("?")?; + } + Ok(()) + } + } + } + + fn fmt_repetition_range( + &mut self, + ast: &ast::RepetitionRange, + ) -> fmt::Result { + use crate::ast::RepetitionRange::*; + match *ast { + Exactly(x) => write!(self.wtr, "{{{x}}}"), + AtLeast(x) => write!(self.wtr, "{{{x},}}"), + Bounded(x, y) => write!(self.wtr, "{{{x},{y}}}"), + } + } + + fn fmt_literal(&mut self, ast: &ast::Literal) -> fmt::Result { + use crate::ast::LiteralKind::*; + + match ast.kind { + Verbatim => self.wtr.write_char(ast.c), + Meta | Superfluous => write!(self.wtr, r"\{}", ast.c), + Octal => write!(self.wtr, r"\{:o}", u32::from(ast.c)), + HexFixed(ast::HexLiteralKind::X) => { + write!(self.wtr, r"\x{:02X}", u32::from(ast.c)) + } + 
HexFixed(ast::HexLiteralKind::UnicodeShort) => { + write!(self.wtr, r"\u{:04X}", u32::from(ast.c)) + } + HexFixed(ast::HexLiteralKind::UnicodeLong) => { + write!(self.wtr, r"\U{:08X}", u32::from(ast.c)) + } + HexBrace(ast::HexLiteralKind::X) => { + write!(self.wtr, r"\x{{{:X}}}", u32::from(ast.c)) + } + HexBrace(ast::HexLiteralKind::UnicodeShort) => { + write!(self.wtr, r"\u{{{:X}}}", u32::from(ast.c)) + } + HexBrace(ast::HexLiteralKind::UnicodeLong) => { + write!(self.wtr, r"\U{{{:X}}}", u32::from(ast.c)) + } + Special(ast::SpecialLiteralKind::Bell) => { + self.wtr.write_str(r"\a") + } + Special(ast::SpecialLiteralKind::FormFeed) => { + self.wtr.write_str(r"\f") + } + Special(ast::SpecialLiteralKind::Tab) => self.wtr.write_str(r"\t"), + Special(ast::SpecialLiteralKind::LineFeed) => { + self.wtr.write_str(r"\n") + } + Special(ast::SpecialLiteralKind::CarriageReturn) => { + self.wtr.write_str(r"\r") + } + Special(ast::SpecialLiteralKind::VerticalTab) => { + self.wtr.write_str(r"\v") + } + Special(ast::SpecialLiteralKind::Space) => { + self.wtr.write_str(r"\ ") + } + } + } + + fn fmt_assertion(&mut self, ast: &ast::Assertion) -> fmt::Result { + use crate::ast::AssertionKind::*; + match ast.kind { + StartLine => self.wtr.write_str("^"), + EndLine => self.wtr.write_str("$"), + StartText => self.wtr.write_str(r"\A"), + EndText => self.wtr.write_str(r"\z"), + WordBoundary => self.wtr.write_str(r"\b"), + NotWordBoundary => self.wtr.write_str(r"\B"), + WordBoundaryStart => self.wtr.write_str(r"\b{start}"), + WordBoundaryEnd => self.wtr.write_str(r"\b{end}"), + WordBoundaryStartAngle => self.wtr.write_str(r"\<"), + WordBoundaryEndAngle => self.wtr.write_str(r"\>"), + WordBoundaryStartHalf => self.wtr.write_str(r"\b{start-half}"), + WordBoundaryEndHalf => self.wtr.write_str(r"\b{end-half}"), + } + } + + fn fmt_set_flags(&mut self, ast: &ast::SetFlags) -> fmt::Result { + self.wtr.write_str("(?")?; + self.fmt_flags(&ast.flags)?; + self.wtr.write_str(")")?; + Ok(()) + } + + fn 
fmt_flags(&mut self, ast: &ast::Flags) -> fmt::Result { + use crate::ast::{Flag, FlagsItemKind}; + + for item in &ast.items { + match item.kind { + FlagsItemKind::Negation => self.wtr.write_str("-"), + FlagsItemKind::Flag(ref flag) => match *flag { + Flag::CaseInsensitive => self.wtr.write_str("i"), + Flag::MultiLine => self.wtr.write_str("m"), + Flag::DotMatchesNewLine => self.wtr.write_str("s"), + Flag::SwapGreed => self.wtr.write_str("U"), + Flag::Unicode => self.wtr.write_str("u"), + Flag::CRLF => self.wtr.write_str("R"), + Flag::IgnoreWhitespace => self.wtr.write_str("x"), + }, + }?; + } + Ok(()) + } + + fn fmt_class_bracketed_pre( + &mut self, + ast: &ast::ClassBracketed, + ) -> fmt::Result { + if ast.negated { + self.wtr.write_str("[^") + } else { + self.wtr.write_str("[") + } + } + + fn fmt_class_bracketed_post( + &mut self, + _ast: &ast::ClassBracketed, + ) -> fmt::Result { + self.wtr.write_str("]") + } + + fn fmt_class_set_binary_op_kind( + &mut self, + ast: &ast::ClassSetBinaryOpKind, + ) -> fmt::Result { + use crate::ast::ClassSetBinaryOpKind::*; + match *ast { + Intersection => self.wtr.write_str("&&"), + Difference => self.wtr.write_str("--"), + SymmetricDifference => self.wtr.write_str("~~"), + } + } + + fn fmt_class_perl(&mut self, ast: &ast::ClassPerl) -> fmt::Result { + use crate::ast::ClassPerlKind::*; + match ast.kind { + Digit if ast.negated => self.wtr.write_str(r"\D"), + Digit => self.wtr.write_str(r"\d"), + Space if ast.negated => self.wtr.write_str(r"\S"), + Space => self.wtr.write_str(r"\s"), + Word if ast.negated => self.wtr.write_str(r"\W"), + Word => self.wtr.write_str(r"\w"), + } + } + + fn fmt_class_ascii(&mut self, ast: &ast::ClassAscii) -> fmt::Result { + use crate::ast::ClassAsciiKind::*; + match ast.kind { + Alnum if ast.negated => self.wtr.write_str("[:^alnum:]"), + Alnum => self.wtr.write_str("[:alnum:]"), + Alpha if ast.negated => self.wtr.write_str("[:^alpha:]"), + Alpha => self.wtr.write_str("[:alpha:]"), + Ascii if 
ast.negated => self.wtr.write_str("[:^ascii:]"), + Ascii => self.wtr.write_str("[:ascii:]"), + Blank if ast.negated => self.wtr.write_str("[:^blank:]"), + Blank => self.wtr.write_str("[:blank:]"), + Cntrl if ast.negated => self.wtr.write_str("[:^cntrl:]"), + Cntrl => self.wtr.write_str("[:cntrl:]"), + Digit if ast.negated => self.wtr.write_str("[:^digit:]"), + Digit => self.wtr.write_str("[:digit:]"), + Graph if ast.negated => self.wtr.write_str("[:^graph:]"), + Graph => self.wtr.write_str("[:graph:]"), + Lower if ast.negated => self.wtr.write_str("[:^lower:]"), + Lower => self.wtr.write_str("[:lower:]"), + Print if ast.negated => self.wtr.write_str("[:^print:]"), + Print => self.wtr.write_str("[:print:]"), + Punct if ast.negated => self.wtr.write_str("[:^punct:]"), + Punct => self.wtr.write_str("[:punct:]"), + Space if ast.negated => self.wtr.write_str("[:^space:]"), + Space => self.wtr.write_str("[:space:]"), + Upper if ast.negated => self.wtr.write_str("[:^upper:]"), + Upper => self.wtr.write_str("[:upper:]"), + Word if ast.negated => self.wtr.write_str("[:^word:]"), + Word => self.wtr.write_str("[:word:]"), + Xdigit if ast.negated => self.wtr.write_str("[:^xdigit:]"), + Xdigit => self.wtr.write_str("[:xdigit:]"), + } + } + + fn fmt_class_unicode(&mut self, ast: &ast::ClassUnicode) -> fmt::Result { + use crate::ast::ClassUnicodeKind::*; + use crate::ast::ClassUnicodeOpKind::*; + + if ast.negated { + self.wtr.write_str(r"\P")?; + } else { + self.wtr.write_str(r"\p")?; + } + match ast.kind { + OneLetter(c) => self.wtr.write_char(c), + Named(ref x) => write!(self.wtr, "{{{}}}", x), + NamedValue { op: Equal, ref name, ref value } => { + write!(self.wtr, "{{{}={}}}", name, value) + } + NamedValue { op: Colon, ref name, ref value } => { + write!(self.wtr, "{{{}:{}}}", name, value) + } + NamedValue { op: NotEqual, ref name, ref value } => { + write!(self.wtr, "{{{}!={}}}", name, value) + } + } + } +} + +#[cfg(test)] +mod tests { + use alloc::string::String; + + use 
crate::ast::parse::ParserBuilder; + + use super::*; + + fn roundtrip(given: &str) { + roundtrip_with(|b| b, given); + } + + fn roundtrip_with(mut f: F, given: &str) + where + F: FnMut(&mut ParserBuilder) -> &mut ParserBuilder, + { + let mut builder = ParserBuilder::new(); + f(&mut builder); + let ast = builder.build().parse(given).unwrap(); + + let mut printer = Printer::new(); + let mut dst = String::new(); + printer.print(&ast, &mut dst).unwrap(); + assert_eq!(given, dst); + } + + #[test] + fn print_literal() { + roundtrip("a"); + roundtrip(r"\["); + roundtrip_with(|b| b.octal(true), r"\141"); + roundtrip(r"\x61"); + roundtrip(r"\x7F"); + roundtrip(r"\u0061"); + roundtrip(r"\U00000061"); + roundtrip(r"\x{61}"); + roundtrip(r"\x{7F}"); + roundtrip(r"\u{61}"); + roundtrip(r"\U{61}"); + + roundtrip(r"\a"); + roundtrip(r"\f"); + roundtrip(r"\t"); + roundtrip(r"\n"); + roundtrip(r"\r"); + roundtrip(r"\v"); + roundtrip(r"(?x)\ "); + } + + #[test] + fn print_dot() { + roundtrip("."); + } + + #[test] + fn print_concat() { + roundtrip("ab"); + roundtrip("abcde"); + roundtrip("a(bcd)ef"); + } + + #[test] + fn print_alternation() { + roundtrip("a|b"); + roundtrip("a|b|c|d|e"); + roundtrip("|a|b|c|d|e"); + roundtrip("|a|b|c|d|e|"); + roundtrip("a(b|c|d)|e|f"); + } + + #[test] + fn print_assertion() { + roundtrip(r"^"); + roundtrip(r"$"); + roundtrip(r"\A"); + roundtrip(r"\z"); + roundtrip(r"\b"); + roundtrip(r"\B"); + } + + #[test] + fn print_repetition() { + roundtrip("a?"); + roundtrip("a??"); + roundtrip("a*"); + roundtrip("a*?"); + roundtrip("a+"); + roundtrip("a+?"); + roundtrip("a{5}"); + roundtrip("a{5}?"); + roundtrip("a{5,}"); + roundtrip("a{5,}?"); + roundtrip("a{5,10}"); + roundtrip("a{5,10}?"); + } + + #[test] + fn print_flags() { + roundtrip("(?i)"); + roundtrip("(?-i)"); + roundtrip("(?s-i)"); + roundtrip("(?-si)"); + roundtrip("(?siUmux)"); + } + + #[test] + fn print_group() { + roundtrip("(?i:a)"); + roundtrip("(?Pa)"); + roundtrip("(?a)"); + 
roundtrip("(a)"); + } + + #[test] + fn print_class() { + roundtrip(r"[abc]"); + roundtrip(r"[a-z]"); + roundtrip(r"[^a-z]"); + roundtrip(r"[a-z0-9]"); + roundtrip(r"[-a-z0-9]"); + roundtrip(r"[-a-z0-9]"); + roundtrip(r"[a-z0-9---]"); + roundtrip(r"[a-z&&m-n]"); + roundtrip(r"[[a-z&&m-n]]"); + roundtrip(r"[a-z--m-n]"); + roundtrip(r"[a-z~~m-n]"); + roundtrip(r"[a-z[0-9]]"); + roundtrip(r"[a-z[^0-9]]"); + + roundtrip(r"\d"); + roundtrip(r"\D"); + roundtrip(r"\s"); + roundtrip(r"\S"); + roundtrip(r"\w"); + roundtrip(r"\W"); + + roundtrip(r"[[:alnum:]]"); + roundtrip(r"[[:^alnum:]]"); + roundtrip(r"[[:alpha:]]"); + roundtrip(r"[[:^alpha:]]"); + roundtrip(r"[[:ascii:]]"); + roundtrip(r"[[:^ascii:]]"); + roundtrip(r"[[:blank:]]"); + roundtrip(r"[[:^blank:]]"); + roundtrip(r"[[:cntrl:]]"); + roundtrip(r"[[:^cntrl:]]"); + roundtrip(r"[[:digit:]]"); + roundtrip(r"[[:^digit:]]"); + roundtrip(r"[[:graph:]]"); + roundtrip(r"[[:^graph:]]"); + roundtrip(r"[[:lower:]]"); + roundtrip(r"[[:^lower:]]"); + roundtrip(r"[[:print:]]"); + roundtrip(r"[[:^print:]]"); + roundtrip(r"[[:punct:]]"); + roundtrip(r"[[:^punct:]]"); + roundtrip(r"[[:space:]]"); + roundtrip(r"[[:^space:]]"); + roundtrip(r"[[:upper:]]"); + roundtrip(r"[[:^upper:]]"); + roundtrip(r"[[:word:]]"); + roundtrip(r"[[:^word:]]"); + roundtrip(r"[[:xdigit:]]"); + roundtrip(r"[[:^xdigit:]]"); + + roundtrip(r"\pL"); + roundtrip(r"\PL"); + roundtrip(r"\p{L}"); + roundtrip(r"\P{L}"); + roundtrip(r"\p{X=Y}"); + roundtrip(r"\P{X=Y}"); + roundtrip(r"\p{X:Y}"); + roundtrip(r"\P{X:Y}"); + roundtrip(r"\p{X!=Y}"); + roundtrip(r"\P{X!=Y}"); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/visitor.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/visitor.rs new file mode 100644 index 0000000000000000000000000000000000000000..36cd713c0f3dc60d062adbf1709a3fe0120baba5 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/ast/visitor.rs @@ -0,0 +1,522 @@ +use alloc::{vec, vec::Vec}; + +use crate::ast::{self, Ast}; + +/// A trait for visiting an abstract syntax tree (AST) in depth first order. +/// +/// The principle aim of this trait is to enable callers to perform case +/// analysis on an abstract syntax tree without necessarily using recursion. +/// In particular, this permits callers to do case analysis with constant stack +/// usage, which can be important since the size of an abstract syntax tree +/// may be proportional to end user input. +/// +/// Typical usage of this trait involves providing an implementation and then +/// running it using the [`visit`] function. +/// +/// Note that the abstract syntax tree for a regular expression is quite +/// complex. Unless you specifically need it, you might be able to use the much +/// simpler [high-level intermediate representation](crate::hir::Hir) and its +/// [corresponding `Visitor` trait](crate::hir::Visitor) instead. +pub trait Visitor { + /// The result of visiting an AST. + type Output; + /// An error that visiting an AST might return. + type Err; + + /// All implementors of `Visitor` must provide a `finish` method, which + /// yields the result of visiting the AST or an error. + fn finish(self) -> Result; + + /// This method is called before beginning traversal of the AST. + fn start(&mut self) {} + + /// This method is called on an `Ast` before descending into child `Ast` + /// nodes. + fn visit_pre(&mut self, _ast: &Ast) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called on an `Ast` after descending all of its child + /// `Ast` nodes. + fn visit_post(&mut self, _ast: &Ast) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called between child nodes of an + /// [`Alternation`](ast::Alternation). 
+ fn visit_alternation_in(&mut self) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called between child nodes of a concatenation. + fn visit_concat_in(&mut self) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called on every [`ClassSetItem`](ast::ClassSetItem) + /// before descending into child nodes. + fn visit_class_set_item_pre( + &mut self, + _ast: &ast::ClassSetItem, + ) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called on every [`ClassSetItem`](ast::ClassSetItem) + /// after descending into child nodes. + fn visit_class_set_item_post( + &mut self, + _ast: &ast::ClassSetItem, + ) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called on every + /// [`ClassSetBinaryOp`](ast::ClassSetBinaryOp) before descending into + /// child nodes. + fn visit_class_set_binary_op_pre( + &mut self, + _ast: &ast::ClassSetBinaryOp, + ) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called on every + /// [`ClassSetBinaryOp`](ast::ClassSetBinaryOp) after descending into child + /// nodes. + fn visit_class_set_binary_op_post( + &mut self, + _ast: &ast::ClassSetBinaryOp, + ) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called between the left hand and right hand child nodes + /// of a [`ClassSetBinaryOp`](ast::ClassSetBinaryOp). + fn visit_class_set_binary_op_in( + &mut self, + _ast: &ast::ClassSetBinaryOp, + ) -> Result<(), Self::Err> { + Ok(()) + } +} + +/// Executes an implementation of `Visitor` in constant stack space. +/// +/// This function will visit every node in the given `Ast` while calling the +/// appropriate methods provided by the [`Visitor`] trait. +/// +/// The primary use case for this method is when one wants to perform case +/// analysis over an `Ast` without using a stack size proportional to the depth +/// of the `Ast`. Namely, this method will instead use constant stack size, but +/// will use heap space proportional to the size of the `Ast`. 
This may be +/// desirable in cases where the size of `Ast` is proportional to end user +/// input. +/// +/// If the visitor returns an error at any point, then visiting is stopped and +/// the error is returned. +pub fn visit(ast: &Ast, visitor: V) -> Result { + HeapVisitor::new().visit(ast, visitor) +} + +/// HeapVisitor visits every item in an `Ast` recursively using constant stack +/// size and a heap size proportional to the size of the `Ast`. +struct HeapVisitor<'a> { + /// A stack of `Ast` nodes. This is roughly analogous to the call stack + /// used in a typical recursive visitor. + stack: Vec<(&'a Ast, Frame<'a>)>, + /// Similar to the `Ast` stack above, but is used only for character + /// classes. In particular, character classes embed their own mini + /// recursive syntax. + stack_class: Vec<(ClassInduct<'a>, ClassFrame<'a>)>, +} + +/// Represents a single stack frame while performing structural induction over +/// an `Ast`. +enum Frame<'a> { + /// A stack frame allocated just before descending into a repetition + /// operator's child node. + Repetition(&'a ast::Repetition), + /// A stack frame allocated just before descending into a group's child + /// node. + Group(&'a ast::Group), + /// The stack frame used while visiting every child node of a concatenation + /// of expressions. + Concat { + /// The child node we are currently visiting. + head: &'a Ast, + /// The remaining child nodes to visit (which may be empty). + tail: &'a [Ast], + }, + /// The stack frame used while visiting every child node of an alternation + /// of expressions. + Alternation { + /// The child node we are currently visiting. + head: &'a Ast, + /// The remaining child nodes to visit (which may be empty). + tail: &'a [Ast], + }, +} + +/// Represents a single stack frame while performing structural induction over +/// a character class. +enum ClassFrame<'a> { + /// The stack frame used while visiting every child node of a union of + /// character class items. 
+ Union { + /// The child node we are currently visiting. + head: &'a ast::ClassSetItem, + /// The remaining child nodes to visit (which may be empty). + tail: &'a [ast::ClassSetItem], + }, + /// The stack frame used while a binary class operation. + Binary { op: &'a ast::ClassSetBinaryOp }, + /// A stack frame allocated just before descending into a binary operator's + /// left hand child node. + BinaryLHS { + op: &'a ast::ClassSetBinaryOp, + lhs: &'a ast::ClassSet, + rhs: &'a ast::ClassSet, + }, + /// A stack frame allocated just before descending into a binary operator's + /// right hand child node. + BinaryRHS { op: &'a ast::ClassSetBinaryOp, rhs: &'a ast::ClassSet }, +} + +/// A representation of the inductive step when performing structural induction +/// over a character class. +/// +/// Note that there is no analogous explicit type for the inductive step for +/// `Ast` nodes because the inductive step is just an `Ast`. For character +/// classes, the inductive step can produce one of two possible child nodes: +/// an item or a binary operation. (An item cannot be a binary operation +/// because that would imply binary operations can be unioned in the concrete +/// syntax, which is not possible.) +enum ClassInduct<'a> { + Item(&'a ast::ClassSetItem), + BinaryOp(&'a ast::ClassSetBinaryOp), +} + +impl<'a> HeapVisitor<'a> { + fn new() -> HeapVisitor<'a> { + HeapVisitor { stack: vec![], stack_class: vec![] } + } + + fn visit( + &mut self, + mut ast: &'a Ast, + mut visitor: V, + ) -> Result { + self.stack.clear(); + self.stack_class.clear(); + + visitor.start(); + loop { + visitor.visit_pre(ast)?; + if let Some(x) = self.induct(ast, &mut visitor)? { + let child = x.child(); + self.stack.push((ast, x)); + ast = child; + continue; + } + // No induction means we have a base case, so we can post visit + // it now. + visitor.visit_post(ast)?; + + // At this point, we now try to pop our call stack until it is + // either empty or we hit another inductive case. 
+ loop { + let (post_ast, frame) = match self.stack.pop() { + None => return visitor.finish(), + Some((post_ast, frame)) => (post_ast, frame), + }; + // If this is a concat/alternate, then we might have additional + // inductive steps to process. + if let Some(x) = self.pop(frame) { + match x { + Frame::Alternation { .. } => { + visitor.visit_alternation_in()?; + } + Frame::Concat { .. } => { + visitor.visit_concat_in()?; + } + _ => {} + } + ast = x.child(); + self.stack.push((post_ast, x)); + break; + } + // Otherwise, we've finished visiting all the child nodes for + // this AST, so we can post visit it now. + visitor.visit_post(post_ast)?; + } + } + } + + /// Build a stack frame for the given AST if one is needed (which occurs if + /// and only if there are child nodes in the AST). Otherwise, return None. + /// + /// If this visits a class, then the underlying visitor implementation may + /// return an error which will be passed on here. + fn induct( + &mut self, + ast: &'a Ast, + visitor: &mut V, + ) -> Result>, V::Err> { + Ok(match *ast { + Ast::ClassBracketed(ref x) => { + self.visit_class(x, visitor)?; + None + } + Ast::Repetition(ref x) => Some(Frame::Repetition(x)), + Ast::Group(ref x) => Some(Frame::Group(x)), + Ast::Concat(ref x) if x.asts.is_empty() => None, + Ast::Concat(ref x) => { + Some(Frame::Concat { head: &x.asts[0], tail: &x.asts[1..] }) + } + Ast::Alternation(ref x) if x.asts.is_empty() => None, + Ast::Alternation(ref x) => Some(Frame::Alternation { + head: &x.asts[0], + tail: &x.asts[1..], + }), + _ => None, + }) + } + + /// Pops the given frame. If the frame has an additional inductive step, + /// then return it, otherwise return `None`. + fn pop(&self, induct: Frame<'a>) -> Option> { + match induct { + Frame::Repetition(_) => None, + Frame::Group(_) => None, + Frame::Concat { tail, .. } => { + if tail.is_empty() { + None + } else { + Some(Frame::Concat { head: &tail[0], tail: &tail[1..] }) + } + } + Frame::Alternation { tail, .. 
} => { + if tail.is_empty() { + None + } else { + Some(Frame::Alternation { + head: &tail[0], + tail: &tail[1..], + }) + } + } + } + } + + fn visit_class( + &mut self, + ast: &'a ast::ClassBracketed, + visitor: &mut V, + ) -> Result<(), V::Err> { + let mut ast = ClassInduct::from_bracketed(ast); + loop { + self.visit_class_pre(&ast, visitor)?; + if let Some(x) = self.induct_class(&ast) { + let child = x.child(); + self.stack_class.push((ast, x)); + ast = child; + continue; + } + self.visit_class_post(&ast, visitor)?; + + // At this point, we now try to pop our call stack until it is + // either empty or we hit another inductive case. + loop { + let (post_ast, frame) = match self.stack_class.pop() { + None => return Ok(()), + Some((post_ast, frame)) => (post_ast, frame), + }; + // If this is a union or a binary op, then we might have + // additional inductive steps to process. + if let Some(x) = self.pop_class(frame) { + if let ClassFrame::BinaryRHS { ref op, .. } = x { + visitor.visit_class_set_binary_op_in(op)?; + } + ast = x.child(); + self.stack_class.push((post_ast, x)); + break; + } + // Otherwise, we've finished visiting all the child nodes for + // this class node, so we can post visit it now. + self.visit_class_post(&post_ast, visitor)?; + } + } + } + + /// Call the appropriate `Visitor` methods given an inductive step. + fn visit_class_pre( + &self, + ast: &ClassInduct<'a>, + visitor: &mut V, + ) -> Result<(), V::Err> { + match *ast { + ClassInduct::Item(item) => { + visitor.visit_class_set_item_pre(item)?; + } + ClassInduct::BinaryOp(op) => { + visitor.visit_class_set_binary_op_pre(op)?; + } + } + Ok(()) + } + + /// Call the appropriate `Visitor` methods given an inductive step. 
+ fn visit_class_post( + &self, + ast: &ClassInduct<'a>, + visitor: &mut V, + ) -> Result<(), V::Err> { + match *ast { + ClassInduct::Item(item) => { + visitor.visit_class_set_item_post(item)?; + } + ClassInduct::BinaryOp(op) => { + visitor.visit_class_set_binary_op_post(op)?; + } + } + Ok(()) + } + + /// Build a stack frame for the given class node if one is needed (which + /// occurs if and only if there are child nodes). Otherwise, return None. + fn induct_class(&self, ast: &ClassInduct<'a>) -> Option> { + match *ast { + ClassInduct::Item(&ast::ClassSetItem::Bracketed(ref x)) => { + match x.kind { + ast::ClassSet::Item(ref item) => { + Some(ClassFrame::Union { head: item, tail: &[] }) + } + ast::ClassSet::BinaryOp(ref op) => { + Some(ClassFrame::Binary { op }) + } + } + } + ClassInduct::Item(&ast::ClassSetItem::Union(ref x)) => { + if x.items.is_empty() { + None + } else { + Some(ClassFrame::Union { + head: &x.items[0], + tail: &x.items[1..], + }) + } + } + ClassInduct::BinaryOp(op) => { + Some(ClassFrame::BinaryLHS { op, lhs: &op.lhs, rhs: &op.rhs }) + } + _ => None, + } + } + + /// Pops the given frame. If the frame has an additional inductive step, + /// then return it, otherwise return `None`. + fn pop_class(&self, induct: ClassFrame<'a>) -> Option> { + match induct { + ClassFrame::Union { tail, .. } => { + if tail.is_empty() { + None + } else { + Some(ClassFrame::Union { + head: &tail[0], + tail: &tail[1..], + }) + } + } + ClassFrame::Binary { .. } => None, + ClassFrame::BinaryLHS { op, rhs, .. } => { + Some(ClassFrame::BinaryRHS { op, rhs }) + } + ClassFrame::BinaryRHS { .. } => None, + } + } +} + +impl<'a> Frame<'a> { + /// Perform the next inductive step on this frame and return the next + /// child AST node to visit. + fn child(&self) -> &'a Ast { + match *self { + Frame::Repetition(rep) => &rep.ast, + Frame::Group(group) => &group.ast, + Frame::Concat { head, .. } => head, + Frame::Alternation { head, .. 
} => head, + } + } +} + +impl<'a> ClassFrame<'a> { + /// Perform the next inductive step on this frame and return the next + /// child class node to visit. + fn child(&self) -> ClassInduct<'a> { + match *self { + ClassFrame::Union { head, .. } => ClassInduct::Item(head), + ClassFrame::Binary { op, .. } => ClassInduct::BinaryOp(op), + ClassFrame::BinaryLHS { ref lhs, .. } => { + ClassInduct::from_set(lhs) + } + ClassFrame::BinaryRHS { ref rhs, .. } => { + ClassInduct::from_set(rhs) + } + } + } +} + +impl<'a> ClassInduct<'a> { + fn from_bracketed(ast: &'a ast::ClassBracketed) -> ClassInduct<'a> { + ClassInduct::from_set(&ast.kind) + } + + fn from_set(ast: &'a ast::ClassSet) -> ClassInduct<'a> { + match *ast { + ast::ClassSet::Item(ref item) => ClassInduct::Item(item), + ast::ClassSet::BinaryOp(ref op) => ClassInduct::BinaryOp(op), + } + } +} + +impl<'a> core::fmt::Debug for ClassFrame<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let x = match *self { + ClassFrame::Union { .. } => "Union", + ClassFrame::Binary { .. } => "Binary", + ClassFrame::BinaryLHS { .. } => "BinaryLHS", + ClassFrame::BinaryRHS { .. 
} => "BinaryRHS", + }; + write!(f, "{x}") + } +} + +impl<'a> core::fmt::Debug for ClassInduct<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let x = match *self { + ClassInduct::Item(it) => match *it { + ast::ClassSetItem::Empty(_) => "Item(Empty)", + ast::ClassSetItem::Literal(_) => "Item(Literal)", + ast::ClassSetItem::Range(_) => "Item(Range)", + ast::ClassSetItem::Ascii(_) => "Item(Ascii)", + ast::ClassSetItem::Perl(_) => "Item(Perl)", + ast::ClassSetItem::Unicode(_) => "Item(Unicode)", + ast::ClassSetItem::Bracketed(_) => "Item(Bracketed)", + ast::ClassSetItem::Union(_) => "Item(Union)", + }, + ClassInduct::BinaryOp(it) => match it.kind { + ast::ClassSetBinaryOpKind::Intersection => { + "BinaryOp(Intersection)" + } + ast::ClassSetBinaryOpKind::Difference => { + "BinaryOp(Difference)" + } + ast::ClassSetBinaryOpKind::SymmetricDifference => { + "BinaryOp(SymmetricDifference)" + } + }, + }; + write!(f, "{x}") + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/debug.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/debug.rs new file mode 100644 index 0000000000000000000000000000000000000000..7a47d9de8eb339fd485e0ff6ee08dde3d586749e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/debug.rs @@ -0,0 +1,107 @@ +/// A type that wraps a single byte with a convenient fmt::Debug impl that +/// escapes the byte. +pub(crate) struct Byte(pub(crate) u8); + +impl core::fmt::Debug for Byte { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + // Special case ASCII space. It's too hard to read otherwise, so + // put quotes around it. I sometimes wonder whether just '\x20' would + // be better... + if self.0 == b' ' { + return write!(f, "' '"); + } + // 10 bytes is enough to cover any output from ascii::escape_default. 
+ let mut bytes = [0u8; 10]; + let mut len = 0; + for (i, mut b) in core::ascii::escape_default(self.0).enumerate() { + // capitalize \xab to \xAB + if i >= 2 && b'a' <= b && b <= b'f' { + b -= 32; + } + bytes[len] = b; + len += 1; + } + write!(f, "{}", core::str::from_utf8(&bytes[..len]).unwrap()) + } +} + +/// A type that provides a human readable debug impl for arbitrary bytes. +/// +/// This generally works best when the bytes are presumed to be mostly UTF-8, +/// but will work for anything. +/// +/// N.B. This is copied nearly verbatim from regex-automata. Sigh. +pub(crate) struct Bytes<'a>(pub(crate) &'a [u8]); + +impl<'a> core::fmt::Debug for Bytes<'a> { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "\"")?; + // This is a sad re-implementation of a similar impl found in bstr. + let mut bytes = self.0; + while let Some(result) = utf8_decode(bytes) { + let ch = match result { + Ok(ch) => ch, + Err(byte) => { + write!(f, r"\x{byte:02x}")?; + bytes = &bytes[1..]; + continue; + } + }; + bytes = &bytes[ch.len_utf8()..]; + match ch { + '\0' => write!(f, "\\0")?, + // ASCII control characters except \0, \n, \r, \t + '\x01'..='\x08' + | '\x0b' + | '\x0c' + | '\x0e'..='\x19' + | '\x7f' => { + write!(f, "\\x{:02x}", u32::from(ch))?; + } + '\n' | '\r' | '\t' | _ => { + write!(f, "{}", ch.escape_debug())?; + } + } + } + write!(f, "\"")?; + Ok(()) + } +} + +/// Decodes the next UTF-8 encoded codepoint from the given byte slice. +/// +/// If no valid encoding of a codepoint exists at the beginning of the given +/// byte slice, then the first byte is returned instead. +/// +/// This returns `None` if and only if `bytes` is empty. 
+pub(crate) fn utf8_decode(bytes: &[u8]) -> Option> { + fn len(byte: u8) -> Option { + if byte <= 0x7F { + return Some(1); + } else if byte & 0b1100_0000 == 0b1000_0000 { + return None; + } else if byte <= 0b1101_1111 { + Some(2) + } else if byte <= 0b1110_1111 { + Some(3) + } else if byte <= 0b1111_0111 { + Some(4) + } else { + None + } + } + + if bytes.is_empty() { + return None; + } + let len = match len(bytes[0]) { + None => return Some(Err(bytes[0])), + Some(len) if len > bytes.len() => return Some(Err(bytes[0])), + Some(1) => return Some(Ok(char::from(bytes[0]))), + Some(len) => len, + }; + match core::str::from_utf8(&bytes[..len]) { + Ok(s) => Some(Ok(s.chars().next().unwrap())), + Err(_) => Some(Err(bytes[0])), + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/either.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/either.rs new file mode 100644 index 0000000000000000000000000000000000000000..7ae41e4ced7460d31b8e19f7b534f0924578dc28 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/either.rs @@ -0,0 +1,8 @@ +/// A simple binary sum type. +/// +/// This is occasionally useful in an ad hoc fashion. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum Either { + Left(Left), + Right(Right), +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/error.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..21e484df96dcd904f1fd5a987d2a73151d5c63c1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/error.rs @@ -0,0 +1,311 @@ +use alloc::{ + format, + string::{String, ToString}, + vec, + vec::Vec, +}; + +use crate::{ast, hir}; + +/// This error type encompasses any error that can be returned by this crate. +/// +/// This error type is marked as `non_exhaustive`. 
This means that adding a +/// new variant is not considered a breaking change. +#[non_exhaustive] +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum Error { + /// An error that occurred while translating concrete syntax into abstract + /// syntax (AST). + Parse(ast::Error), + /// An error that occurred while translating abstract syntax into a high + /// level intermediate representation (HIR). + Translate(hir::Error), +} + +impl From for Error { + fn from(err: ast::Error) -> Error { + Error::Parse(err) + } +} + +impl From for Error { + fn from(err: hir::Error) -> Error { + Error::Translate(err) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for Error {} + +impl core::fmt::Display for Error { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match *self { + Error::Parse(ref x) => x.fmt(f), + Error::Translate(ref x) => x.fmt(f), + } + } +} + +/// A helper type for formatting nice error messages. +/// +/// This type is responsible for reporting regex parse errors in a nice human +/// readable format. Most of its complexity is from interspersing notational +/// markers pointing out the position where an error occurred. +#[derive(Debug)] +pub struct Formatter<'e, E> { + /// The original regex pattern in which the error occurred. + pattern: &'e str, + /// The error kind. It must impl fmt::Display. + err: &'e E, + /// The primary span of the error. + span: &'e ast::Span, + /// An auxiliary and optional span, in case the error needs to point to + /// two locations (e.g., when reporting a duplicate capture group name). 
+ aux_span: Option<&'e ast::Span>, +} + +impl<'e> From<&'e ast::Error> for Formatter<'e, ast::ErrorKind> { + fn from(err: &'e ast::Error) -> Self { + Formatter { + pattern: err.pattern(), + err: err.kind(), + span: err.span(), + aux_span: err.auxiliary_span(), + } + } +} + +impl<'e> From<&'e hir::Error> for Formatter<'e, hir::ErrorKind> { + fn from(err: &'e hir::Error) -> Self { + Formatter { + pattern: err.pattern(), + err: err.kind(), + span: err.span(), + aux_span: None, + } + } +} + +impl<'e, E: core::fmt::Display> core::fmt::Display for Formatter<'e, E> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let spans = Spans::from_formatter(self); + if self.pattern.contains('\n') { + let divider = repeat_char('~', 79); + + writeln!(f, "regex parse error:")?; + writeln!(f, "{divider}")?; + let notated = spans.notate(); + write!(f, "{notated}")?; + writeln!(f, "{divider}")?; + // If we have error spans that cover multiple lines, then we just + // note the line numbers. + if !spans.multi_line.is_empty() { + let mut notes = vec![]; + for span in &spans.multi_line { + notes.push(format!( + "on line {} (column {}) through line {} (column {})", + span.start.line, + span.start.column, + span.end.line, + span.end.column - 1 + )); + } + writeln!(f, "{}", notes.join("\n"))?; + } + write!(f, "error: {}", self.err)?; + } else { + writeln!(f, "regex parse error:")?; + let notated = Spans::from_formatter(self).notate(); + write!(f, "{notated}")?; + write!(f, "error: {}", self.err)?; + } + Ok(()) + } +} + +/// This type represents an arbitrary number of error spans in a way that makes +/// it convenient to notate the regex pattern. ("Notate" means "point out +/// exactly where the error occurred in the regex pattern.") +/// +/// Technically, we can only ever have two spans given our current error +/// structure. 
However, after toiling with a specific algorithm for handling +/// two spans, it became obvious that an algorithm to handle an arbitrary +/// number of spans was actually much simpler. +struct Spans<'p> { + /// The original regex pattern string. + pattern: &'p str, + /// The total width that should be used for line numbers. The width is + /// used for left padding the line numbers for alignment. + /// + /// A value of `0` means line numbers should not be displayed. That is, + /// the pattern is itself only one line. + line_number_width: usize, + /// All error spans that occur on a single line. This sequence always has + /// length equivalent to the number of lines in `pattern`, where the index + /// of the sequence represents a line number, starting at `0`. The spans + /// in each line are sorted in ascending order. + by_line: Vec>, + /// All error spans that occur over one or more lines. That is, the start + /// and end position of the span have different line numbers. The spans are + /// sorted in ascending order. + multi_line: Vec, +} + +impl<'p> Spans<'p> { + /// Build a sequence of spans from a formatter. + fn from_formatter<'e, E: core::fmt::Display>( + fmter: &'p Formatter<'e, E>, + ) -> Spans<'p> { + let mut line_count = fmter.pattern.lines().count(); + // If the pattern ends with a `\n` literal, then our line count is + // off by one, since a span can occur immediately after the last `\n`, + // which is consider to be an additional line. + if fmter.pattern.ends_with('\n') { + line_count += 1; + } + let line_number_width = + if line_count <= 1 { 0 } else { line_count.to_string().len() }; + let mut spans = Spans { + pattern: &fmter.pattern, + line_number_width, + by_line: vec![vec![]; line_count], + multi_line: vec![], + }; + spans.add(fmter.span.clone()); + if let Some(span) = fmter.aux_span { + spans.add(span.clone()); + } + spans + } + + /// Add the given span to this sequence, putting it in the right place. 
+ fn add(&mut self, span: ast::Span) { + // This is grossly inefficient since we sort after each add, but right + // now, we only ever add two spans at most. + if span.is_one_line() { + let i = span.start.line - 1; // because lines are 1-indexed + self.by_line[i].push(span); + self.by_line[i].sort(); + } else { + self.multi_line.push(span); + self.multi_line.sort(); + } + } + + /// Notate the pattern string with carets (`^`) pointing at each span + /// location. This only applies to spans that occur within a single line. + fn notate(&self) -> String { + let mut notated = String::new(); + for (i, line) in self.pattern.lines().enumerate() { + if self.line_number_width > 0 { + notated.push_str(&self.left_pad_line_number(i + 1)); + notated.push_str(": "); + } else { + notated.push_str(" "); + } + notated.push_str(line); + notated.push('\n'); + if let Some(notes) = self.notate_line(i) { + notated.push_str(¬es); + notated.push('\n'); + } + } + notated + } + + /// Return notes for the line indexed at `i` (zero-based). If there are no + /// spans for the given line, then `None` is returned. Otherwise, an + /// appropriately space padded string with correctly positioned `^` is + /// returned, accounting for line numbers. + fn notate_line(&self, i: usize) -> Option { + let spans = &self.by_line[i]; + if spans.is_empty() { + return None; + } + let mut notes = String::new(); + for _ in 0..self.line_number_padding() { + notes.push(' '); + } + let mut pos = 0; + for span in spans { + for _ in pos..(span.start.column - 1) { + notes.push(' '); + pos += 1; + } + let note_len = span.end.column.saturating_sub(span.start.column); + for _ in 0..core::cmp::max(1, note_len) { + notes.push('^'); + pos += 1; + } + } + Some(notes) + } + + /// Left pad the given line number with spaces such that it is aligned with + /// other line numbers. 
+ fn left_pad_line_number(&self, n: usize) -> String { + let n = n.to_string(); + let pad = self.line_number_width.checked_sub(n.len()).unwrap(); + let mut result = repeat_char(' ', pad); + result.push_str(&n); + result + } + + /// Return the line number padding beginning at the start of each line of + /// the pattern. + /// + /// If the pattern is only one line, then this returns a fixed padding + /// for visual indentation. + fn line_number_padding(&self) -> usize { + if self.line_number_width == 0 { + 4 + } else { + 2 + self.line_number_width + } + } +} + +fn repeat_char(c: char, count: usize) -> String { + core::iter::repeat(c).take(count).collect() +} + +#[cfg(test)] +mod tests { + use alloc::string::ToString; + + use crate::ast::parse::Parser; + + fn assert_panic_message(pattern: &str, expected_msg: &str) { + let result = Parser::new().parse(pattern); + match result { + Ok(_) => { + panic!("regex should not have parsed"); + } + Err(err) => { + assert_eq!(err.to_string(), expected_msg.trim()); + } + } + } + + // See: https://github.com/rust-lang/regex/issues/464 + #[test] + fn regression_464() { + let err = Parser::new().parse("a{\n").unwrap_err(); + // This test checks that the error formatter doesn't panic. 
+ assert!(!err.to_string().is_empty()); + } + + // See: https://github.com/rust-lang/regex/issues/545 + #[test] + fn repetition_quantifier_expects_a_valid_decimal() { + assert_panic_message( + r"\\u{[^}]*}", + r#" +regex parse error: + \\u{[^}]*} + ^ +error: repetition quantifier expects a valid decimal +"#, + ); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/interval.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/interval.rs new file mode 100644 index 0000000000000000000000000000000000000000..d507ee724d3918ec0ff1eaa2c48e4946451605f1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/interval.rs @@ -0,0 +1,564 @@ +use core::{char, cmp, fmt::Debug, slice}; + +use alloc::vec::Vec; + +use crate::unicode; + +// This module contains an *internal* implementation of interval sets. +// +// The primary invariant that interval sets guards is canonical ordering. That +// is, every interval set contains an ordered sequence of intervals where +// no two intervals are overlapping or adjacent. While this invariant is +// occasionally broken within the implementation, it should be impossible for +// callers to observe it. +// +// Since case folding (as implemented below) breaks that invariant, we roll +// that into this API even though it is a little out of place in an otherwise +// generic interval set. (Hence the reason why the `unicode` module is imported +// here.) +// +// Some of the implementation complexity here is a result of me wanting to +// preserve the sequential representation without using additional memory. +// In many cases, we do use linear extra memory, but it is at most 2x and it +// is amortized. If we relaxed the memory requirements, this implementation +// could become much simpler. 
The extra memory is honestly probably OK, but +// character classes (especially of the Unicode variety) can become quite +// large, and it would be nice to keep regex compilation snappy even in debug +// builds. (In the past, I have been careless with this area of code and it has +// caused slow regex compilations in debug mode, so this isn't entirely +// unwarranted.) +// +// Tests on this are relegated to the public API of HIR in src/hir.rs. + +#[derive(Clone, Debug)] +pub struct IntervalSet { + /// A sorted set of non-overlapping ranges. + ranges: Vec, + /// While not required at all for correctness, we keep track of whether an + /// interval set has been case folded or not. This helps us avoid doing + /// redundant work if, for example, a set has already been cased folded. + /// And note that whether a set is folded or not is preserved through + /// all of the pairwise set operations. That is, if both interval sets + /// have been case folded, then any of difference, union, intersection or + /// symmetric difference all produce a case folded set. + /// + /// Note that when this is true, it *must* be the case that the set is case + /// folded. But when it's false, the set *may* be case folded. In other + /// words, we only set this to true when we know it to be case, but we're + /// okay with it being false if it would otherwise be costly to determine + /// whether it should be true. This means code cannot assume that a false + /// value necessarily indicates that the set is not case folded. + /// + /// Bottom line: this is a performance optimization. + folded: bool, +} + +impl Eq for IntervalSet {} + +// We implement PartialEq manually so that we don't consider the set's internal +// 'folded' property to be part of its identity. The 'folded' property is +// strictly an optimization. 
+impl PartialEq for IntervalSet { + fn eq(&self, other: &IntervalSet) -> bool { + self.ranges.eq(&other.ranges) + } +} + +impl IntervalSet { + /// Create a new set from a sequence of intervals. Each interval is + /// specified as a pair of bounds, where both bounds are inclusive. + /// + /// The given ranges do not need to be in any specific order, and ranges + /// may overlap. + pub fn new>(intervals: T) -> IntervalSet { + let ranges: Vec = intervals.into_iter().collect(); + // An empty set is case folded. + let folded = ranges.is_empty(); + let mut set = IntervalSet { ranges, folded }; + set.canonicalize(); + set + } + + /// Add a new interval to this set. + pub fn push(&mut self, interval: I) { + // TODO: This could be faster. e.g., Push the interval such that + // it preserves canonicalization. + self.ranges.push(interval); + self.canonicalize(); + // We don't know whether the new interval added here is considered + // case folded, so we conservatively assume that the entire set is + // no longer case folded if it was previously. + self.folded = false; + } + + /// Return an iterator over all intervals in this set. + /// + /// The iterator yields intervals in ascending order. + pub fn iter(&self) -> IntervalSetIter<'_, I> { + IntervalSetIter(self.ranges.iter()) + } + + /// Return an immutable slice of intervals in this set. + /// + /// The sequence returned is in canonical ordering. + pub fn intervals(&self) -> &[I] { + &self.ranges + } + + /// Expand this interval set such that it contains all case folded + /// characters. For example, if this class consists of the range `a-z`, + /// then applying case folding will result in the class containing both the + /// ranges `a-z` and `A-Z`. + /// + /// This returns an error if the necessary case mapping data is not + /// available. 
+ pub fn case_fold_simple(&mut self) -> Result<(), unicode::CaseFoldError> { + if self.folded { + return Ok(()); + } + let len = self.ranges.len(); + for i in 0..len { + let range = self.ranges[i]; + if let Err(err) = range.case_fold_simple(&mut self.ranges) { + self.canonicalize(); + return Err(err); + } + } + self.canonicalize(); + self.folded = true; + Ok(()) + } + + /// Union this set with the given set, in place. + pub fn union(&mut self, other: &IntervalSet) { + if other.ranges.is_empty() || self.ranges == other.ranges { + return; + } + // This could almost certainly be done more efficiently. + self.ranges.extend(&other.ranges); + self.canonicalize(); + self.folded = self.folded && other.folded; + } + + /// Intersect this set with the given set, in place. + pub fn intersect(&mut self, other: &IntervalSet) { + if self.ranges.is_empty() { + return; + } + if other.ranges.is_empty() { + self.ranges.clear(); + // An empty set is case folded. + self.folded = true; + return; + } + + // There should be a way to do this in-place with constant memory, + // but I couldn't figure out a simple way to do it. So just append + // the intersection to the end of this range, and then drain it before + // we're done. + let drain_end = self.ranges.len(); + + let mut ita = 0..drain_end; + let mut itb = 0..other.ranges.len(); + let mut a = ita.next().unwrap(); + let mut b = itb.next().unwrap(); + loop { + if let Some(ab) = self.ranges[a].intersect(&other.ranges[b]) { + self.ranges.push(ab); + } + let (it, aorb) = + if self.ranges[a].upper() < other.ranges[b].upper() { + (&mut ita, &mut a) + } else { + (&mut itb, &mut b) + }; + match it.next() { + Some(v) => *aorb = v, + None => break, + } + } + self.ranges.drain(..drain_end); + self.folded = self.folded && other.folded; + } + + /// Subtract the given set from this set, in place. 
+ pub fn difference(&mut self, other: &IntervalSet) { + if self.ranges.is_empty() || other.ranges.is_empty() { + return; + } + + // This algorithm is (to me) surprisingly complex. A search of the + // interwebs indicate that this is a potentially interesting problem. + // Folks seem to suggest interval or segment trees, but I'd like to + // avoid the overhead (both runtime and conceptual) of that. + // + // The following is basically my Shitty First Draft. Therefore, in + // order to grok it, you probably need to read each line carefully. + // Simplifications are most welcome! + // + // Remember, we can assume the canonical format invariant here, which + // says that all ranges are sorted, not overlapping and not adjacent in + // each class. + let drain_end = self.ranges.len(); + let (mut a, mut b) = (0, 0); + 'LOOP: while a < drain_end && b < other.ranges.len() { + // Basically, the easy cases are when neither range overlaps with + // each other. If the `b` range is less than our current `a` + // range, then we can skip it and move on. + if other.ranges[b].upper() < self.ranges[a].lower() { + b += 1; + continue; + } + // ... similarly for the `a` range. If it's less than the smallest + // `b` range, then we can add it as-is. + if self.ranges[a].upper() < other.ranges[b].lower() { + let range = self.ranges[a]; + self.ranges.push(range); + a += 1; + continue; + } + // Otherwise, we have overlapping ranges. + assert!(!self.ranges[a].is_intersection_empty(&other.ranges[b])); + + // This part is tricky and was non-obvious to me without looking + // at explicit examples (see the tests). The trickiness stems from + // two things: 1) subtracting a range from another range could + // yield two ranges and 2) after subtracting a range, it's possible + // that future ranges can have an impact. The loop below advances + // the `b` ranges until they can't possible impact the current + // range. 
+ // + // For example, if our `a` range is `a-t` and our next three `b` + // ranges are `a-c`, `g-i`, `r-t` and `x-z`, then we need to apply + // subtraction three times before moving on to the next `a` range. + let mut range = self.ranges[a]; + while b < other.ranges.len() + && !range.is_intersection_empty(&other.ranges[b]) + { + let old_range = range; + range = match range.difference(&other.ranges[b]) { + (None, None) => { + // We lost the entire range, so move on to the next + // without adding this one. + a += 1; + continue 'LOOP; + } + (Some(range1), None) | (None, Some(range1)) => range1, + (Some(range1), Some(range2)) => { + self.ranges.push(range1); + range2 + } + }; + // It's possible that the `b` range has more to contribute + // here. In particular, if it is greater than the original + // range, then it might impact the next `a` range *and* it + // has impacted the current `a` range as much as possible, + // so we can quit. We don't bump `b` so that the next `a` + // range can apply it. + if other.ranges[b].upper() > old_range.upper() { + break; + } + // Otherwise, the next `b` range might apply to the current + // `a` range. + b += 1; + } + self.ranges.push(range); + a += 1; + } + while a < drain_end { + let range = self.ranges[a]; + self.ranges.push(range); + a += 1; + } + self.ranges.drain(..drain_end); + self.folded = self.folded && other.folded; + } + + /// Compute the symmetric difference of the two sets, in place. + /// + /// This computes the symmetric difference of two interval sets. This + /// removes all elements in this set that are also in the given set, + /// but also adds all elements from the given set that aren't in this + /// set. That is, the set will contain all elements in either set, + /// but will not contain any elements that are in both sets. + pub fn symmetric_difference(&mut self, other: &IntervalSet) { + // TODO(burntsushi): Fix this so that it amortizes allocation. 
+ let mut intersection = self.clone(); + intersection.intersect(other); + self.union(other); + self.difference(&intersection); + } + + /// Negate this interval set. + /// + /// For all `x` where `x` is any element, if `x` was in this set, then it + /// will not be in this set after negation. + pub fn negate(&mut self) { + if self.ranges.is_empty() { + let (min, max) = (I::Bound::min_value(), I::Bound::max_value()); + self.ranges.push(I::create(min, max)); + // The set containing everything must case folded. + self.folded = true; + return; + } + + // There should be a way to do this in-place with constant memory, + // but I couldn't figure out a simple way to do it. So just append + // the negation to the end of this range, and then drain it before + // we're done. + let drain_end = self.ranges.len(); + + // We do checked arithmetic below because of the canonical ordering + // invariant. + if self.ranges[0].lower() > I::Bound::min_value() { + let upper = self.ranges[0].lower().decrement(); + self.ranges.push(I::create(I::Bound::min_value(), upper)); + } + for i in 1..drain_end { + let lower = self.ranges[i - 1].upper().increment(); + let upper = self.ranges[i].lower().decrement(); + self.ranges.push(I::create(lower, upper)); + } + if self.ranges[drain_end - 1].upper() < I::Bound::max_value() { + let lower = self.ranges[drain_end - 1].upper().increment(); + self.ranges.push(I::create(lower, I::Bound::max_value())); + } + self.ranges.drain(..drain_end); + // We don't need to update whether this set is folded or not, because + // it is conservatively preserved through negation. Namely, if a set + // is not folded, then it is possible that its negation is folded, for + // example, [^☃]. But we're fine with assuming that the set is not + // folded in that case. (`folded` permits false negatives but not false + // positives.) + // + // But what about when a set is folded, is its negation also + // necessarily folded? Yes. 
Because if a set is folded, then for every + // character in the set, it necessarily included its equivalence class + // of case folded characters. Negating it in turn means that all + // equivalence classes in the set are negated, and any equivalence + // class that was previously not in the set is now entirely in the set. + } + + /// Converts this set into a canonical ordering. + fn canonicalize(&mut self) { + if self.is_canonical() { + return; + } + self.ranges.sort(); + assert!(!self.ranges.is_empty()); + + // Is there a way to do this in-place with constant memory? I couldn't + // figure out a way to do it. So just append the canonicalization to + // the end of this range, and then drain it before we're done. + let drain_end = self.ranges.len(); + for oldi in 0..drain_end { + // If we've added at least one new range, then check if we can + // merge this range in the previously added range. + if self.ranges.len() > drain_end { + let (last, rest) = self.ranges.split_last_mut().unwrap(); + if let Some(union) = last.union(&rest[oldi]) { + *last = union; + continue; + } + } + let range = self.ranges[oldi]; + self.ranges.push(range); + } + self.ranges.drain(..drain_end); + } + + /// Returns true if and only if this class is in a canonical ordering. + fn is_canonical(&self) -> bool { + for pair in self.ranges.windows(2) { + if pair[0] >= pair[1] { + return false; + } + if pair[0].is_contiguous(&pair[1]) { + return false; + } + } + true + } +} + +/// An iterator over intervals. 
+#[derive(Debug)] +pub struct IntervalSetIter<'a, I>(slice::Iter<'a, I>); + +impl<'a, I> Iterator for IntervalSetIter<'a, I> { + type Item = &'a I; + + fn next(&mut self) -> Option<&'a I> { + self.0.next() + } +} + +pub trait Interval: + Clone + Copy + Debug + Default + Eq + PartialEq + PartialOrd + Ord +{ + type Bound: Bound; + + fn lower(&self) -> Self::Bound; + fn upper(&self) -> Self::Bound; + fn set_lower(&mut self, bound: Self::Bound); + fn set_upper(&mut self, bound: Self::Bound); + fn case_fold_simple( + &self, + intervals: &mut Vec, + ) -> Result<(), unicode::CaseFoldError>; + + /// Create a new interval. + fn create(lower: Self::Bound, upper: Self::Bound) -> Self { + let mut int = Self::default(); + if lower <= upper { + int.set_lower(lower); + int.set_upper(upper); + } else { + int.set_lower(upper); + int.set_upper(lower); + } + int + } + + /// Union the given overlapping range into this range. + /// + /// If the two ranges aren't contiguous, then this returns `None`. + fn union(&self, other: &Self) -> Option { + if !self.is_contiguous(other) { + return None; + } + let lower = cmp::min(self.lower(), other.lower()); + let upper = cmp::max(self.upper(), other.upper()); + Some(Self::create(lower, upper)) + } + + /// Intersect this range with the given range and return the result. + /// + /// If the intersection is empty, then this returns `None`. + fn intersect(&self, other: &Self) -> Option { + let lower = cmp::max(self.lower(), other.lower()); + let upper = cmp::min(self.upper(), other.upper()); + if lower <= upper { + Some(Self::create(lower, upper)) + } else { + None + } + } + + /// Subtract the given range from this range and return the resulting + /// ranges. + /// + /// If subtraction would result in an empty range, then no ranges are + /// returned. 
+ fn difference(&self, other: &Self) -> (Option, Option) { + if self.is_subset(other) { + return (None, None); + } + if self.is_intersection_empty(other) { + return (Some(self.clone()), None); + } + let add_lower = other.lower() > self.lower(); + let add_upper = other.upper() < self.upper(); + // We know this because !self.is_subset(other) and the ranges have + // a non-empty intersection. + assert!(add_lower || add_upper); + let mut ret = (None, None); + if add_lower { + let upper = other.lower().decrement(); + ret.0 = Some(Self::create(self.lower(), upper)); + } + if add_upper { + let lower = other.upper().increment(); + let range = Self::create(lower, self.upper()); + if ret.0.is_none() { + ret.0 = Some(range); + } else { + ret.1 = Some(range); + } + } + ret + } + + /// Returns true if and only if the two ranges are contiguous. Two ranges + /// are contiguous if and only if the ranges are either overlapping or + /// adjacent. + fn is_contiguous(&self, other: &Self) -> bool { + let lower1 = self.lower().as_u32(); + let upper1 = self.upper().as_u32(); + let lower2 = other.lower().as_u32(); + let upper2 = other.upper().as_u32(); + cmp::max(lower1, lower2) <= cmp::min(upper1, upper2).saturating_add(1) + } + + /// Returns true if and only if the intersection of this range and the + /// other range is empty. + fn is_intersection_empty(&self, other: &Self) -> bool { + let (lower1, upper1) = (self.lower(), self.upper()); + let (lower2, upper2) = (other.lower(), other.upper()); + cmp::max(lower1, lower2) > cmp::min(upper1, upper2) + } + + /// Returns true if and only if this range is a subset of the other range. 
+ fn is_subset(&self, other: &Self) -> bool { + let (lower1, upper1) = (self.lower(), self.upper()); + let (lower2, upper2) = (other.lower(), other.upper()); + (lower2 <= lower1 && lower1 <= upper2) + && (lower2 <= upper1 && upper1 <= upper2) + } +} + +pub trait Bound: + Copy + Clone + Debug + Eq + PartialEq + PartialOrd + Ord +{ + fn min_value() -> Self; + fn max_value() -> Self; + fn as_u32(self) -> u32; + fn increment(self) -> Self; + fn decrement(self) -> Self; +} + +impl Bound for u8 { + fn min_value() -> Self { + u8::MIN + } + fn max_value() -> Self { + u8::MAX + } + fn as_u32(self) -> u32 { + u32::from(self) + } + fn increment(self) -> Self { + self.checked_add(1).unwrap() + } + fn decrement(self) -> Self { + self.checked_sub(1).unwrap() + } +} + +impl Bound for char { + fn min_value() -> Self { + '\x00' + } + fn max_value() -> Self { + '\u{10FFFF}' + } + fn as_u32(self) -> u32 { + u32::from(self) + } + + fn increment(self) -> Self { + match self { + '\u{D7FF}' => '\u{E000}', + c => char::from_u32(u32::from(c).checked_add(1).unwrap()).unwrap(), + } + } + + fn decrement(self) -> Self { + match self { + '\u{E000}' => '\u{D7FF}', + c => char::from_u32(u32::from(c).checked_sub(1).unwrap()).unwrap(), + } + } +} + +// Tests for interval sets are written in src/hir.rs against the public API. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/literal.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/literal.rs new file mode 100644 index 0000000000000000000000000000000000000000..2a6350e64663ce978d8d0aa2210e3e2810d02b58 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/literal.rs @@ -0,0 +1,3214 @@ +/*! +Provides literal extraction from `Hir` expressions. + +An [`Extractor`] pulls literals out of [`Hir`] expressions and returns a +[`Seq`] of [`Literal`]s. 
+ +The purpose of literal extraction is generally to provide avenues for +optimizing regex searches. The main idea is that substring searches can be an +order of magnitude faster than a regex search. Therefore, if one can execute +a substring search to find candidate match locations and only run the regex +search at those locations, then it is possible for huge improvements in +performance to be realized. + +With that said, literal optimizations are generally a black art because even +though substring search is generally faster, if the number of candidates +produced is high, then it can create a lot of overhead by ping-ponging between +the substring search and the regex search. + +Here are some heuristics that might be used to help increase the chances of +effective literal optimizations: + +* Stick to small [`Seq`]s. If you search for too many literals, it's likely +to lead to substring search that is only a little faster than a regex search, +and thus the overhead of using literal optimizations in the first place might +make things slower overall. +* The literals in your [`Seq`] shouldn't be too short. In general, longer is +better. A sequence corresponding to single bytes that occur frequently in the +haystack, for example, is probably a bad literal optimization because it's +likely to produce many false positive candidates. Longer literals are less +likely to match, and thus probably produce fewer false positives. +* If it's possible to estimate the approximate frequency of each byte according +to some pre-computed background distribution, it is possible to compute a score +of how "good" a `Seq` is. If a `Seq` isn't good enough, you might consider +skipping the literal optimization and just use the regex engine. + +(It should be noted that there are always pathological cases that can make +any kind of literal optimization be a net slower result. 
This is why it +might be a good idea to be conservative, or to even provide a means for +literal optimizations to be dynamically disabled if they are determined to be +ineffective according to some measure.) + +You're encouraged to explore the methods on [`Seq`], which permit shrinking +the size of sequences in a preference-order preserving fashion. + +Finally, note that it isn't strictly necessary to use an [`Extractor`]. Namely, +an `Extractor` only uses public APIs of the [`Seq`] and [`Literal`] types, +so it is possible to implement your own extractor. For example, for n-grams +or "inner" literals (i.e., not prefix or suffix literals). The `Extractor` +is mostly responsible for the case analysis over `Hir` expressions. Much of +the "trickier" parts are how to combine literal sequences, and that is all +implemented on [`Seq`]. +*/ + +use core::{cmp, mem, num::NonZeroUsize}; + +use alloc::{vec, vec::Vec}; + +use crate::hir::{self, Hir}; + +/// Extracts prefix or suffix literal sequences from [`Hir`] expressions. +/// +/// Literal extraction is based on the following observations: +/// +/// * Many regexes start with one or a small number of literals. +/// * Substring search for literals is often much faster (sometimes by an order +/// of magnitude) than a regex search. +/// +/// Thus, in many cases, one can search for literals to find candidate starting +/// locations of a match, and then only run the full regex engine at each such +/// location instead of over the full haystack. +/// +/// The main downside of literal extraction is that it can wind up causing a +/// search to be slower overall. For example, if there are many matches or if +/// there are many candidates that don't ultimately lead to a match, then a +/// lot of overhead will be spent in shuffling back-and-forth between substring +/// search and the regex engine. This is the fundamental reason why literal +/// optimizations for regex patterns is sometimes considered a "black art." 
+/// +/// # Look-around assertions +/// +/// Literal extraction treats all look-around assertions as-if they match every +/// empty string. So for example, the regex `\bquux\b` will yield a sequence +/// containing a single exact literal `quux`. However, not all occurrences +/// of `quux` correspond to a match a of the regex. For example, `\bquux\b` +/// does not match `ZquuxZ` anywhere because `quux` does not fall on a word +/// boundary. +/// +/// In effect, if your regex contains look-around assertions, then a match of +/// an exact literal does not necessarily mean the regex overall matches. So +/// you may still need to run the regex engine in such cases to confirm the +/// match. +/// +/// The precise guarantee you get from a literal sequence is: if every literal +/// in the sequence is exact and the original regex contains zero look-around +/// assertions, then a preference-order multi-substring search of those +/// literals will precisely match a preference-order search of the original +/// regex. +/// +/// # Example +/// +/// This shows how to extract prefixes: +/// +/// ``` +/// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; +/// +/// let hir = parse(r"(a|b|c)(x|y|z)[A-Z]+foo")?; +/// let got = Extractor::new().extract(&hir); +/// // All literals returned are "inexact" because none of them reach the +/// // match state. 
+/// let expected = Seq::from_iter([ +/// Literal::inexact("ax"), +/// Literal::inexact("ay"), +/// Literal::inexact("az"), +/// Literal::inexact("bx"), +/// Literal::inexact("by"), +/// Literal::inexact("bz"), +/// Literal::inexact("cx"), +/// Literal::inexact("cy"), +/// Literal::inexact("cz"), +/// ]); +/// assert_eq!(expected, got); +/// +/// # Ok::<(), Box>(()) +/// ``` +/// +/// This shows how to extract suffixes: +/// +/// ``` +/// use regex_syntax::{ +/// hir::literal::{Extractor, ExtractKind, Literal, Seq}, +/// parse, +/// }; +/// +/// let hir = parse(r"foo|[A-Z]+bar")?; +/// let got = Extractor::new().kind(ExtractKind::Suffix).extract(&hir); +/// // Since 'foo' gets to a match state, it is considered exact. But 'bar' +/// // does not because of the '[A-Z]+', and thus is marked inexact. +/// let expected = Seq::from_iter([ +/// Literal::exact("foo"), +/// Literal::inexact("bar"), +/// ]); +/// assert_eq!(expected, got); +/// +/// # Ok::<(), Box>(()) +/// ``` +#[derive(Clone, Debug)] +pub struct Extractor { + kind: ExtractKind, + limit_class: usize, + limit_repeat: usize, + limit_literal_len: usize, + limit_total: usize, +} + +impl Extractor { + /// Create a new extractor with a default configuration. + /// + /// The extractor can be optionally configured before calling + /// [`Extractor::extract`] to get a literal sequence. + pub fn new() -> Extractor { + Extractor { + kind: ExtractKind::Prefix, + limit_class: 10, + limit_repeat: 10, + limit_literal_len: 100, + limit_total: 250, + } + } + + /// Execute the extractor and return a sequence of literals. 
+ pub fn extract(&self, hir: &Hir) -> Seq { + use crate::hir::HirKind::*; + + match *hir.kind() { + Empty | Look(_) => Seq::singleton(self::Literal::exact(vec![])), + Literal(hir::Literal(ref bytes)) => { + let mut seq = + Seq::singleton(self::Literal::exact(bytes.to_vec())); + self.enforce_literal_len(&mut seq); + seq + } + Class(hir::Class::Unicode(ref cls)) => { + self.extract_class_unicode(cls) + } + Class(hir::Class::Bytes(ref cls)) => self.extract_class_bytes(cls), + Repetition(ref rep) => self.extract_repetition(rep), + Capture(hir::Capture { ref sub, .. }) => self.extract(sub), + Concat(ref hirs) => match self.kind { + ExtractKind::Prefix => self.extract_concat(hirs.iter()), + ExtractKind::Suffix => self.extract_concat(hirs.iter().rev()), + }, + Alternation(ref hirs) => { + // Unlike concat, we always union starting from the beginning, + // since the beginning corresponds to the highest preference, + // which doesn't change based on forwards vs reverse. + self.extract_alternation(hirs.iter()) + } + } + } + + /// Set the kind of literal sequence to extract from an [`Hir`] expression. + /// + /// The default is to extract prefixes, but suffixes can be selected + /// instead. The contract for prefixes is that every match of the + /// corresponding `Hir` must start with one of the literals in the sequence + /// returned. Moreover, the _order_ of the sequence returned corresponds to + /// the preference order. + /// + /// Suffixes satisfy a similar contract in that every match of the + /// corresponding `Hir` must end with one of the literals in the sequence + /// returned. However, there is no guarantee that the literals are in + /// preference order. + /// + /// Remember that a sequence can be infinite. For example, unless the + /// limits are configured to be impractically large, attempting to extract + /// prefixes (or suffixes) for the pattern `[A-Z]` will return an infinite + /// sequence. 
Generally speaking, if the sequence returned is infinite, + /// then it is presumed to be unwise to do prefix (or suffix) optimizations + /// for the pattern. + pub fn kind(&mut self, kind: ExtractKind) -> &mut Extractor { + self.kind = kind; + self + } + + /// Configure a limit on the length of the sequence that is permitted for + /// a character class. If a character class exceeds this limit, then the + /// sequence returned for it is infinite. + /// + /// This prevents classes like `[A-Z]` or `\pL` from getting turned into + /// huge and likely unproductive sequences of literals. + /// + /// # Example + /// + /// This example shows how this limit can be lowered to decrease the tolerance + /// for character classes being turned into literal sequences. + /// + /// ``` + /// use regex_syntax::{hir::literal::{Extractor, Seq}, parse}; + /// + /// let hir = parse(r"[0-9]")?; + /// + /// let got = Extractor::new().extract(&hir); + /// let expected = Seq::new([ + /// "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", + /// ]); + /// assert_eq!(expected, got); + /// + /// // Now let's shrink the limit and see how that changes things. + /// let got = Extractor::new().limit_class(4).extract(&hir); + /// let expected = Seq::infinite(); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn limit_class(&mut self, limit: usize) -> &mut Extractor { + self.limit_class = limit; + self + } + + /// Configure a limit on the total number of repetitions that is permitted + /// before literal extraction is stopped. + /// + /// This is useful for limiting things like `(abcde){50}`, or more + /// insidiously, `(?:){1000000000}`. This limit prevents any one single + /// repetition from adding too much to a literal sequence. + /// + /// With this limit set, repetitions that exceed it will be stopped and any + /// literals extracted up to that point will be made inexact. 
+ /// + /// # Example + /// + /// This shows how to decrease the limit and compares it with the default. + /// + /// ``` + /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; + /// + /// let hir = parse(r"(abc){8}")?; + /// + /// let got = Extractor::new().extract(&hir); + /// let expected = Seq::new(["abcabcabcabcabcabcabcabc"]); + /// assert_eq!(expected, got); + /// + /// // Now let's shrink the limit and see how that changes things. + /// let got = Extractor::new().limit_repeat(4).extract(&hir); + /// let expected = Seq::from_iter([ + /// Literal::inexact("abcabcabcabc"), + /// ]); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn limit_repeat(&mut self, limit: usize) -> &mut Extractor { + self.limit_repeat = limit; + self + } + + /// Configure a limit on the maximum length of any literal in a sequence. + /// + /// This is useful for limiting things like `(abcde){5}{5}{5}{5}`. While + /// each repetition or literal in that regex is small, when all the + /// repetitions are applied, one ends up with a literal of length `5^4 = + /// 625`. + /// + /// With this limit set, literals that exceed it will be made inexact and + /// thus prevented from growing. + /// + /// # Example + /// + /// This shows how to decrease the limit and compares it with the default. + /// + /// ``` + /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; + /// + /// let hir = parse(r"(abc){2}{2}{2}")?; + /// + /// let got = Extractor::new().extract(&hir); + /// let expected = Seq::new(["abcabcabcabcabcabcabcabc"]); + /// assert_eq!(expected, got); + /// + /// // Now let's shrink the limit and see how that changes things. 
+ /// let got = Extractor::new().limit_literal_len(14).extract(&hir); + /// let expected = Seq::from_iter([ + /// Literal::inexact("abcabcabcabcab"), + /// ]); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn limit_literal_len(&mut self, limit: usize) -> &mut Extractor { + self.limit_literal_len = limit; + self + } + + /// Configure a limit on the total number of literals that will be + /// returned. + /// + /// This is useful as a practical measure for avoiding the creation of + /// large sequences of literals. While the extractor will automatically + /// handle local creations of large sequences (for example, `[A-Z]` yields + /// an infinite sequence by default), large sequences can be created + /// through non-local means as well. + /// + /// For example, `[ab]{3}{3}` would yield a sequence of length `512 = 2^9` + /// despite each of the repetitions being small on their own. This limit + /// thus represents a "catch all" for avoiding locally small sequences from + /// combining into large sequences. + /// + /// # Example + /// + /// This example shows how reducing the limit will change the literal + /// sequence returned. + /// + /// ``` + /// use regex_syntax::{hir::literal::{Extractor, Literal, Seq}, parse}; + /// + /// let hir = parse(r"[ab]{2}{2}")?; + /// + /// let got = Extractor::new().extract(&hir); + /// let expected = Seq::new([ + /// "aaaa", "aaab", "aaba", "aabb", + /// "abaa", "abab", "abba", "abbb", + /// "baaa", "baab", "baba", "babb", + /// "bbaa", "bbab", "bbba", "bbbb", + /// ]); + /// assert_eq!(expected, got); + /// + /// // The default limit is not too big, but big enough to extract all + /// // literals from '[ab]{2}{2}'. If we shrink the limit to less than 16, + /// // then we'll get a truncated set. Notice that it returns a sequence of + /// // length 4 even though our limit was 10. This is because the sequence + /// // is difficult to increase without blowing the limit. 
Notice also + /// // that every literal in the sequence is now inexact because they were + /// // stripped of some suffix. + /// let got = Extractor::new().limit_total(10).extract(&hir); + /// let expected = Seq::from_iter([ + /// Literal::inexact("aa"), + /// Literal::inexact("ab"), + /// Literal::inexact("ba"), + /// Literal::inexact("bb"), + /// ]); + /// assert_eq!(expected, got); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn limit_total(&mut self, limit: usize) -> &mut Extractor { + self.limit_total = limit; + self + } + + /// Extract a sequence from the given concatenation. Sequences from each of + /// the child HIR expressions are combined via cross product. + /// + /// This short circuits once the cross product turns into a sequence + /// containing only inexact literals. + fn extract_concat<'a, I: Iterator>(&self, it: I) -> Seq { + let mut seq = Seq::singleton(self::Literal::exact(vec![])); + for hir in it { + // If every element in the sequence is inexact, then a cross + // product will always be a no-op. Thus, there is nothing else we + // can add to it and can quit early. Note that this also includes + // infinite sequences. + if seq.is_inexact() { + break; + } + // Note that 'cross' also dispatches based on whether we're + // extracting prefixes or suffixes. + seq = self.cross(seq, &mut self.extract(hir)); + } + seq + } + + /// Extract a sequence from the given alternation. + /// + /// This short circuits once the union turns into an infinite sequence. + fn extract_alternation<'a, I: Iterator>( + &self, + it: I, + ) -> Seq { + let mut seq = Seq::empty(); + for hir in it { + // Once our 'seq' is infinite, every subsequent union + // operation on it will itself always result in an + // infinite sequence. Thus, it can never change and we can + // short-circuit. + if !seq.is_finite() { + break; + } + seq = self.union(seq, &mut self.extract(hir)); + } + seq + } + + /// Extract a sequence of literals from the given repetition. 
We do our + /// best, Some examples: + /// + /// 'a*' => [inexact(a), exact("")] + /// 'a*?' => [exact(""), inexact(a)] + /// 'a+' => [inexact(a)] + /// 'a{3}' => [exact(aaa)] + /// 'a{3,5} => [inexact(aaa)] + /// + /// The key here really is making sure we get the 'inexact' vs 'exact' + /// attributes correct on each of the literals we add. For example, the + /// fact that 'a*' gives us an inexact 'a' and an exact empty string means + /// that a regex like 'ab*c' will result in [inexact(ab), exact(ac)] + /// literals being extracted, which might actually be a better prefilter + /// than just 'a'. + fn extract_repetition(&self, rep: &hir::Repetition) -> Seq { + let mut subseq = self.extract(&rep.sub); + match *rep { + hir::Repetition { min: 0, max, greedy, .. } => { + // When 'max=1', we can retain exactness, since 'a?' is + // equivalent to 'a|'. Similarly below, 'a??' is equivalent to + // '|a'. + if max != Some(1) { + subseq.make_inexact(); + } + let mut empty = Seq::singleton(Literal::exact(vec![])); + if !greedy { + mem::swap(&mut subseq, &mut empty); + } + self.union(subseq, &mut empty) + } + hir::Repetition { min, max: Some(max), .. } if min == max => { + assert!(min > 0); // handled above + let limit = + u32::try_from(self.limit_repeat).unwrap_or(u32::MAX); + let mut seq = Seq::singleton(Literal::exact(vec![])); + for _ in 0..cmp::min(min, limit) { + if seq.is_inexact() { + break; + } + seq = self.cross(seq, &mut subseq.clone()); + } + if usize::try_from(min).is_err() || min > limit { + seq.make_inexact(); + } + seq + } + hir::Repetition { min, .. 
} => { + assert!(min > 0); // handled above + let limit = + u32::try_from(self.limit_repeat).unwrap_or(u32::MAX); + let mut seq = Seq::singleton(Literal::exact(vec![])); + for _ in 0..cmp::min(min, limit) { + if seq.is_inexact() { + break; + } + seq = self.cross(seq, &mut subseq.clone()); + } + seq.make_inexact(); + seq + } + } + } + + /// Convert the given Unicode class into a sequence of literals if the + /// class is small enough. If the class is too big, return an infinite + /// sequence. + fn extract_class_unicode(&self, cls: &hir::ClassUnicode) -> Seq { + if self.class_over_limit_unicode(cls) { + return Seq::infinite(); + } + let mut seq = Seq::empty(); + for r in cls.iter() { + for ch in r.start()..=r.end() { + seq.push(Literal::from(ch)); + } + } + self.enforce_literal_len(&mut seq); + seq + } + + /// Convert the given byte class into a sequence of literals if the class + /// is small enough. If the class is too big, return an infinite sequence. + fn extract_class_bytes(&self, cls: &hir::ClassBytes) -> Seq { + if self.class_over_limit_bytes(cls) { + return Seq::infinite(); + } + let mut seq = Seq::empty(); + for r in cls.iter() { + for b in r.start()..=r.end() { + seq.push(Literal::from(b)); + } + } + self.enforce_literal_len(&mut seq); + seq + } + + /// Returns true if the given Unicode class exceeds the configured limits + /// on this extractor. + fn class_over_limit_unicode(&self, cls: &hir::ClassUnicode) -> bool { + let mut count = 0; + for r in cls.iter() { + if count > self.limit_class { + return true; + } + count += r.len(); + } + count > self.limit_class + } + + /// Returns true if the given byte class exceeds the configured limits on + /// this extractor. 
+ fn class_over_limit_bytes(&self, cls: &hir::ClassBytes) -> bool { + let mut count = 0; + for r in cls.iter() { + if count > self.limit_class { + return true; + } + count += r.len(); + } + count > self.limit_class + } + + /// Compute the cross product of the two sequences if the result would be + /// within configured limits. Otherwise, make `seq2` infinite and cross the + /// infinite sequence with `seq1`. + fn cross(&self, mut seq1: Seq, seq2: &mut Seq) -> Seq { + if seq1.max_cross_len(seq2).map_or(false, |len| len > self.limit_total) + { + seq2.make_infinite(); + } + if let ExtractKind::Suffix = self.kind { + seq1.cross_reverse(seq2); + } else { + seq1.cross_forward(seq2); + } + assert!(seq1.len().map_or(true, |x| x <= self.limit_total)); + self.enforce_literal_len(&mut seq1); + seq1 + } + + /// Union the two sequences if the result would be within configured + /// limits. Otherwise, make `seq2` infinite and union the infinite sequence + /// with `seq1`. + fn union(&self, mut seq1: Seq, seq2: &mut Seq) -> Seq { + if seq1.max_union_len(seq2).map_or(false, |len| len > self.limit_total) + { + // We try to trim our literal sequences to see if we can make + // room for more literals. The idea is that we'd rather trim down + // literals already in our sequence if it means we can add a few + // more and retain a finite sequence. Otherwise, we'll union with + // an infinite sequence and that infects everything and effectively + // stops literal extraction in its tracks. + // + // We do we keep 4 bytes here? Well, it's a bit of an abstraction + // leakage. Downstream, the literals may wind up getting fed to + // the Teddy algorithm, which supports searching literals up to + // length 4. So that's why we pick that number here. Arguably this + // should be a tunable parameter, but it seems a little tricky to + // describe. And I'm still unsure if this is the right way to go + // about culling literal sequences. 
+ match self.kind { + ExtractKind::Prefix => { + seq1.keep_first_bytes(4); + seq2.keep_first_bytes(4); + } + ExtractKind::Suffix => { + seq1.keep_last_bytes(4); + seq2.keep_last_bytes(4); + } + } + seq1.dedup(); + seq2.dedup(); + if seq1 + .max_union_len(seq2) + .map_or(false, |len| len > self.limit_total) + { + seq2.make_infinite(); + } + } + seq1.union(seq2); + assert!(seq1.len().map_or(true, |x| x <= self.limit_total)); + seq1 + } + + /// Applies the literal length limit to the given sequence. If none of the + /// literals in the sequence exceed the limit, then this is a no-op. + fn enforce_literal_len(&self, seq: &mut Seq) { + let len = self.limit_literal_len; + match self.kind { + ExtractKind::Prefix => seq.keep_first_bytes(len), + ExtractKind::Suffix => seq.keep_last_bytes(len), + } + } +} + +impl Default for Extractor { + fn default() -> Extractor { + Extractor::new() + } +} + +/// The kind of literals to extract from an [`Hir`] expression. +/// +/// The default extraction kind is `Prefix`. +#[non_exhaustive] +#[derive(Clone, Debug)] +pub enum ExtractKind { + /// Extracts only prefix literals from a regex. + Prefix, + /// Extracts only suffix literals from a regex. + /// + /// Note that the sequence returned by suffix literals currently may + /// not correctly represent leftmost-first or "preference" order match + /// semantics. + Suffix, +} + +impl ExtractKind { + /// Returns true if this kind is the `Prefix` variant. + pub fn is_prefix(&self) -> bool { + matches!(*self, ExtractKind::Prefix) + } + + /// Returns true if this kind is the `Suffix` variant. + pub fn is_suffix(&self) -> bool { + matches!(*self, ExtractKind::Suffix) + } +} + +impl Default for ExtractKind { + fn default() -> ExtractKind { + ExtractKind::Prefix + } +} + +/// A sequence of literals. +/// +/// A `Seq` is very much like a set in that it represents a union of its +/// members. 
That is, it corresponds to a set of literals where at least one +/// must match in order for a particular [`Hir`] expression to match. (Whether +/// this corresponds to the entire `Hir` expression, a prefix of it or a suffix +/// of it depends on how the `Seq` was extracted from the `Hir`.) +/// +/// It is also unlike a set in that multiple identical literals may appear, +/// and that the order of the literals in the `Seq` matters. For example, if +/// the sequence is `[sam, samwise]` and leftmost-first matching is used, then +/// `samwise` can never match and the sequence is equivalent to `[sam]`. +/// +/// # States of a sequence +/// +/// A `Seq` has a few different logical states to consider: +/// +/// * The sequence can represent "any" literal. When this happens, the set does +/// not have a finite size. The purpose of this state is to inhibit callers +/// from making assumptions about what literals are required in order to match +/// a particular [`Hir`] expression. Generally speaking, when a set is in this +/// state, literal optimizations are inhibited. A good example of a regex that +/// will cause this sort of set to appear is `[A-Za-z]`. The character class +/// is just too big (and also too narrow) to be usefully expanded into 52 +/// different literals. (Note that the decision for when a seq should become +/// infinite is determined by the caller. A seq itself has no hard-coded +/// limits.) +/// * The sequence can be empty, in which case, it is an affirmative statement +/// that there are no literals that can match the corresponding `Hir`. +/// Consequently, the `Hir` never matches any input. For example, `[a&&b]`. +/// * The sequence can be non-empty, in which case, at least one of the +/// literals must match in order for the corresponding `Hir` to match. +/// +/// # Example +/// +/// This example shows how literal sequences can be simplified by stripping +/// suffixes and minimizing while maintaining preference order. 
+/// +/// ``` +/// use regex_syntax::hir::literal::{Literal, Seq}; +/// +/// let mut seq = Seq::new(&[ +/// "farm", +/// "appliance", +/// "faraway", +/// "apple", +/// "fare", +/// "gap", +/// "applicant", +/// "applaud", +/// ]); +/// seq.keep_first_bytes(3); +/// seq.minimize_by_preference(); +/// // Notice that 'far' comes before 'app', which matches the order in the +/// // original sequence. This guarantees that leftmost-first semantics are +/// // not altered by simplifying the set. +/// let expected = Seq::from_iter([ +/// Literal::inexact("far"), +/// Literal::inexact("app"), +/// Literal::exact("gap"), +/// ]); +/// assert_eq!(expected, seq); +/// ``` +#[derive(Clone, Eq, PartialEq)] +pub struct Seq { + /// The members of this seq. + /// + /// When `None`, the seq represents all possible literals. That is, it + /// prevents one from making assumptions about specific literals in the + /// seq, and forces one to treat it as if any literal might be in the seq. + /// + /// Note that `Some(vec![])` is valid and corresponds to the empty seq of + /// literals, i.e., a regex that can never match. For example, `[a&&b]`. + /// It is distinct from `Some(vec![""])`, which corresponds to the seq + /// containing an empty string, which matches at every position. + literals: Option>, +} + +impl Seq { + /// Returns an empty sequence. + /// + /// An empty sequence matches zero literals, and thus corresponds to a + /// regex that itself can never match. + #[inline] + pub fn empty() -> Seq { + Seq { literals: Some(vec![]) } + } + + /// Returns a sequence of literals without a finite size and may contain + /// any literal. + /// + /// A sequence without finite size does not reveal anything about the + /// characteristics of the literals in its set. There are no fixed prefixes + /// or suffixes, nor are lower or upper bounds on the length of the literals + /// in the set known. 
+ /// + /// This is useful to represent constructs in a regex that are "too big" + /// to useful represent as a sequence of literals. For example, `[A-Za-z]`. + /// When sequences get too big, they lose their discriminating nature and + /// are more likely to produce false positives, which in turn makes them + /// less likely to speed up searches. + /// + /// More pragmatically, for many regexes, enumerating all possible literals + /// is itself not possible or might otherwise use too many resources. So + /// constraining the size of sets during extraction is a practical trade + /// off to make. + #[inline] + pub fn infinite() -> Seq { + Seq { literals: None } + } + + /// Returns a sequence containing a single literal. + #[inline] + pub fn singleton(lit: Literal) -> Seq { + Seq { literals: Some(vec![lit]) } + } + + /// Returns a sequence of exact literals from the given byte strings. + #[inline] + pub fn new(it: I) -> Seq + where + I: IntoIterator, + B: AsRef<[u8]>, + { + it.into_iter().map(|b| Literal::exact(b.as_ref())).collect() + } + + /// If this is a finite sequence, return its members as a slice of + /// literals. + /// + /// The slice returned may be empty, in which case, there are no literals + /// that can match this sequence. + #[inline] + pub fn literals(&self) -> Option<&[Literal]> { + self.literals.as_deref() + } + + /// Push a literal to the end of this sequence. + /// + /// If this sequence is not finite, then this is a no-op. + /// + /// Similarly, if the most recently added item of this sequence is + /// equivalent to the literal given, then it is not added. This reflects + /// a `Seq`'s "set like" behavior, and represents a practical trade off. + /// Namely, there is never any need to have two adjacent and equivalent + /// literals in the same sequence, _and_ it is easy to detect in some + /// cases. 
+ #[inline] + pub fn push(&mut self, lit: Literal) { + let lits = match self.literals { + None => return, + Some(ref mut lits) => lits, + }; + if lits.last().map_or(false, |m| m == &lit) { + return; + } + lits.push(lit); + } + + /// Make all of the literals in this sequence inexact. + /// + /// This is a no-op if this sequence is not finite. + #[inline] + pub fn make_inexact(&mut self) { + let lits = match self.literals { + None => return, + Some(ref mut lits) => lits, + }; + for lit in lits.iter_mut() { + lit.make_inexact(); + } + } + + /// Converts this sequence to an infinite sequence. + /// + /// This is a no-op if the sequence is already infinite. + #[inline] + pub fn make_infinite(&mut self) { + self.literals = None; + } + + /// Modify this sequence to contain the cross product between it and the + /// sequence given. + /// + /// The cross product only considers literals in this sequence that are + /// exact. That is, inexact literals are not extended. + /// + /// The literals are always drained from `other`, even if none are used. + /// This permits callers to reuse the sequence allocation elsewhere. + /// + /// If this sequence is infinite, then this is a no-op, regardless of what + /// `other` contains (and in this case, the literals are still drained from + /// `other`). If `other` is infinite and this sequence is finite, then this + /// is a no-op, unless this sequence contains a zero-length literal. In + /// which case, the infiniteness of `other` infects this sequence, and this + /// sequence is itself made infinite. + /// + /// Like [`Seq::union`], this may attempt to deduplicate literals. See + /// [`Seq::dedup`] for how deduplication deals with exact and inexact + /// literals. + /// + /// # Example + /// + /// This example shows basic usage and how exact and inexact literals + /// interact. 
+ /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::from_iter([ + /// Literal::inexact("quux"), + /// Literal::exact("baz"), + /// ]); + /// seq1.cross_forward(&mut seq2); + /// + /// // The literals are pulled out of seq2. + /// assert_eq!(Some(0), seq2.len()); + /// + /// let expected = Seq::from_iter([ + /// Literal::inexact("fooquux"), + /// Literal::exact("foobaz"), + /// Literal::inexact("bar"), + /// ]); + /// assert_eq!(expected, seq1); + /// ``` + /// + /// This example shows the behavior of when `other` is an infinite + /// sequence. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::infinite(); + /// seq1.cross_forward(&mut seq2); + /// + /// // When seq2 is infinite, cross product doesn't add anything, but + /// // ensures all members of seq1 are inexact. + /// let expected = Seq::from_iter([ + /// Literal::inexact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// assert_eq!(expected, seq1); + /// ``` + /// + /// This example is like the one above, but shows what happens when this + /// sequence contains an empty string. In this case, an infinite `other` + /// sequence infects this sequence (because the empty string means that + /// there are no finite prefixes): + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::exact(""), // inexact provokes same behavior + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::infinite(); + /// seq1.cross_forward(&mut seq2); + /// + /// // seq1 is now infinite! + /// assert!(!seq1.is_finite()); + /// ``` + /// + /// This example shows the behavior of this sequence is infinite. 
+ /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::infinite(); + /// let mut seq2 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// seq1.cross_forward(&mut seq2); + /// + /// // seq1 remains unchanged. + /// assert!(!seq1.is_finite()); + /// // Even though the literals in seq2 weren't used, it was still drained. + /// assert_eq!(Some(0), seq2.len()); + /// ``` + #[inline] + pub fn cross_forward(&mut self, other: &mut Seq) { + let (lits1, lits2) = match self.cross_preamble(other) { + None => return, + Some((lits1, lits2)) => (lits1, lits2), + }; + let newcap = lits1.len().saturating_mul(lits2.len()); + for selflit in mem::replace(lits1, Vec::with_capacity(newcap)) { + if !selflit.is_exact() { + lits1.push(selflit); + continue; + } + for otherlit in lits2.iter() { + let mut newlit = Literal::exact(Vec::with_capacity( + selflit.len() + otherlit.len(), + )); + newlit.extend(&selflit); + newlit.extend(&otherlit); + if !otherlit.is_exact() { + newlit.make_inexact(); + } + lits1.push(newlit); + } + } + lits2.drain(..); + self.dedup(); + } + + /// Modify this sequence to contain the cross product between it and + /// the sequence given, where the sequences are treated as suffixes + /// instead of prefixes. Namely, the sequence `other` is *prepended* + /// to `self` (as opposed to `other` being *appended* to `self` in + /// [`Seq::cross_forward`]). + /// + /// The cross product only considers literals in this sequence that are + /// exact. That is, inexact literals are not extended. + /// + /// The literals are always drained from `other`, even if none are used. + /// This permits callers to reuse the sequence allocation elsewhere. + /// + /// If this sequence is infinite, then this is a no-op, regardless of what + /// `other` contains (and in this case, the literals are still drained from + /// `other`). 
If `other` is infinite and this sequence is finite, then this + /// is a no-op, unless this sequence contains a zero-length literal. In + /// which case, the infiniteness of `other` infects this sequence, and this + /// sequence is itself made infinite. + /// + /// Like [`Seq::union`], this may attempt to deduplicate literals. See + /// [`Seq::dedup`] for how deduplication deals with exact and inexact + /// literals. + /// + /// # Example + /// + /// This example shows basic usage and how exact and inexact literals + /// interact. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::from_iter([ + /// Literal::inexact("quux"), + /// Literal::exact("baz"), + /// ]); + /// seq1.cross_reverse(&mut seq2); + /// + /// // The literals are pulled out of seq2. + /// assert_eq!(Some(0), seq2.len()); + /// + /// let expected = Seq::from_iter([ + /// Literal::inexact("quuxfoo"), + /// Literal::inexact("bar"), + /// Literal::exact("bazfoo"), + /// ]); + /// assert_eq!(expected, seq1); + /// ``` + /// + /// This example shows the behavior of when `other` is an infinite + /// sequence. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::infinite(); + /// seq1.cross_reverse(&mut seq2); + /// + /// // When seq2 is infinite, cross product doesn't add anything, but + /// // ensures all members of seq1 are inexact. + /// let expected = Seq::from_iter([ + /// Literal::inexact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// assert_eq!(expected, seq1); + /// ``` + /// + /// This example is like the one above, but shows what happens when this + /// sequence contains an empty string. 
In this case, an infinite `other` + /// sequence infects this sequence (because the empty string means that + /// there are no finite suffixes): + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::exact(""), // inexact provokes same behavior + /// Literal::inexact("bar"), + /// ]); + /// let mut seq2 = Seq::infinite(); + /// seq1.cross_reverse(&mut seq2); + /// + /// // seq1 is now infinite! + /// assert!(!seq1.is_finite()); + /// ``` + /// + /// This example shows the behavior when this sequence is infinite. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq1 = Seq::infinite(); + /// let mut seq2 = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("bar"), + /// ]); + /// seq1.cross_reverse(&mut seq2); + /// + /// // seq1 remains unchanged. + /// assert!(!seq1.is_finite()); + /// // Even though the literals in seq2 weren't used, it was still drained. + /// assert_eq!(Some(0), seq2.len()); + /// ``` + #[inline] + pub fn cross_reverse(&mut self, other: &mut Seq) { + let (lits1, lits2) = match self.cross_preamble(other) { + None => return, + Some((lits1, lits2)) => (lits1, lits2), + }; + // We basically proceed as we do in 'cross_forward' at this point, + // except that the outer loop is now 'other' and the inner loop is now + // 'self'. That's because 'self' corresponds to suffixes and 'other' + // corresponds to the sequence we want to *prepend* to the suffixes. + let newcap = lits1.len().saturating_mul(lits2.len()); + let selflits = mem::replace(lits1, Vec::with_capacity(newcap)); + for (i, otherlit) in lits2.drain(..).enumerate() { + for selflit in selflits.iter() { + if !selflit.is_exact() { + // If the suffix isn't exact, then we can't prepend + // anything to it. However, we still want to keep it. But + // we only want to keep one of them, to avoid duplication. 
+ // (The duplication is okay from a correctness perspective, + // but wasteful.) + if i == 0 { + lits1.push(selflit.clone()); + } + continue; + } + let mut newlit = Literal::exact(Vec::with_capacity( + otherlit.len() + selflit.len(), + )); + newlit.extend(&otherlit); + newlit.extend(&selflit); + if !otherlit.is_exact() { + newlit.make_inexact(); + } + lits1.push(newlit); + } + } + self.dedup(); + } + + /// A helper function the corresponds to the subtle preamble for both + /// `cross_forward` and `cross_reverse`. In effect, it handles the cases + /// of infinite sequences for both `self` and `other`, as well as ensuring + /// that literals from `other` are drained even if they aren't used. + fn cross_preamble<'a>( + &'a mut self, + other: &'a mut Seq, + ) -> Option<(&'a mut Vec, &'a mut Vec)> { + let lits2 = match other.literals { + None => { + // If our current seq contains the empty string and the seq + // we're adding matches any literal, then it follows that the + // current seq must now also match any literal. + // + // Otherwise, we just have to make sure everything in this + // sequence is inexact. + if self.min_literal_len() == Some(0) { + *self = Seq::infinite(); + } else { + self.make_inexact(); + } + return None; + } + Some(ref mut lits) => lits, + }; + let lits1 = match self.literals { + None => { + // If we aren't going to make it to the end of this routine + // where lits2 is drained, then we need to do it now. + lits2.drain(..); + return None; + } + Some(ref mut lits) => lits, + }; + Some((lits1, lits2)) + } + + /// Unions the `other` sequence into this one. + /// + /// The literals are always drained out of the given `other` sequence, + /// even if they are being unioned into an infinite sequence. This permits + /// the caller to reuse the `other` sequence in another context. + /// + /// Some literal deduping may be performed. If any deduping happens, + /// any leftmost-first or "preference" order match semantics will be + /// preserved. 
+ /// + /// # Example + /// + /// This example shows basic usage. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let mut seq1 = Seq::new(&["foo", "bar"]); + /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]); + /// seq1.union(&mut seq2); + /// + /// // The literals are pulled out of seq2. + /// assert_eq!(Some(0), seq2.len()); + /// + /// // Adjacent literals are deduped, but non-adjacent literals may not be. + /// assert_eq!(Seq::new(&["foo", "bar", "quux", "foo"]), seq1); + /// ``` + /// + /// This example shows that literals are drained from `other` even when + /// they aren't necessarily used. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let mut seq1 = Seq::infinite(); + /// // Infinite sequences have no finite length. + /// assert_eq!(None, seq1.len()); + /// + /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]); + /// seq1.union(&mut seq2); + /// + /// // seq1 is still infinite and seq2 has been drained. + /// assert_eq!(None, seq1.len()); + /// assert_eq!(Some(0), seq2.len()); + /// ``` + #[inline] + pub fn union(&mut self, other: &mut Seq) { + let lits2 = match other.literals { + None => { + // Unioning with an infinite sequence always results in an + // infinite sequence. + self.make_infinite(); + return; + } + Some(ref mut lits) => lits.drain(..), + }; + let lits1 = match self.literals { + None => return, + Some(ref mut lits) => lits, + }; + lits1.extend(lits2); + self.dedup(); + } + + /// Unions the `other` sequence into this one by splice the `other` + /// sequence at the position of the first zero-length literal. + /// + /// This is useful for preserving preference order semantics when combining + /// two literal sequences. For example, in the regex `(a||f)+foo`, the + /// correct preference order prefix sequence is `[a, foo, f]`. + /// + /// The literals are always drained out of the given `other` sequence, + /// even if they are being unioned into an infinite sequence. 
This permits + /// the caller to reuse the `other` sequence in another context. Note that + /// the literals are drained even if no union is performed as well, i.e., + /// when this sequence does not contain a zero-length literal. + /// + /// Some literal deduping may be performed. If any deduping happens, + /// any leftmost-first or "preference" order match semantics will be + /// preserved. + /// + /// # Example + /// + /// This example shows basic usage. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let mut seq1 = Seq::new(&["a", "", "f", ""]); + /// let mut seq2 = Seq::new(&["foo"]); + /// seq1.union_into_empty(&mut seq2); + /// + /// // The literals are pulled out of seq2. + /// assert_eq!(Some(0), seq2.len()); + /// // 'foo' gets spliced into seq1 where the first empty string occurs. + /// assert_eq!(Seq::new(&["a", "foo", "f"]), seq1); + /// ``` + /// + /// This example shows that literals are drained from `other` even when + /// they aren't necessarily used. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let mut seq1 = Seq::new(&["foo", "bar"]); + /// let mut seq2 = Seq::new(&["bar", "quux", "foo"]); + /// seq1.union_into_empty(&mut seq2); + /// + /// // seq1 has no zero length literals, so no splicing happens. + /// assert_eq!(Seq::new(&["foo", "bar"]), seq1); + /// // Even though no splicing happens, seq2 is still drained. + /// assert_eq!(Some(0), seq2.len()); + /// ``` + #[inline] + pub fn union_into_empty(&mut self, other: &mut Seq) { + let lits2 = other.literals.as_mut().map(|lits| lits.drain(..)); + let lits1 = match self.literals { + None => return, + Some(ref mut lits) => lits, + }; + let first_empty = match lits1.iter().position(|m| m.is_empty()) { + None => return, + Some(i) => i, + }; + let lits2 = match lits2 { + None => { + // Note that we are only here if we've found an empty literal, + // which implies that an infinite sequence infects this seq and + // also turns it into an infinite sequence. 
+ self.literals = None; + return; + } + Some(lits) => lits, + }; + // Clearing out the empties needs to come before the splice because + // the splice might add more empties that we don't want to get rid + // of. Since we're splicing into the position of the first empty, the + // 'first_empty' position computed above is still correct. + lits1.retain(|m| !m.is_empty()); + lits1.splice(first_empty..first_empty, lits2); + self.dedup(); + } + + /// Deduplicate adjacent equivalent literals in this sequence. + /// + /// If adjacent literals are equivalent strings but one is exact and the + /// other inexact, the inexact literal is kept and the exact one is + /// removed. + /// + /// Deduping an infinite sequence is a no-op. + /// + /// # Example + /// + /// This example shows how literals that are duplicate byte strings but + /// are not equivalent with respect to exactness are resolved. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::inexact("foo"), + /// ]); + /// seq.dedup(); + /// + /// assert_eq!(Seq::from_iter([Literal::inexact("foo")]), seq); + /// ``` + #[inline] + pub fn dedup(&mut self) { + if let Some(ref mut lits) = self.literals { + lits.dedup_by(|lit1, lit2| { + if lit1.as_bytes() != lit2.as_bytes() { + return false; + } + if lit1.is_exact() != lit2.is_exact() { + lit1.make_inexact(); + lit2.make_inexact(); + } + true + }); + } + } + + /// Sorts this sequence of literals lexicographically. + /// + /// Note that if, before sorting, if a literal that is a prefix of another + /// literal appears after it, then after sorting, the sequence will not + /// represent the same preference order match semantics. For example, + /// sorting the sequence `[samwise, sam]` yields the sequence `[sam, + /// samwise]`. Under preference order semantics, the latter sequence will + /// never match `samwise` where as the first sequence can. 
+ /// + /// # Example + /// + /// This example shows basic usage. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let mut seq = Seq::new(&["foo", "quux", "bar"]); + /// seq.sort(); + /// + /// assert_eq!(Seq::new(&["bar", "foo", "quux"]), seq); + /// ``` + #[inline] + pub fn sort(&mut self) { + if let Some(ref mut lits) = self.literals { + lits.sort(); + } + } + + /// Reverses all of the literals in this sequence. + /// + /// The order of the sequence itself is preserved. + /// + /// # Example + /// + /// This example shows basic usage. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let mut seq = Seq::new(&["oof", "rab"]); + /// seq.reverse_literals(); + /// assert_eq!(Seq::new(&["foo", "bar"]), seq); + /// ``` + #[inline] + pub fn reverse_literals(&mut self) { + if let Some(ref mut lits) = self.literals { + for lit in lits.iter_mut() { + lit.reverse(); + } + } + } + + /// Shrinks this seq to its minimal size while respecting the preference + /// order of its literals. + /// + /// While this routine will remove duplicate literals from this seq, it + /// will also remove literals that can never match in a leftmost-first or + /// "preference order" search. Similar to [`Seq::dedup`], if a literal is + /// deduped, then the one that remains is made inexact. + /// + /// This is a no-op on seqs that are empty or not finite. + /// + /// # Example + /// + /// This example shows the difference between `{sam, samwise}` and + /// `{samwise, sam}`. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// // If 'sam' comes before 'samwise' and a preference order search is + /// // executed, then 'samwise' can never match. + /// let mut seq = Seq::new(&["sam", "samwise"]); + /// seq.minimize_by_preference(); + /// assert_eq!(Seq::from_iter([Literal::inexact("sam")]), seq); + /// + /// // But if they are reversed, then it's possible for 'samwise' to match + /// // since it is given higher preference. 
+ /// let mut seq = Seq::new(&["samwise", "sam"]); + /// seq.minimize_by_preference(); + /// assert_eq!(Seq::new(&["samwise", "sam"]), seq); + /// ``` + /// + /// This example shows that if an empty string is in this seq, then + /// anything that comes after it can never match. + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// // An empty string is a prefix of all strings, so it automatically + /// // inhibits any subsequent strings from matching. + /// let mut seq = Seq::new(&["foo", "bar", "", "quux", "fox"]); + /// seq.minimize_by_preference(); + /// let expected = Seq::from_iter([ + /// Literal::exact("foo"), + /// Literal::exact("bar"), + /// Literal::inexact(""), + /// ]); + /// assert_eq!(expected, seq); + /// + /// // And of course, if it's at the beginning, then it makes it impossible + /// // for anything else to match. + /// let mut seq = Seq::new(&["", "foo", "quux", "fox"]); + /// seq.minimize_by_preference(); + /// assert_eq!(Seq::from_iter([Literal::inexact("")]), seq); + /// ``` + #[inline] + pub fn minimize_by_preference(&mut self) { + if let Some(ref mut lits) = self.literals { + PreferenceTrie::minimize(lits, false); + } + } + + /// Trims all literals in this seq such that only the first `len` bytes + /// remain. If a literal has less than or equal to `len` bytes, then it + /// remains unchanged. Otherwise, it is trimmed and made inexact. 
+ /// + /// # Example + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq = Seq::new(&["a", "foo", "quux"]); + /// seq.keep_first_bytes(2); + /// + /// let expected = Seq::from_iter([ + /// Literal::exact("a"), + /// Literal::inexact("fo"), + /// Literal::inexact("qu"), + /// ]); + /// assert_eq!(expected, seq); + /// ``` + #[inline] + pub fn keep_first_bytes(&mut self, len: usize) { + if let Some(ref mut lits) = self.literals { + for m in lits.iter_mut() { + m.keep_first_bytes(len); + } + } + } + + /// Trims all literals in this seq such that only the last `len` bytes + /// remain. If a literal has less than or equal to `len` bytes, then it + /// remains unchanged. Otherwise, it is trimmed and made inexact. + /// + /// # Example + /// + /// ``` + /// use regex_syntax::hir::literal::{Literal, Seq}; + /// + /// let mut seq = Seq::new(&["a", "foo", "quux"]); + /// seq.keep_last_bytes(2); + /// + /// let expected = Seq::from_iter([ + /// Literal::exact("a"), + /// Literal::inexact("oo"), + /// Literal::inexact("ux"), + /// ]); + /// assert_eq!(expected, seq); + /// ``` + #[inline] + pub fn keep_last_bytes(&mut self, len: usize) { + if let Some(ref mut lits) = self.literals { + for m in lits.iter_mut() { + m.keep_last_bytes(len); + } + } + } + + /// Returns true if this sequence is finite. + /// + /// When false, this sequence is infinite and must be treated as if it + /// contains every possible literal. + #[inline] + pub fn is_finite(&self) -> bool { + self.literals.is_some() + } + + /// Returns true if and only if this sequence is finite and empty. + /// + /// An empty sequence never matches anything. It can only be produced by + /// literal extraction when the corresponding regex itself cannot match. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == Some(0) + } + + /// Returns the number of literals in this sequence if the sequence is + /// finite. If the sequence is infinite, then `None` is returned. 
+ #[inline] + pub fn len(&self) -> Option { + self.literals.as_ref().map(|lits| lits.len()) + } + + /// Returns true if and only if all literals in this sequence are exact. + /// + /// This returns false if the sequence is infinite. + #[inline] + pub fn is_exact(&self) -> bool { + self.literals().map_or(false, |lits| lits.iter().all(|x| x.is_exact())) + } + + /// Returns true if and only if all literals in this sequence are inexact. + /// + /// This returns true if the sequence is infinite. + #[inline] + pub fn is_inexact(&self) -> bool { + self.literals().map_or(true, |lits| lits.iter().all(|x| !x.is_exact())) + } + + /// Return the maximum length of the sequence that would result from + /// unioning `self` with `other`. If either set is infinite, then this + /// returns `None`. + #[inline] + pub fn max_union_len(&self, other: &Seq) -> Option { + let len1 = self.len()?; + let len2 = other.len()?; + Some(len1.saturating_add(len2)) + } + + /// Return the maximum length of the sequence that would result from the + /// cross product of `self` with `other`. If either set is infinite, then + /// this returns `None`. + #[inline] + pub fn max_cross_len(&self, other: &Seq) -> Option { + let len1 = self.len()?; + let len2 = other.len()?; + Some(len1.saturating_mul(len2)) + } + + /// Returns the length of the shortest literal in this sequence. + /// + /// If the sequence is infinite or empty, then this returns `None`. + #[inline] + pub fn min_literal_len(&self) -> Option { + self.literals.as_ref()?.iter().map(|x| x.len()).min() + } + + /// Returns the length of the longest literal in this sequence. + /// + /// If the sequence is infinite or empty, then this returns `None`. + #[inline] + pub fn max_literal_len(&self) -> Option { + self.literals.as_ref()?.iter().map(|x| x.len()).max() + } + + /// Returns the longest common prefix from this seq. 
+ /// + /// If the seq matches any literal or other contains no literals, then + /// there is no meaningful prefix and this returns `None`. + /// + /// # Example + /// + /// This shows some example seqs and their longest common prefix. + /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let seq = Seq::new(&["foo", "foobar", "fo"]); + /// assert_eq!(Some(&b"fo"[..]), seq.longest_common_prefix()); + /// let seq = Seq::new(&["foo", "foo"]); + /// assert_eq!(Some(&b"foo"[..]), seq.longest_common_prefix()); + /// let seq = Seq::new(&["foo", "bar"]); + /// assert_eq!(Some(&b""[..]), seq.longest_common_prefix()); + /// let seq = Seq::new(&[""]); + /// assert_eq!(Some(&b""[..]), seq.longest_common_prefix()); + /// + /// let seq = Seq::infinite(); + /// assert_eq!(None, seq.longest_common_prefix()); + /// let seq = Seq::empty(); + /// assert_eq!(None, seq.longest_common_prefix()); + /// ``` + #[inline] + pub fn longest_common_prefix(&self) -> Option<&[u8]> { + // If we match everything or match nothing, then there's no meaningful + // longest common prefix. + let lits = match self.literals { + None => return None, + Some(ref lits) => lits, + }; + if lits.len() == 0 { + return None; + } + let base = lits[0].as_bytes(); + let mut len = base.len(); + for m in lits.iter().skip(1) { + len = m + .as_bytes() + .iter() + .zip(base[..len].iter()) + .take_while(|&(a, b)| a == b) + .count(); + if len == 0 { + return Some(&[]); + } + } + Some(&base[..len]) + } + + /// Returns the longest common suffix from this seq. + /// + /// If the seq matches any literal or other contains no literals, then + /// there is no meaningful suffix and this returns `None`. + /// + /// # Example + /// + /// This shows some example seqs and their longest common suffix. 
+ /// + /// ``` + /// use regex_syntax::hir::literal::Seq; + /// + /// let seq = Seq::new(&["oof", "raboof", "of"]); + /// assert_eq!(Some(&b"of"[..]), seq.longest_common_suffix()); + /// let seq = Seq::new(&["foo", "foo"]); + /// assert_eq!(Some(&b"foo"[..]), seq.longest_common_suffix()); + /// let seq = Seq::new(&["foo", "bar"]); + /// assert_eq!(Some(&b""[..]), seq.longest_common_suffix()); + /// let seq = Seq::new(&[""]); + /// assert_eq!(Some(&b""[..]), seq.longest_common_suffix()); + /// + /// let seq = Seq::infinite(); + /// assert_eq!(None, seq.longest_common_suffix()); + /// let seq = Seq::empty(); + /// assert_eq!(None, seq.longest_common_suffix()); + /// ``` + #[inline] + pub fn longest_common_suffix(&self) -> Option<&[u8]> { + // If we match everything or match nothing, then there's no meaningful + // longest common suffix. + let lits = match self.literals { + None => return None, + Some(ref lits) => lits, + }; + if lits.len() == 0 { + return None; + } + let base = lits[0].as_bytes(); + let mut len = base.len(); + for m in lits.iter().skip(1) { + len = m + .as_bytes() + .iter() + .rev() + .zip(base[base.len() - len..].iter().rev()) + .take_while(|&(a, b)| a == b) + .count(); + if len == 0 { + return Some(&[]); + } + } + Some(&base[base.len() - len..]) + } + + /// Optimizes this seq while treating its literals as prefixes and + /// respecting the preference order of its literals. + /// + /// The specific way "optimization" works is meant to be an implementation + /// detail, as it essentially represents a set of heuristics. The goal + /// that optimization tries to accomplish is to make the literals in this + /// set reflect inputs that will result in a more effective prefilter. + /// Principally by reducing the false positive rate of candidates found by + /// the literals in this sequence. That is, when a match of a literal is + /// found, we would like it to be a strong predictor of the overall match + /// of the regex. 
If it isn't, then much time will be spent starting and + /// stopping the prefilter search and attempting to confirm the match only + /// to have it fail. + /// + /// Some of those heuristics might be: + /// + /// * Identifying a common prefix from a larger sequence of literals, and + /// shrinking the sequence down to that single common prefix. + /// * Rejecting the sequence entirely if it is believed to result in very + /// high false positive rate. When this happens, the sequence is made + /// infinite. + /// * Shrinking the sequence to a smaller number of literals representing + /// prefixes, but not shrinking it so much as to make literals too short. + /// (A sequence with very short literals, of 1 or 2 bytes, will typically + /// result in a higher false positive rate.) + /// + /// Optimization should only be run once extraction is complete. Namely, + /// optimization may make assumptions that do not compose with other + /// operations in the middle of extraction. For example, optimization will + /// reduce `[E(sam), E(samwise)]` to `[E(sam)]`, but such a transformation + /// is only valid if no other extraction will occur. If other extraction + /// may occur, then the correct transformation would be to `[I(sam)]`. + /// + /// The [`Seq::optimize_for_suffix_by_preference`] does the same thing, but + /// for suffixes. + /// + /// # Example + /// + /// This shows how optimization might transform a sequence. Note that + /// the specific behavior is not a documented guarantee. The heuristics + /// used are an implementation detail and may change over time in semver + /// compatible releases. 
+ /// + /// ``` + /// use regex_syntax::hir::literal::{Seq, Literal}; + /// + /// let mut seq = Seq::new(&[ + /// "samantha", + /// "sam", + /// "samwise", + /// "frodo", + /// ]); + /// seq.optimize_for_prefix_by_preference(); + /// assert_eq!(Seq::from_iter([ + /// Literal::exact("samantha"), + /// // Kept exact even though 'samwise' got pruned + /// // because optimization assumes literal extraction + /// // has finished. + /// Literal::exact("sam"), + /// Literal::exact("frodo"), + /// ]), seq); + /// ``` + /// + /// # Example: optimization may make the sequence infinite + /// + /// If the heuristics deem that the sequence could cause a very high false + /// positive rate, then it may make the sequence infinite, effectively + /// disabling its use as a prefilter. + /// + /// ``` + /// use regex_syntax::hir::literal::{Seq, Literal}; + /// + /// let mut seq = Seq::new(&[ + /// "samantha", + /// // An empty string matches at every position, + /// // thus rendering the prefilter completely + /// // ineffective. + /// "", + /// "sam", + /// "samwise", + /// "frodo", + /// ]); + /// seq.optimize_for_prefix_by_preference(); + /// assert!(!seq.is_finite()); + /// ``` + /// + /// Do note that just because there is a `" "` in the sequence, that + /// doesn't mean the sequence will always be made infinite after it is + /// optimized. Namely, if the sequence is considered exact (any match + /// corresponds to an overall match of the original regex), then any match + /// is an overall match, and so the false positive rate is always `0`. + /// + /// To demonstrate this, we remove `samwise` from our sequence. This + /// results in no optimization happening and all literals remain exact. 
+ /// Thus the entire sequence is exact, and it is kept as-is, even though + /// one is an ASCII space: + /// + /// ``` + /// use regex_syntax::hir::literal::{Seq, Literal}; + /// + /// let mut seq = Seq::new(&[ + /// "samantha", + /// " ", + /// "sam", + /// "frodo", + /// ]); + /// seq.optimize_for_prefix_by_preference(); + /// assert!(seq.is_finite()); + /// ``` + #[inline] + pub fn optimize_for_prefix_by_preference(&mut self) { + self.optimize_by_preference(true); + } + + /// Optimizes this seq while treating its literals as suffixes and + /// respecting the preference order of its literals. + /// + /// Optimization should only be run once extraction is complete. + /// + /// The [`Seq::optimize_for_prefix_by_preference`] does the same thing, but + /// for prefixes. See its documentation for more explanation. + #[inline] + pub fn optimize_for_suffix_by_preference(&mut self) { + self.optimize_by_preference(false); + } + + fn optimize_by_preference(&mut self, prefix: bool) { + let origlen = match self.len() { + None => return, + Some(len) => len, + }; + // Just give up now if our sequence contains an empty string. + if self.min_literal_len().map_or(false, |len| len == 0) { + // We squash the sequence so that nobody else gets any bright + // ideas to try and use it. An empty string implies a match at + // every position. A prefilter cannot help you here. + self.make_infinite(); + return; + } + // Make sure we start with the smallest sequence possible. We use a + // special version of preference minimization that retains exactness. + // This is legal because optimization is only expected to occur once + // extraction is complete. + if prefix { + if let Some(ref mut lits) = self.literals { + PreferenceTrie::minimize(lits, true); + } + } + + // Look for a common prefix (or suffix). If we found one of those and + // it's long enough, then it's a good bet that it will be our fastest + // possible prefilter since single-substring search is so fast. 
        let fix = if prefix {
            self.longest_common_prefix()
        } else {
            self.longest_common_suffix()
        };
        if let Some(fix) = fix {
            // As a special case, if we have a common prefix and the leading
            // byte of that prefix is one that we think probably occurs rarely,
            // then strip everything down to just that single byte. This should
            // promote the use of memchr.
            //
            // ... we only do this though if our sequence has more than one
            // literal. Otherwise, we'd rather just stick with a single literal
            // scan. That is, using memchr is probably better than looking
            // for 2 or more literals, but probably not as good as a straight
            // memmem search.
            //
            // ... and also only do this when the prefix is short and probably
            // not too discriminatory anyway. If it's longer, then it's
            // probably quite discriminatory and thus is likely to have a low
            // false positive rate.
            //
            // NOTE: per `rank`'s documentation, a lower rank means the byte
            // is heuristically *less* likely to occur in an arbitrary
            // haystack, so `rank(..) < 200` selects bytes believed to be
            // reasonably rare.
            if prefix
                && origlen > 1
                && fix.len() >= 1
                && fix.len() <= 3
                && rank(fix[0]) < 200
            {
                self.keep_first_bytes(1);
                self.dedup();
                return;
            }
            // We only strip down to the common prefix/suffix if we think
            // the existing set of literals isn't great, or if the common
            // prefix/suffix is expected to be particularly discriminatory.
            let isfast =
                self.is_exact() && self.len().map_or(false, |len| len <= 16);
            let usefix = fix.len() > 4 || (fix.len() > 1 && !isfast);
            if usefix {
                // If we keep exactly the number of bytes equal to the length
                // of the prefix (or suffix), then by the definition of a
                // prefix, every literal in the sequence will be equivalent.
                // Thus, 'dedup' will leave us with one literal.
                //
                // We do it this way to avoid an alloc, but also to make sure
                // the exactness of literals is kept (or not).
                if prefix {
                    self.keep_first_bytes(fix.len());
                } else {
                    self.keep_last_bytes(fix.len());
                }
                self.dedup();
                assert_eq!(Some(1), self.len());
                // We still fall through here.
In particular, we want our + // longest common prefix to be subject to the poison check. + } + } + // If we have an exact sequence, we *probably* just want to keep it + // as-is. But there are some cases where we don't. So we save a copy of + // the exact sequence now, and then try to do some more optimizations + // below. If those don't work out, we go back to this exact sequence. + // + // The specific motivation for this is that we sometimes wind up with + // an exact sequence with a hefty number of literals. Say, 100. If we + // stuck with that, it would be too big for Teddy and would result in + // using Aho-Corasick. Which is fine... but the lazy DFA is plenty + // suitable in such cases. The real issue is that we will wind up not + // using a fast prefilter at all. So in cases like this, even though + // we have an exact sequence, it would be better to try and shrink the + // sequence (which we do below) and use it as a prefilter that can + // produce false positive matches. + // + // But if the shrinking below results in a sequence that "sucks," then + // we don't want to use that because we already have an exact sequence + // in hand. + let exact: Option = + if self.is_exact() { Some(self.clone()) } else { None }; + // Now we attempt to shorten the sequence. The idea here is that we + // don't want to look for too many literals, but we want to shorten + // our sequence enough to improve our odds of using better algorithms + // downstream (such as Teddy). + // + // The pair of numbers in this list corresponds to the maximal prefix + // (in bytes) to keep for all literals and the length of the sequence + // at which to do it. + // + // So for example, the pair (3, 500) would mean, "if we have more than + // 500 literals in our sequence, then truncate all of our literals + // such that they are at most 3 bytes in length and the minimize the + // sequence." 
        const ATTEMPTS: [(usize, usize); 5] =
            [(5, 10), (4, 10), (3, 64), (2, 64), (1, 10)];
        for (keep, limit) in ATTEMPTS {
            let len = match self.len() {
                None => break,
                Some(len) => len,
            };
            if len <= limit {
                break;
            }
            if prefix {
                self.keep_first_bytes(keep);
            } else {
                self.keep_last_bytes(keep);
            }
            // Truncation may leave literals that duplicate or are prefixes
            // of one another, so re-minimize (prefix mode only, as above).
            if prefix {
                if let Some(ref mut lits) = self.literals {
                    PreferenceTrie::minimize(lits, true);
                }
            }
        }
        // Check for a poison literal. A poison literal is one that is short
        // and is believed to have a very high match count. These poisons
        // generally lead to a prefilter with a very high false positive rate,
        // and thus overall worse performance.
        //
        // We do this last because we could have gone from a non-poisonous
        // sequence to a poisonous one. Perhaps we should add some code to
        // prevent such transitions in the first place, but then again, we
        // likely only made the transition in the first place if the sequence
        // was itself huge. And huge sequences are themselves poisonous. So...
        if let Some(lits) = self.literals() {
            if lits.iter().any(|lit| lit.is_poisonous()) {
                self.make_infinite();
            }
        }
        // OK, if we had an exact sequence before attempting more optimizations
        // above and our post-optimized sequence sucks for some reason or
        // another, then we go back to the exact sequence.
        if let Some(exact) = exact {
            // If optimizing resulted in dropping our literals, then certainly
            // backup and use the exact sequence that we had.
            if !self.is_finite() {
                *self = exact;
                return;
            }
            // If our optimized sequence contains a short literal, then it's
            // *probably* not so great. So throw it away and revert to the
            // exact sequence.
            if self.min_literal_len().map_or(true, |len| len <= 2) {
                *self = exact;
                return;
            }
            // Finally, if our optimized sequence is "big" (i.e., can't use
            // Teddy), then also don't use it and rely on the exact sequence.
+ if self.len().map_or(true, |len| len > 64) { + *self = exact; + return; + } + } + } +} + +impl core::fmt::Debug for Seq { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "Seq")?; + if let Some(lits) = self.literals() { + f.debug_list().entries(lits.iter()).finish() + } else { + write!(f, "[∞]") + } + } +} + +impl FromIterator for Seq { + fn from_iter>(it: T) -> Seq { + let mut seq = Seq::empty(); + for literal in it { + seq.push(literal); + } + seq + } +} + +/// A single literal extracted from an [`Hir`] expression. +/// +/// A literal is composed of two things: +/// +/// * A sequence of bytes. No guarantees with respect to UTF-8 are provided. +/// In particular, even if the regex a literal is extracted from is UTF-8, the +/// literal extracted may not be valid UTF-8. (For example, if an [`Extractor`] +/// limit resulted in trimming a literal in a way that splits a codepoint.) +/// * Whether the literal is "exact" or not. An "exact" literal means that it +/// has not been trimmed, and may continue to be extended. If a literal is +/// "exact" after visiting the entire `Hir` expression, then this implies that +/// the literal leads to a match state. (Although it doesn't necessarily imply +/// all occurrences of the literal correspond to a match of the regex, since +/// literal extraction ignores look-around assertions.) +#[derive(Clone, Eq, PartialEq, PartialOrd, Ord)] +pub struct Literal { + bytes: Vec, + exact: bool, +} + +impl Literal { + /// Returns a new exact literal containing the bytes given. + #[inline] + pub fn exact>>(bytes: B) -> Literal { + Literal { bytes: bytes.into(), exact: true } + } + + /// Returns a new inexact literal containing the bytes given. + #[inline] + pub fn inexact>>(bytes: B) -> Literal { + Literal { bytes: bytes.into(), exact: false } + } + + /// Returns the bytes in this literal. 
+ #[inline] + pub fn as_bytes(&self) -> &[u8] { + &self.bytes + } + + /// Yields ownership of the bytes inside this literal. + /// + /// Note that this throws away whether the literal is "exact" or not. + #[inline] + pub fn into_bytes(self) -> Vec { + self.bytes + } + + /// Returns the length of this literal in bytes. + #[inline] + pub fn len(&self) -> usize { + self.as_bytes().len() + } + + /// Returns true if and only if this literal has zero bytes. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns true if and only if this literal is exact. + #[inline] + pub fn is_exact(&self) -> bool { + self.exact + } + + /// Marks this literal as inexact. + /// + /// Inexact literals can never be extended. For example, + /// [`Seq::cross_forward`] will not extend inexact literals. + #[inline] + pub fn make_inexact(&mut self) { + self.exact = false; + } + + /// Reverse the bytes in this literal. + #[inline] + pub fn reverse(&mut self) { + self.bytes.reverse(); + } + + /// Extend this literal with the literal given. + /// + /// If this literal is inexact, then this is a no-op. + #[inline] + pub fn extend(&mut self, lit: &Literal) { + if !self.is_exact() { + return; + } + self.bytes.extend_from_slice(&lit.bytes); + } + + /// Trims this literal such that only the first `len` bytes remain. If + /// this literal has fewer than `len` bytes, then it remains unchanged. + /// Otherwise, the literal is marked as inexact. + #[inline] + pub fn keep_first_bytes(&mut self, len: usize) { + if len >= self.len() { + return; + } + self.make_inexact(); + self.bytes.truncate(len); + } + + /// Trims this literal such that only the last `len` bytes remain. If this + /// literal has fewer than `len` bytes, then it remains unchanged. + /// Otherwise, the literal is marked as inexact. 
+ #[inline] + pub fn keep_last_bytes(&mut self, len: usize) { + if len >= self.len() { + return; + } + self.make_inexact(); + self.bytes.drain(..self.len() - len); + } + + /// Returns true if it is believe that this literal is likely to match very + /// frequently, and is thus not a good candidate for a prefilter. + fn is_poisonous(&self) -> bool { + self.is_empty() || (self.len() == 1 && rank(self.as_bytes()[0]) >= 250) + } +} + +impl From for Literal { + fn from(byte: u8) -> Literal { + Literal::exact(vec![byte]) + } +} + +impl From for Literal { + fn from(ch: char) -> Literal { + use alloc::string::ToString; + Literal::exact(ch.encode_utf8(&mut [0; 4]).to_string()) + } +} + +impl AsRef<[u8]> for Literal { + fn as_ref(&self) -> &[u8] { + self.as_bytes() + } +} + +impl core::fmt::Debug for Literal { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let tag = if self.exact { "E" } else { "I" }; + f.debug_tuple(tag) + .field(&crate::debug::Bytes(self.as_bytes())) + .finish() + } +} + +/// A "preference" trie that rejects literals that will never match when +/// executing a leftmost first or "preference" search. +/// +/// For example, if 'sam' is inserted, then trying to insert 'samwise' will be +/// rejected because 'samwise' can never match since 'sam' will always take +/// priority. However, if 'samwise' is inserted first, then inserting 'sam' +/// after it is accepted. In this case, either 'samwise' or 'sam' can match in +/// a "preference" search. +/// +/// Note that we only use this trie as a "set." That is, given a sequence of +/// literals, we insert each one in order. An `insert` will reject a literal +/// if a prefix of that literal already exists in the trie. Thus, to rebuild +/// the "minimal" sequence, we simply only keep literals that were successfully +/// inserted. 
(Since we don't need traversal, one wonders whether we can make +/// some simplifications here, but I haven't given it a ton of thought and I've +/// never seen this show up on a profile. Because of the heuristic limits +/// imposed on literal extractions, the size of the inputs here is usually +/// very small.) +#[derive(Debug)] +struct PreferenceTrie { + /// The states in this trie. The index of a state in this vector is its ID. + states: Vec, + /// This vec indicates which states are match states. It always has + /// the same length as `states` and is indexed by the same state ID. + /// A state with identifier `sid` is a match state if and only if + /// `matches[sid].is_some()`. The option contains the index of the literal + /// corresponding to the match. The index is offset by 1 so that it fits in + /// a NonZeroUsize. + matches: Vec>, + /// The index to allocate to the next literal added to this trie. Starts at + /// 1 and increments by 1 for every literal successfully added to the trie. + next_literal_index: usize, +} + +/// A single state in a trie. Uses a sparse representation for its transitions. +#[derive(Debug, Default)] +struct State { + /// Sparse representation of the transitions out of this state. Transitions + /// are sorted by byte. There is at most one such transition for any + /// particular byte. + trans: Vec<(u8, usize)>, +} + +impl PreferenceTrie { + /// Minimizes the given sequence of literals while preserving preference + /// order semantics. + /// + /// When `keep_exact` is true, the exactness of every literal retained is + /// kept. This is useful when dealing with a fully extracted `Seq` that + /// only contains exact literals. In that case, we can keep all retained + /// literals as exact because we know we'll never need to match anything + /// after them and because any removed literals are guaranteed to never + /// match. 
+ fn minimize(literals: &mut Vec, keep_exact: bool) { + let mut trie = PreferenceTrie { + states: vec![], + matches: vec![], + next_literal_index: 1, + }; + let mut make_inexact = vec![]; + literals.retain_mut(|lit| match trie.insert(lit.as_bytes()) { + Ok(_) => true, + Err(i) => { + if !keep_exact { + make_inexact.push(i.checked_sub(1).unwrap()); + } + false + } + }); + for i in make_inexact { + literals[i].make_inexact(); + } + } + + /// Returns `Ok` if the given byte string is accepted into this trie and + /// `Err` otherwise. The index for the success case corresponds to the + /// index of the literal added. The index for the error case corresponds to + /// the index of the literal already in the trie that prevented the given + /// byte string from being added. (Which implies it is a prefix of the one + /// given.) + /// + /// In short, the byte string given is accepted into the trie if and only + /// if it is possible for it to match when executing a preference order + /// search. + fn insert(&mut self, bytes: &[u8]) -> Result { + let mut prev = self.root(); + if let Some(idx) = self.matches[prev] { + return Err(idx.get()); + } + for &b in bytes.iter() { + match self.states[prev].trans.binary_search_by_key(&b, |t| t.0) { + Ok(i) => { + prev = self.states[prev].trans[i].1; + if let Some(idx) = self.matches[prev] { + return Err(idx.get()); + } + } + Err(i) => { + let next = self.create_state(); + self.states[prev].trans.insert(i, (b, next)); + prev = next; + } + } + } + let idx = self.next_literal_index; + self.next_literal_index += 1; + self.matches[prev] = NonZeroUsize::new(idx); + Ok(idx) + } + + /// Returns the root state ID, and if it doesn't exist, creates it. + fn root(&mut self) -> usize { + if !self.states.is_empty() { + 0 + } else { + self.create_state() + } + } + + /// Creates a new empty state and returns its ID. 
+ fn create_state(&mut self) -> usize { + let id = self.states.len(); + self.states.push(State::default()); + self.matches.push(None); + id + } +} + +/// Returns the "rank" of the given byte. +/// +/// The minimum rank value is `0` and the maximum rank value is `255`. +/// +/// The rank of a byte is derived from a heuristic background distribution of +/// relative frequencies of bytes. The heuristic says that lower the rank of a +/// byte, the less likely that byte is to appear in any arbitrary haystack. +pub fn rank(byte: u8) -> u8 { + crate::rank::BYTE_FREQUENCIES[usize::from(byte)] +} + +#[cfg(test)] +mod tests { + use super::*; + + fn parse(pattern: &str) -> Hir { + crate::ParserBuilder::new().utf8(false).build().parse(pattern).unwrap() + } + + fn prefixes(pattern: &str) -> Seq { + Extractor::new().kind(ExtractKind::Prefix).extract(&parse(pattern)) + } + + fn suffixes(pattern: &str) -> Seq { + Extractor::new().kind(ExtractKind::Suffix).extract(&parse(pattern)) + } + + fn e(pattern: &str) -> (Seq, Seq) { + (prefixes(pattern), suffixes(pattern)) + } + + #[allow(non_snake_case)] + fn E(x: &str) -> Literal { + Literal::exact(x.as_bytes()) + } + + #[allow(non_snake_case)] + fn I(x: &str) -> Literal { + Literal::inexact(x.as_bytes()) + } + + fn seq>(it: I) -> Seq { + Seq::from_iter(it) + } + + fn infinite() -> (Seq, Seq) { + (Seq::infinite(), Seq::infinite()) + } + + fn inexact(it1: I1, it2: I2) -> (Seq, Seq) + where + I1: IntoIterator, + I2: IntoIterator, + { + (Seq::from_iter(it1), Seq::from_iter(it2)) + } + + fn exact, I: IntoIterator>(it: I) -> (Seq, Seq) { + let s1 = Seq::new(it); + let s2 = s1.clone(); + (s1, s2) + } + + fn opt, I: IntoIterator>(it: I) -> (Seq, Seq) { + let (mut p, mut s) = exact(it); + p.optimize_for_prefix_by_preference(); + s.optimize_for_suffix_by_preference(); + (p, s) + } + + #[test] + fn literal() { + assert_eq!(exact(["a"]), e("a")); + assert_eq!(exact(["aaaaa"]), e("aaaaa")); + assert_eq!(exact(["A", "a"]), e("(?i-u)a")); + 
assert_eq!(exact(["AB", "Ab", "aB", "ab"]), e("(?i-u)ab")); + assert_eq!(exact(["abC", "abc"]), e("ab(?i-u)c")); + + assert_eq!(exact([b"\xFF"]), e(r"(?-u:\xFF)")); + + #[cfg(feature = "unicode-case")] + { + assert_eq!(exact(["☃"]), e("☃")); + assert_eq!(exact(["☃"]), e("(?i)☃")); + assert_eq!(exact(["☃☃☃☃☃"]), e("☃☃☃☃☃")); + + assert_eq!(exact(["Δ"]), e("Δ")); + assert_eq!(exact(["δ"]), e("δ")); + assert_eq!(exact(["Δ", "δ"]), e("(?i)Δ")); + assert_eq!(exact(["Δ", "δ"]), e("(?i)δ")); + + assert_eq!(exact(["S", "s", "ſ"]), e("(?i)S")); + assert_eq!(exact(["S", "s", "ſ"]), e("(?i)s")); + assert_eq!(exact(["S", "s", "ſ"]), e("(?i)ſ")); + } + + let letters = "ͱͳͷΐάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋ"; + assert_eq!(exact([letters]), e(letters)); + } + + #[test] + fn class() { + assert_eq!(exact(["a", "b", "c"]), e("[abc]")); + assert_eq!(exact(["a1b", "a2b", "a3b"]), e("a[123]b")); + assert_eq!(exact(["δ", "ε"]), e("[εδ]")); + #[cfg(feature = "unicode-case")] + { + assert_eq!(exact(["Δ", "Ε", "δ", "ε", "ϵ"]), e(r"(?i)[εδ]")); + } + } + + #[test] + fn look() { + assert_eq!(exact(["ab"]), e(r"a\Ab")); + assert_eq!(exact(["ab"]), e(r"a\zb")); + assert_eq!(exact(["ab"]), e(r"a(?m:^)b")); + assert_eq!(exact(["ab"]), e(r"a(?m:$)b")); + assert_eq!(exact(["ab"]), e(r"a\bb")); + assert_eq!(exact(["ab"]), e(r"a\Bb")); + assert_eq!(exact(["ab"]), e(r"a(?-u:\b)b")); + assert_eq!(exact(["ab"]), e(r"a(?-u:\B)b")); + + assert_eq!(exact(["ab"]), e(r"^ab")); + assert_eq!(exact(["ab"]), e(r"$ab")); + assert_eq!(exact(["ab"]), e(r"(?m:^)ab")); + assert_eq!(exact(["ab"]), e(r"(?m:$)ab")); + assert_eq!(exact(["ab"]), e(r"\bab")); + assert_eq!(exact(["ab"]), e(r"\Bab")); + assert_eq!(exact(["ab"]), e(r"(?-u:\b)ab")); + assert_eq!(exact(["ab"]), e(r"(?-u:\B)ab")); + + assert_eq!(exact(["ab"]), e(r"ab^")); + assert_eq!(exact(["ab"]), e(r"ab$")); + assert_eq!(exact(["ab"]), e(r"ab(?m:^)")); + assert_eq!(exact(["ab"]), e(r"ab(?m:$)")); + assert_eq!(exact(["ab"]), e(r"ab\b")); + 
assert_eq!(exact(["ab"]), e(r"ab\B")); + assert_eq!(exact(["ab"]), e(r"ab(?-u:\b)")); + assert_eq!(exact(["ab"]), e(r"ab(?-u:\B)")); + + let expected = (seq([I("aZ"), E("ab")]), seq([I("Zb"), E("ab")])); + assert_eq!(expected, e(r"^aZ*b")); + } + + #[test] + fn repetition() { + assert_eq!(exact(["a", ""]), e(r"a?")); + assert_eq!(exact(["", "a"]), e(r"a??")); + assert_eq!(inexact([I("a"), E("")], [I("a"), E("")]), e(r"a*")); + assert_eq!(inexact([E(""), I("a")], [E(""), I("a")]), e(r"a*?")); + assert_eq!(inexact([I("a")], [I("a")]), e(r"a+")); + assert_eq!(inexact([I("a")], [I("a")]), e(r"(a+)+")); + + assert_eq!(exact(["ab"]), e(r"aZ{0}b")); + assert_eq!(exact(["aZb", "ab"]), e(r"aZ?b")); + assert_eq!(exact(["ab", "aZb"]), e(r"aZ??b")); + assert_eq!( + inexact([I("aZ"), E("ab")], [I("Zb"), E("ab")]), + e(r"aZ*b") + ); + assert_eq!( + inexact([E("ab"), I("aZ")], [E("ab"), I("Zb")]), + e(r"aZ*?b") + ); + assert_eq!(inexact([I("aZ")], [I("Zb")]), e(r"aZ+b")); + assert_eq!(inexact([I("aZ")], [I("Zb")]), e(r"aZ+?b")); + + assert_eq!(exact(["aZZb"]), e(r"aZ{2}b")); + assert_eq!(inexact([I("aZZ")], [I("ZZb")]), e(r"aZ{2,3}b")); + + assert_eq!(exact(["abc", ""]), e(r"(abc)?")); + assert_eq!(exact(["", "abc"]), e(r"(abc)??")); + + assert_eq!(inexact([I("a"), E("b")], [I("ab"), E("b")]), e(r"a*b")); + assert_eq!(inexact([E("b"), I("a")], [E("b"), I("ab")]), e(r"a*?b")); + assert_eq!(inexact([I("ab")], [I("b")]), e(r"ab+")); + assert_eq!(inexact([I("a"), I("b")], [I("b")]), e(r"a*b+")); + + // FIXME: The suffixes for this don't look quite right to me. I think + // the right suffixes would be: [I(ac), I(bc), E(c)]. The main issue I + // think is that suffixes are computed by iterating over concatenations + // in reverse, and then [bc, ac, c] ordering is indeed correct from + // that perspective. We also test a few more equivalent regexes, and + // we get the same result, so it is consistent at least I suppose. 
+ // + // The reason why this isn't an issue is that it only messes up + // preference order, and currently, suffixes are never used in a + // context where preference order matters. For prefixes it matters + // because we sometimes want to use prefilters without confirmation + // when all of the literals are exact (and there's no look-around). But + // we never do that for suffixes. Any time we use suffixes, we always + // include a confirmation step. If that ever changes, then it's likely + // this bug will need to be fixed, but last time I looked, it appears + // hard to do so. + assert_eq!( + inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), + e(r"a*b*c") + ); + assert_eq!( + inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), + e(r"(a+)?(b+)?c") + ); + assert_eq!( + inexact([I("a"), I("b"), E("c")], [I("bc"), I("ac"), E("c")]), + e(r"(a+|)(b+|)c") + ); + // A few more similarish but not identical regexes. These may have a + // similar problem as above. + assert_eq!( + inexact( + [I("a"), I("b"), I("c"), E("")], + [I("c"), I("b"), I("a"), E("")] + ), + e(r"a*b*c*") + ); + assert_eq!(inexact([I("a"), I("b"), I("c")], [I("c")]), e(r"a*b*c+")); + assert_eq!(inexact([I("a"), I("b")], [I("bc")]), e(r"a*b+c")); + assert_eq!(inexact([I("a"), I("b")], [I("c"), I("b")]), e(r"a*b+c*")); + assert_eq!(inexact([I("ab"), E("a")], [I("b"), E("a")]), e(r"ab*")); + assert_eq!( + inexact([I("ab"), E("ac")], [I("bc"), E("ac")]), + e(r"ab*c") + ); + assert_eq!(inexact([I("ab")], [I("b")]), e(r"ab+")); + assert_eq!(inexact([I("ab")], [I("bc")]), e(r"ab+c")); + + assert_eq!( + inexact([I("z"), E("azb")], [I("zazb"), E("azb")]), + e(r"z*azb") + ); + + let expected = + exact(["aaa", "aab", "aba", "abb", "baa", "bab", "bba", "bbb"]); + assert_eq!(expected, e(r"[ab]{3}")); + let expected = inexact( + [ + I("aaa"), + I("aab"), + I("aba"), + I("abb"), + I("baa"), + I("bab"), + I("bba"), + I("bbb"), + ], + [ + I("aaa"), + I("aab"), + I("aba"), + I("abb"), + 
I("baa"), + I("bab"), + I("bba"), + I("bbb"), + ], + ); + assert_eq!(expected, e(r"[ab]{3,4}")); + } + + #[test] + fn concat() { + let empty: [&str; 0] = []; + + assert_eq!(exact(["abcxyz"]), e(r"abc()xyz")); + assert_eq!(exact(["abcxyz"]), e(r"(abc)(xyz)")); + assert_eq!(exact(["abcmnoxyz"]), e(r"abc()mno()xyz")); + assert_eq!(exact(empty), e(r"abc[a&&b]xyz")); + assert_eq!(exact(["abcxyz"]), e(r"abc[a&&b]*xyz")); + } + + #[test] + fn alternation() { + assert_eq!(exact(["abc", "mno", "xyz"]), e(r"abc|mno|xyz")); + assert_eq!( + inexact( + [E("abc"), I("mZ"), E("mo"), E("xyz")], + [E("abc"), I("Zo"), E("mo"), E("xyz")] + ), + e(r"abc|mZ*o|xyz") + ); + assert_eq!(exact(["abc", "xyz"]), e(r"abc|M[a&&b]N|xyz")); + assert_eq!(exact(["abc", "MN", "xyz"]), e(r"abc|M[a&&b]*N|xyz")); + + assert_eq!(exact(["aaa", "aaaaa"]), e(r"(?:|aa)aaa")); + assert_eq!( + inexact( + [I("aaa"), E(""), I("aaaaa"), E("aa")], + [I("aaa"), E(""), E("aa")] + ), + e(r"(?:|aa)(?:aaa)*") + ); + assert_eq!( + inexact( + [E(""), I("aaa"), E("aa"), I("aaaaa")], + [E(""), I("aaa"), E("aa")] + ), + e(r"(?:|aa)(?:aaa)*?") + ); + + assert_eq!( + inexact([E("a"), I("b"), E("")], [E("a"), I("b"), E("")]), + e(r"a|b*") + ); + assert_eq!(inexact([E("a"), I("b")], [E("a"), I("b")]), e(r"a|b+")); + + assert_eq!( + inexact([I("a"), E("b"), E("c")], [I("ab"), E("b"), E("c")]), + e(r"a*b|c") + ); + + assert_eq!( + inexact( + [E("a"), E("b"), I("c"), E("")], + [E("a"), E("b"), I("c"), E("")] + ), + e(r"a|(?:b|c*)") + ); + + assert_eq!( + inexact( + [I("a"), I("b"), E("c"), I("a"), I("ab"), E("c")], + [I("ac"), I("bc"), E("c"), I("ac"), I("abc"), E("c")], + ), + e(r"(a|b)*c|(a|ab)*c") + ); + + assert_eq!( + exact(["abef", "abgh", "cdef", "cdgh"]), + e(r"(ab|cd)(ef|gh)") + ); + assert_eq!( + exact([ + "abefij", "abefkl", "abghij", "abghkl", "cdefij", "cdefkl", + "cdghij", "cdghkl", + ]), + e(r"(ab|cd)(ef|gh)(ij|kl)") + ); + + assert_eq!(inexact([E("abab")], [E("abab")]), e(r"(ab){2}")); + + 
assert_eq!(inexact([I("abab")], [I("abab")]), e(r"(ab){2,3}")); + + assert_eq!(inexact([I("abab")], [I("abab")]), e(r"(ab){2,}")); + } + + #[test] + fn impossible() { + let empty: [&str; 0] = []; + + assert_eq!(exact(empty), e(r"[a&&b]")); + assert_eq!(exact(empty), e(r"a[a&&b]")); + assert_eq!(exact(empty), e(r"[a&&b]b")); + assert_eq!(exact(empty), e(r"a[a&&b]b")); + assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]|b")); + assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]|b")); + assert_eq!(exact(["a", "b"]), e(r"a|[a&&b]d|b")); + assert_eq!(exact(["a", "b"]), e(r"a|c[a&&b]d|b")); + assert_eq!(exact([""]), e(r"[a&&b]*")); + assert_eq!(exact(["MN"]), e(r"M[a&&b]*N")); + } + + // This tests patterns that contain something that defeats literal + // detection, usually because it would blow some limit on the total number + // of literals that can be returned. + // + // The main idea is that when literal extraction sees something that + // it knows will blow a limit, it replaces it with a marker that says + // "any literal will match here." While not necessarily true, the + // over-estimation is just fine for the purposes of literal extraction, + // because the imprecision doesn't matter: too big is too big. + // + // This is one of the trickier parts of literal extraction, since we need + // to make sure all of our literal extraction operations correctly compose + // with the markers. 
+ #[test] + fn anything() { + assert_eq!(infinite(), e(r".")); + assert_eq!(infinite(), e(r"(?s).")); + assert_eq!(infinite(), e(r"[A-Za-z]")); + assert_eq!(infinite(), e(r"[A-Z]")); + assert_eq!(exact([""]), e(r"[A-Z]{0}")); + assert_eq!(infinite(), e(r"[A-Z]?")); + assert_eq!(infinite(), e(r"[A-Z]*")); + assert_eq!(infinite(), e(r"[A-Z]+")); + assert_eq!((seq([I("1")]), Seq::infinite()), e(r"1[A-Z]")); + assert_eq!((seq([I("1")]), seq([I("2")])), e(r"1[A-Z]2")); + assert_eq!((Seq::infinite(), seq([I("123")])), e(r"[A-Z]+123")); + assert_eq!(infinite(), e(r"[A-Z]+123[A-Z]+")); + assert_eq!(infinite(), e(r"1|[A-Z]|3")); + assert_eq!( + (seq([E("1"), I("2"), E("3")]), Seq::infinite()), + e(r"1|2[A-Z]|3"), + ); + assert_eq!( + (Seq::infinite(), seq([E("1"), I("2"), E("3")])), + e(r"1|[A-Z]2|3"), + ); + assert_eq!( + (seq([E("1"), I("2"), E("4")]), seq([E("1"), I("3"), E("4")])), + e(r"1|2[A-Z]3|4"), + ); + assert_eq!((Seq::infinite(), seq([I("2")])), e(r"(?:|1)[A-Z]2")); + assert_eq!(inexact([I("a")], [I("z")]), e(r"a.z")); + } + + // Like the 'anything' test, but it uses smaller limits in order to test + // the logic for effectively aborting literal extraction when the seqs get + // too big. 
+ #[test] + fn anything_small_limits() { + fn prefixes(pattern: &str) -> Seq { + Extractor::new() + .kind(ExtractKind::Prefix) + .limit_total(10) + .extract(&parse(pattern)) + } + + fn suffixes(pattern: &str) -> Seq { + Extractor::new() + .kind(ExtractKind::Suffix) + .limit_total(10) + .extract(&parse(pattern)) + } + + fn e(pattern: &str) -> (Seq, Seq) { + (prefixes(pattern), suffixes(pattern)) + } + + assert_eq!( + ( + seq([ + I("aaa"), + I("aab"), + I("aba"), + I("abb"), + I("baa"), + I("bab"), + I("bba"), + I("bbb") + ]), + seq([ + I("aaa"), + I("aab"), + I("aba"), + I("abb"), + I("baa"), + I("bab"), + I("bba"), + I("bbb") + ]) + ), + e(r"[ab]{3}{3}") + ); + + assert_eq!(infinite(), e(r"ab|cd|ef|gh|ij|kl|mn|op|qr|st|uv|wx|yz")); + } + + #[test] + fn empty() { + assert_eq!(exact([""]), e(r"")); + assert_eq!(exact([""]), e(r"^")); + assert_eq!(exact([""]), e(r"$")); + assert_eq!(exact([""]), e(r"(?m:^)")); + assert_eq!(exact([""]), e(r"(?m:$)")); + assert_eq!(exact([""]), e(r"\b")); + assert_eq!(exact([""]), e(r"\B")); + assert_eq!(exact([""]), e(r"(?-u:\b)")); + assert_eq!(exact([""]), e(r"(?-u:\B)")); + } + + #[test] + fn odds_and_ends() { + assert_eq!((Seq::infinite(), seq([I("a")])), e(r".a")); + assert_eq!((seq([I("a")]), Seq::infinite()), e(r"a.")); + assert_eq!(infinite(), e(r"a|.")); + assert_eq!(infinite(), e(r".|a")); + + let pat = r"M[ou]'?am+[ae]r .*([AEae]l[- ])?[GKQ]h?[aeu]+([dtz][dhz]?)+af[iy]"; + let expected = inexact( + ["Mo'am", "Moam", "Mu'am", "Muam"].map(I), + [ + "ddafi", "ddafy", "dhafi", "dhafy", "dzafi", "dzafy", "dafi", + "dafy", "tdafi", "tdafy", "thafi", "thafy", "tzafi", "tzafy", + "tafi", "tafy", "zdafi", "zdafy", "zhafi", "zhafy", "zzafi", + "zzafy", "zafi", "zafy", + ] + .map(I), + ); + assert_eq!(expected, e(pat)); + + assert_eq!( + (seq(["fn is_", "fn as_"].map(I)), Seq::infinite()), + e(r"fn is_([A-Z]+)|fn as_([A-Z]+)"), + ); + assert_eq!( + inexact([I("foo")], [I("quux")]), + e(r"foo[A-Z]+bar[A-Z]+quux") + ); + 
assert_eq!(infinite(), e(r"[A-Z]+bar[A-Z]+")); + assert_eq!( + exact(["Sherlock Holmes"]), + e(r"(?m)^Sherlock Holmes|Sherlock Holmes$") + ); + + assert_eq!(exact(["sa", "sb"]), e(r"\bs(?:[ab])")); + } + + // This tests a specific regex along with some heuristic steps to reduce + // the sequences extracted. This is meant to roughly correspond to the + // types of heuristics used to shrink literal sets in practice. (Shrinking + // is done because you want to balance "spend too much work looking for + // too many literals" and "spend too much work processing false positive + // matches from short literals.") + #[test] + #[cfg(feature = "unicode-case")] + fn holmes() { + let expected = inexact( + ["HOL", "HOl", "HoL", "Hol", "hOL", "hOl", "hoL", "hol"].map(I), + [ + "MES", "MEs", "Eſ", "MeS", "Mes", "eſ", "mES", "mEs", "meS", + "mes", + ] + .map(I), + ); + let (mut prefixes, mut suffixes) = e(r"(?i)Holmes"); + prefixes.keep_first_bytes(3); + suffixes.keep_last_bytes(3); + prefixes.minimize_by_preference(); + suffixes.minimize_by_preference(); + assert_eq!(expected, (prefixes, suffixes)); + } + + // This tests that we get some kind of literals extracted for a beefier + // alternation with case insensitive mode enabled. At one point during + // development, this returned nothing, and motivated some special case + // code in Extractor::union to try and trim down the literal sequences + // if the union would blow the limits set. + #[test] + #[cfg(feature = "unicode-case")] + fn holmes_alt() { + let mut pre = + prefixes(r"(?i)Sherlock|Holmes|Watson|Irene|Adler|John|Baker"); + assert!(pre.len().unwrap() > 0); + pre.optimize_for_prefix_by_preference(); + assert!(pre.len().unwrap() > 0); + } + + // See: https://github.com/rust-lang/regex/security/advisories/GHSA-m5pq-gvj9-9vr8 + // See: CVE-2022-24713 + // + // We test this here to ensure literal extraction completes in reasonable + // time and isn't materially impacted by these sorts of pathological + // repeats. 
+ #[test] + fn crazy_repeats() { + assert_eq!(inexact([E("")], [E("")]), e(r"(?:){4294967295}")); + assert_eq!( + inexact([E("")], [E("")]), + e(r"(?:){64}{64}{64}{64}{64}{64}") + ); + assert_eq!(inexact([E("")], [E("")]), e(r"x{0}{4294967295}")); + assert_eq!(inexact([E("")], [E("")]), e(r"(?:|){4294967295}")); + + assert_eq!( + inexact([E("")], [E("")]), + e(r"(?:){8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}") + ); + let repa = "a".repeat(100); + assert_eq!( + inexact([I(&repa)], [I(&repa)]), + e(r"a{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}{8}") + ); + } + + #[test] + fn huge() { + let pat = r#"(?-u) + 2(?: + [45]\d{3}| + 7(?: + 1[0-267]| + 2[0-289]| + 3[0-29]| + 4[01]| + 5[1-3]| + 6[013]| + 7[0178]| + 91 + )| + 8(?: + 0[125]| + [139][1-6]| + 2[0157-9]| + 41| + 6[1-35]| + 7[1-5]| + 8[1-8]| + 90 + )| + 9(?: + 0[0-2]| + 1[0-4]| + 2[568]| + 3[3-6]| + 5[5-7]| + 6[0167]| + 7[15]| + 8[0146-9] + ) + )\d{4}| + 3(?: + 12?[5-7]\d{2}| + 0(?: + 2(?: + [025-79]\d| + [348]\d{1,2} + )| + 3(?: + [2-4]\d| + [56]\d? + ) + )| + 2(?: + 1\d{2}| + 2(?: + [12]\d| + [35]\d{1,2}| + 4\d? 
+ ) + )| + 3(?: + 1\d{2}| + 2(?: + [2356]\d| + 4\d{1,2} + ) + )| + 4(?: + 1\d{2}| + 2(?: + 2\d{1,2}| + [47]| + 5\d{2} + ) + )| + 5(?: + 1\d{2}| + 29 + )| + [67]1\d{2}| + 8(?: + 1\d{2}| + 2(?: + 2\d{2}| + 3| + 4\d + ) + ) + )\d{3}| + 4(?: + 0(?: + 2(?: + [09]\d| + 7 + )| + 33\d{2} + )| + 1\d{3}| + 2(?: + 1\d{2}| + 2(?: + [25]\d?| + [348]\d| + [67]\d{1,2} + ) + )| + 3(?: + 1\d{2}(?: + \d{2} + )?| + 2(?: + [045]\d| + [236-9]\d{1,2} + )| + 32\d{2} + )| + 4(?: + [18]\d{2}| + 2(?: + [2-46]\d{2}| + 3 + )| + 5[25]\d{2} + )| + 5(?: + 1\d{2}| + 2(?: + 3\d| + 5 + ) + )| + 6(?: + [18]\d{2}| + 2(?: + 3(?: + \d{2} + )?| + [46]\d{1,2}| + 5\d{2}| + 7\d + )| + 5(?: + 3\d?| + 4\d| + [57]\d{1,2}| + 6\d{2}| + 8 + ) + )| + 71\d{2}| + 8(?: + [18]\d{2}| + 23\d{2}| + 54\d{2} + )| + 9(?: + [18]\d{2}| + 2[2-5]\d{2}| + 53\d{1,2} + ) + )\d{3}| + 5(?: + 02[03489]\d{2}| + 1\d{2}| + 2(?: + 1\d{2}| + 2(?: + 2(?: + \d{2} + )?| + [457]\d{2} + ) + )| + 3(?: + 1\d{2}| + 2(?: + [37](?: + \d{2} + )?| + [569]\d{2} + ) + )| + 4(?: + 1\d{2}| + 2[46]\d{2} + )| + 5(?: + 1\d{2}| + 26\d{1,2} + )| + 6(?: + [18]\d{2}| + 2| + 53\d{2} + )| + 7(?: + 1| + 24 + )\d{2}| + 8(?: + 1| + 26 + )\d{2}| + 91\d{2} + )\d{3}| + 6(?: + 0(?: + 1\d{2}| + 2(?: + 3\d{2}| + 4\d{1,2} + ) + )| + 2(?: + 2[2-5]\d{2}| + 5(?: + [3-5]\d{2}| + 7 + )| + 8\d{2} + )| + 3(?: + 1| + 2[3478] + )\d{2}| + 4(?: + 1| + 2[34] + )\d{2}| + 5(?: + 1| + 2[47] + )\d{2}| + 6(?: + [18]\d{2}| + 6(?: + 2(?: + 2\d| + [34]\d{2} + )| + 5(?: + [24]\d{2}| + 3\d| + 5\d{1,2} + ) + ) + )| + 72[2-5]\d{2}| + 8(?: + 1\d{2}| + 2[2-5]\d{2} + )| + 9(?: + 1\d{2}| + 2[2-6]\d{2} + ) + )\d{3}| + 7(?: + (?: + 02| + [3-589]1| + 6[12]| + 72[24] + )\d{2}| + 21\d{3}| + 32 + )\d{3}| + 8(?: + (?: + 4[12]| + [5-7]2| + 1\d? 
+ )| + (?: + 0| + 3[12]| + [5-7]1| + 217 + )\d + )\d{4}| + 9(?: + [35]1| + (?: + [024]2| + 81 + )\d| + (?: + 1| + [24]1 + )\d{2} + )\d{3} + "#; + // TODO: This is a good candidate of a seq of literals that could be + // shrunk quite a bit and still be very productive with respect to + // literal optimizations. + let (prefixes, suffixes) = e(pat); + assert!(!suffixes.is_finite()); + assert_eq!(Some(243), prefixes.len()); + } + + #[test] + fn optimize() { + // This gets a common prefix that isn't too short. + let (p, s) = + opt(["foobarfoobar", "foobar", "foobarzfoobar", "foobarfoobar"]); + assert_eq!(seq([I("foobar")]), p); + assert_eq!(seq([I("foobar")]), s); + + // This also finds a common prefix, but since it's only one byte, it + // prefers the multiple literals. + let (p, s) = opt(["abba", "akka", "abccba"]); + assert_eq!(exact(["abba", "akka", "abccba"]), (p, s)); + + let (p, s) = opt(["sam", "samwise"]); + assert_eq!((seq([E("sam")]), seq([E("sam"), E("samwise")])), (p, s)); + + // The empty string is poisonous, so our seq becomes infinite, even + // though all literals are exact. + let (p, s) = opt(["foobarfoo", "foo", "", "foozfoo", "foofoo"]); + assert!(!p.is_finite()); + assert!(!s.is_finite()); + + // A space is also poisonous, so our seq becomes infinite. But this + // only gets triggered when we don't have a completely exact sequence. + // When the sequence is exact, spaces are okay, since we presume that + // any prefilter will match a space more quickly than the regex engine. + // (When the sequence is exact, there's a chance of the prefilter being + // used without needing the regex engine at all.) 
+ let mut p = seq([E("foobarfoo"), I("foo"), E(" "), E("foofoo")]); + p.optimize_for_prefix_by_preference(); + assert!(!p.is_finite()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..6d57fe3fd537c42c82d07cfbe7f4ca6a9434c545 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/mod.rs @@ -0,0 +1,3873 @@ +/*! +Defines a high-level intermediate (HIR) representation for regular expressions. + +The HIR is represented by the [`Hir`] type, and it principally constructed via +[translation](translate) from an [`Ast`](crate::ast::Ast). Alternatively, users +may use the smart constructors defined on `Hir` to build their own by hand. The +smart constructors simultaneously simplify and "optimize" the HIR, and are also +the same routines used by translation. + +Most regex engines only have an HIR like this, and usually construct it +directly from the concrete syntax. This crate however first parses the +concrete syntax into an `Ast`, and only then creates the HIR from the `Ast`, +as mentioned above. It's done this way to facilitate better error reporting, +and to have a structured representation of a regex that faithfully represents +its concrete syntax. Namely, while an `Hir` value can be converted back to an +equivalent regex pattern string, it is unlikely to look like the original due +to its simplified structure. 
+*/ + +use core::{char, cmp}; + +use alloc::{ + boxed::Box, + format, + string::{String, ToString}, + vec, + vec::Vec, +}; + +use crate::{ + ast::Span, + hir::interval::{Interval, IntervalSet, IntervalSetIter}, + unicode, +}; + +pub use crate::{ + hir::visitor::{visit, Visitor}, + unicode::CaseFoldError, +}; + +mod interval; +pub mod literal; +pub mod print; +pub mod translate; +mod visitor; + +/// An error that can occur while translating an `Ast` to a `Hir`. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Error { + /// The kind of error. + kind: ErrorKind, + /// The original pattern that the translator's Ast was parsed from. Every + /// span in an error is a valid range into this string. + pattern: String, + /// The span of this error, derived from the Ast given to the translator. + span: Span, +} + +impl Error { + /// Return the type of this error. + pub fn kind(&self) -> &ErrorKind { + &self.kind + } + + /// The original pattern string in which this error occurred. + /// + /// Every span reported by this error is reported in terms of this string. + pub fn pattern(&self) -> &str { + &self.pattern + } + + /// Return the span at which this error occurred. + pub fn span(&self) -> &Span { + &self.span + } +} + +/// The type of an error that occurred while building an `Hir`. +/// +/// This error type is marked as `non_exhaustive`. This means that adding a +/// new variant is not considered a breaking change. +#[non_exhaustive] +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ErrorKind { + /// This error occurs when a Unicode feature is used when Unicode + /// support is disabled. For example `(?-u:\pL)` would trigger this error. + UnicodeNotAllowed, + /// This error occurs when translating a pattern that could match a byte + /// sequence that isn't UTF-8 and `utf8` was enabled. + InvalidUtf8, + /// This error occurs when one uses a non-ASCII byte for a line terminator, + /// but where Unicode mode is enabled and UTF-8 mode is disabled. 
+ InvalidLineTerminator, + /// This occurs when an unrecognized Unicode property name could not + /// be found. + UnicodePropertyNotFound, + /// This occurs when an unrecognized Unicode property value could not + /// be found. + UnicodePropertyValueNotFound, + /// This occurs when a Unicode-aware Perl character class (`\w`, `\s` or + /// `\d`) could not be found. This can occur when the `unicode-perl` + /// crate feature is not enabled. + UnicodePerlClassNotFound, + /// This occurs when the Unicode simple case mapping tables are not + /// available, and the regular expression required Unicode aware case + /// insensitivity. + UnicodeCaseUnavailable, +} + +#[cfg(feature = "std")] +impl std::error::Error for Error {} + +impl core::fmt::Display for Error { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + crate::error::Formatter::from(self).fmt(f) + } +} + +impl core::fmt::Display for ErrorKind { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use self::ErrorKind::*; + + let msg = match *self { + UnicodeNotAllowed => "Unicode not allowed here", + InvalidUtf8 => "pattern can match invalid UTF-8", + InvalidLineTerminator => "invalid line terminator, must be ASCII", + UnicodePropertyNotFound => "Unicode property not found", + UnicodePropertyValueNotFound => "Unicode property value not found", + UnicodePerlClassNotFound => { + "Unicode-aware Perl class not found \ + (make sure the unicode-perl feature is enabled)" + } + UnicodeCaseUnavailable => { + "Unicode-aware case insensitivity matching is not available \ + (make sure the unicode-case feature is enabled)" + } + }; + f.write_str(msg) + } +} + +/// A high-level intermediate representation (HIR) for a regular expression. +/// +/// An HIR value is a combination of a [`HirKind`] and a set of [`Properties`]. 
+/// An `HirKind` indicates what kind of regular expression it is (a literal, +/// a repetition, a look-around assertion, etc.), where as a `Properties` +/// describes various facts about the regular expression. For example, whether +/// it matches UTF-8 or if it matches the empty string. +/// +/// The HIR of a regular expression represents an intermediate step between +/// its abstract syntax (a structured description of the concrete syntax) and +/// an actual regex matcher. The purpose of HIR is to make regular expressions +/// easier to analyze. In particular, the AST is much more complex than the +/// HIR. For example, while an AST supports arbitrarily nested character +/// classes, the HIR will flatten all nested classes into a single set. The HIR +/// will also "compile away" every flag present in the concrete syntax. For +/// example, users of HIR expressions never need to worry about case folding; +/// it is handled automatically by the translator (e.g., by translating +/// `(?i:A)` to `[aA]`). +/// +/// The specific type of an HIR expression can be accessed via its `kind` +/// or `into_kind` methods. This extra level of indirection exists for two +/// reasons: +/// +/// 1. Construction of an HIR expression *must* use the constructor methods on +/// this `Hir` type instead of building the `HirKind` values directly. This +/// permits construction to enforce invariants like "concatenations always +/// consist of two or more sub-expressions." +/// 2. Every HIR expression contains attributes that are defined inductively, +/// and can be computed cheaply during the construction process. For example, +/// one such attribute is whether the expression must match at the beginning of +/// the haystack. +/// +/// In particular, if you have an `HirKind` value, then there is intentionally +/// no way to build an `Hir` value from it. You instead need to do case +/// analysis on the `HirKind` value and build the `Hir` value using its smart +/// constructors. 
+/// +/// # UTF-8 +/// +/// If the HIR was produced by a translator with +/// [`TranslatorBuilder::utf8`](translate::TranslatorBuilder::utf8) enabled, +/// then the HIR is guaranteed to match UTF-8 exclusively for all non-empty +/// matches. +/// +/// For empty matches, those can occur at any position. It is the +/// responsibility of the regex engine to determine whether empty matches are +/// permitted between the code units of a single codepoint. +/// +/// # Stack space +/// +/// This type defines its own destructor that uses constant stack space and +/// heap space proportional to the size of the HIR. +/// +/// Also, an `Hir`'s `fmt::Display` implementation prints an HIR as a regular +/// expression pattern string, and uses constant stack space and heap space +/// proportional to the size of the `Hir`. The regex it prints is guaranteed to +/// be _semantically_ equivalent to the original concrete syntax, but it may +/// look very different. (And potentially not practically readable by a human.) +/// +/// An `Hir`'s `fmt::Debug` implementation currently does not use constant +/// stack space. The implementation will also suppress some details (such as +/// the `Properties` inlined into every `Hir` value to make it less noisy). +#[derive(Clone, Eq, PartialEq)] +pub struct Hir { + /// The underlying HIR kind. + kind: HirKind, + /// Analysis info about this HIR, computed during construction. + props: Properties, +} + +/// Methods for accessing the underlying `HirKind` and `Properties`. +impl Hir { + /// Returns a reference to the underlying HIR kind. + pub fn kind(&self) -> &HirKind { + &self.kind + } + + /// Consumes ownership of this HIR expression and returns its underlying + /// `HirKind`. + pub fn into_kind(mut self) -> HirKind { + core::mem::replace(&mut self.kind, HirKind::Empty) + } + + /// Returns the properties computed for this `Hir`. + pub fn properties(&self) -> &Properties { + &self.props + } + + /// Splits this HIR into its constituent parts. 
+ /// + /// This is useful because `let Hir { kind, props } = hir;` does not work + /// because of `Hir`'s custom `Drop` implementation. + fn into_parts(mut self) -> (HirKind, Properties) { + ( + core::mem::replace(&mut self.kind, HirKind::Empty), + core::mem::replace(&mut self.props, Properties::empty()), + ) + } +} + +/// Smart constructors for HIR values. +/// +/// These constructors are called "smart" because they do inductive work or +/// simplifications. For example, calling `Hir::repetition` with a repetition +/// like `a{0}` will actually return a `Hir` with a `HirKind::Empty` kind +/// since it is equivalent to an empty regex. Another example is calling +/// `Hir::concat(vec![expr])`. Instead of getting a `HirKind::Concat`, you'll +/// just get back the original `expr` since it's precisely equivalent. +/// +/// Smart constructors enable maintaining invariants about the HIR data type +/// while also simultaneously keeping the representation as simple as possible. +impl Hir { + /// Returns an empty HIR expression. + /// + /// An empty HIR expression always matches, including the empty string. + #[inline] + pub fn empty() -> Hir { + let props = Properties::empty(); + Hir { kind: HirKind::Empty, props } + } + + /// Returns an HIR expression that can never match anything. That is, + /// the size of the set of strings in the language described by the HIR + /// returned is `0`. + /// + /// This is distinct from [`Hir::empty`] in that the empty string matches + /// the HIR returned by `Hir::empty`. That is, the set of strings in the + /// language describe described by `Hir::empty` is non-empty. + /// + /// Note that currently, the HIR returned uses an empty character class to + /// indicate that nothing can match. An equivalent expression that cannot + /// match is an empty alternation, but all such "fail" expressions are + /// normalized (via smart constructors) to empty character classes. 
This is + /// because empty character classes can be spelled in the concrete syntax + /// of a regex (e.g., `\P{any}` or `(?-u:[^\x00-\xFF])` or `[a&&b]`), but + /// empty alternations cannot. + #[inline] + pub fn fail() -> Hir { + let class = Class::Bytes(ClassBytes::empty()); + let props = Properties::class(&class); + // We can't just call Hir::class here because it defers to Hir::fail + // in order to canonicalize the Hir value used to represent "cannot + // match." + Hir { kind: HirKind::Class(class), props } + } + + /// Creates a literal HIR expression. + /// + /// This accepts anything that can be converted into a `Box<[u8]>`. + /// + /// Note that there is no mechanism for storing a `char` or a `Box` + /// in an HIR. Everything is "just bytes." Whether a `Literal` (or + /// any HIR node) matches valid UTF-8 exclusively can be queried via + /// [`Properties::is_utf8`]. + /// + /// # Example + /// + /// This example shows that concatenations of `Literal` HIR values will + /// automatically get flattened and combined together. So for example, even + /// if you concat multiple `Literal` values that are themselves not valid + /// UTF-8, they might add up to valid UTF-8. This also demonstrates just + /// how "smart" Hir's smart constructors are. + /// + /// ``` + /// use regex_syntax::hir::{Hir, HirKind, Literal}; + /// + /// let literals = vec![ + /// Hir::literal([0xE2]), + /// Hir::literal([0x98]), + /// Hir::literal([0x83]), + /// ]; + /// // Each literal, on its own, is invalid UTF-8. + /// assert!(literals.iter().all(|hir| !hir.properties().is_utf8())); + /// + /// let concat = Hir::concat(literals); + /// // But the concatenation is valid UTF-8! + /// assert!(concat.properties().is_utf8()); + /// + /// // And also notice that the literals have been concatenated into a + /// // single `Literal`, to the point where there is no explicit `Concat`! 
+ /// let expected = HirKind::Literal(Literal(Box::from("☃".as_bytes()))); + /// assert_eq!(&expected, concat.kind()); + /// ``` + /// + /// # Example: building a literal from a `char` + /// + /// This example shows how to build a single `Hir` literal from a `char` + /// value. Since a [`Literal`] is just bytes, we just need to UTF-8 + /// encode a `char` value: + /// + /// ``` + /// use regex_syntax::hir::{Hir, HirKind, Literal}; + /// + /// let ch = '☃'; + /// let got = Hir::literal(ch.encode_utf8(&mut [0; 4]).as_bytes()); + /// + /// let expected = HirKind::Literal(Literal(Box::from("☃".as_bytes()))); + /// assert_eq!(&expected, got.kind()); + /// ``` + #[inline] + pub fn literal>>(lit: B) -> Hir { + let bytes = lit.into(); + if bytes.is_empty() { + return Hir::empty(); + } + + let lit = Literal(bytes); + let props = Properties::literal(&lit); + Hir { kind: HirKind::Literal(lit), props } + } + + /// Creates a class HIR expression. The class may either be defined over + /// ranges of Unicode codepoints or ranges of raw byte values. + /// + /// Note that an empty class is permitted. An empty class is equivalent to + /// `Hir::fail()`. + #[inline] + pub fn class(class: Class) -> Hir { + if class.is_empty() { + return Hir::fail(); + } else if let Some(bytes) = class.literal() { + return Hir::literal(bytes); + } + let props = Properties::class(&class); + Hir { kind: HirKind::Class(class), props } + } + + /// Creates a look-around assertion HIR expression. + #[inline] + pub fn look(look: Look) -> Hir { + let props = Properties::look(look); + Hir { kind: HirKind::Look(look), props } + } + + /// Creates a repetition HIR expression. + #[inline] + pub fn repetition(mut rep: Repetition) -> Hir { + // If the sub-expression of a repetition can only match the empty + // string, then we force its maximum to be at most 1. 
+ if rep.sub.properties().maximum_len() == Some(0) { + rep.min = cmp::min(rep.min, 1); + rep.max = rep.max.map(|n| cmp::min(n, 1)).or(Some(1)); + } + // The regex 'a{0}' is always equivalent to the empty regex. This is + // true even when 'a' is an expression that never matches anything + // (like '\P{any}'). + // + // Additionally, the regex 'a{1}' is always equivalent to 'a'. + if rep.min == 0 && rep.max == Some(0) { + return Hir::empty(); + } else if rep.min == 1 && rep.max == Some(1) { + return *rep.sub; + } + let props = Properties::repetition(&rep); + Hir { kind: HirKind::Repetition(rep), props } + } + + /// Creates a capture HIR expression. + /// + /// Note that there is no explicit HIR value for a non-capturing group. + /// Since a non-capturing group only exists to override precedence in the + /// concrete syntax and since an HIR already does its own grouping based on + /// what is parsed, there is no need to explicitly represent non-capturing + /// groups in the HIR. + #[inline] + pub fn capture(capture: Capture) -> Hir { + let props = Properties::capture(&capture); + Hir { kind: HirKind::Capture(capture), props } + } + + /// Returns the concatenation of the given expressions. + /// + /// This attempts to flatten and simplify the concatenation as appropriate. + /// + /// # Example + /// + /// This shows a simple example of basic flattening of both concatenations + /// and literals. + /// + /// ``` + /// use regex_syntax::hir::Hir; + /// + /// let hir = Hir::concat(vec![ + /// Hir::concat(vec![ + /// Hir::literal([b'a']), + /// Hir::literal([b'b']), + /// Hir::literal([b'c']), + /// ]), + /// Hir::concat(vec![ + /// Hir::literal([b'x']), + /// Hir::literal([b'y']), + /// Hir::literal([b'z']), + /// ]), + /// ]); + /// let expected = Hir::literal("abcxyz".as_bytes()); + /// assert_eq!(expected, hir); + /// ``` + pub fn concat(subs: Vec) -> Hir { + // We rebuild the concatenation by simplifying it. 
Would be nice to do + // it in place, but that seems a little tricky? + let mut new = vec![]; + // This gobbles up any adjacent literals in a concatenation and smushes + // them together. Basically, when we see a literal, we add its bytes + // to 'prior_lit', and whenever we see anything else, we first take + // any bytes in 'prior_lit' and add it to the 'new' concatenation. + let mut prior_lit: Option> = None; + for sub in subs { + let (kind, props) = sub.into_parts(); + match kind { + HirKind::Literal(Literal(bytes)) => { + if let Some(ref mut prior_bytes) = prior_lit { + prior_bytes.extend_from_slice(&bytes); + } else { + prior_lit = Some(bytes.to_vec()); + } + } + // We also flatten concats that are direct children of another + // concat. We only need to do this one level deep since + // Hir::concat is the only way to build concatenations, and so + // flattening happens inductively. + HirKind::Concat(subs2) => { + for sub2 in subs2 { + let (kind2, props2) = sub2.into_parts(); + match kind2 { + HirKind::Literal(Literal(bytes)) => { + if let Some(ref mut prior_bytes) = prior_lit { + prior_bytes.extend_from_slice(&bytes); + } else { + prior_lit = Some(bytes.to_vec()); + } + } + kind2 => { + if let Some(prior_bytes) = prior_lit.take() { + new.push(Hir::literal(prior_bytes)); + } + new.push(Hir { kind: kind2, props: props2 }); + } + } + } + } + // We can just skip empty HIRs. + HirKind::Empty => {} + kind => { + if let Some(prior_bytes) = prior_lit.take() { + new.push(Hir::literal(prior_bytes)); + } + new.push(Hir { kind, props }); + } + } + } + if let Some(prior_bytes) = prior_lit.take() { + new.push(Hir::literal(prior_bytes)); + } + if new.is_empty() { + return Hir::empty(); + } else if new.len() == 1 { + return new.pop().unwrap(); + } + let props = Properties::concat(&new); + Hir { kind: HirKind::Concat(new), props } + } + + /// Returns the alternation of the given expressions. + /// + /// This flattens and simplifies the alternation as appropriate. 
This may + /// include factoring out common prefixes or even rewriting the alternation + /// as a character class. + /// + /// Note that an empty alternation is equivalent to `Hir::fail()`. (It + /// is not possible for one to write an empty alternation, or even an + /// alternation with a single sub-expression, in the concrete syntax of a + /// regex.) + /// + /// # Example + /// + /// This is a simple example showing how an alternation might get + /// simplified. + /// + /// ``` + /// use regex_syntax::hir::{Hir, Class, ClassUnicode, ClassUnicodeRange}; + /// + /// let hir = Hir::alternation(vec![ + /// Hir::literal([b'a']), + /// Hir::literal([b'b']), + /// Hir::literal([b'c']), + /// Hir::literal([b'd']), + /// Hir::literal([b'e']), + /// Hir::literal([b'f']), + /// ]); + /// let expected = Hir::class(Class::Unicode(ClassUnicode::new([ + /// ClassUnicodeRange::new('a', 'f'), + /// ]))); + /// assert_eq!(expected, hir); + /// ``` + /// + /// And another example showing how common prefixes might get factored + /// out. + /// + /// ``` + /// use regex_syntax::hir::{Hir, Class, ClassUnicode, ClassUnicodeRange}; + /// + /// let hir = Hir::alternation(vec![ + /// Hir::concat(vec![ + /// Hir::literal("abc".as_bytes()), + /// Hir::class(Class::Unicode(ClassUnicode::new([ + /// ClassUnicodeRange::new('A', 'Z'), + /// ]))), + /// ]), + /// Hir::concat(vec![ + /// Hir::literal("abc".as_bytes()), + /// Hir::class(Class::Unicode(ClassUnicode::new([ + /// ClassUnicodeRange::new('a', 'z'), + /// ]))), + /// ]), + /// ]); + /// let expected = Hir::concat(vec![ + /// Hir::literal("abc".as_bytes()), + /// Hir::alternation(vec![ + /// Hir::class(Class::Unicode(ClassUnicode::new([ + /// ClassUnicodeRange::new('A', 'Z'), + /// ]))), + /// Hir::class(Class::Unicode(ClassUnicode::new([ + /// ClassUnicodeRange::new('a', 'z'), + /// ]))), + /// ]), + /// ]); + /// assert_eq!(expected, hir); + /// ``` + /// + /// Note that these sorts of simplifications are not guaranteed. 
+ pub fn alternation(subs: Vec) -> Hir { + // We rebuild the alternation by simplifying it. We proceed similarly + // as the concatenation case. But in this case, there's no literal + // simplification happening. We're just flattening alternations. + let mut new = Vec::with_capacity(subs.len()); + for sub in subs { + let (kind, props) = sub.into_parts(); + match kind { + HirKind::Alternation(subs2) => { + new.extend(subs2); + } + kind => { + new.push(Hir { kind, props }); + } + } + } + if new.is_empty() { + return Hir::fail(); + } else if new.len() == 1 { + return new.pop().unwrap(); + } + // Now that it's completely flattened, look for the special case of + // 'char1|char2|...|charN' and collapse that into a class. Note that + // we look for 'char' first and then bytes. The issue here is that if + // we find both non-ASCII codepoints and non-ASCII singleton bytes, + // then it isn't actually possible to smush them into a single class. + // (Because classes are either "all codepoints" or "all bytes." You + // can have a class that both matches non-ASCII but valid UTF-8 and + // invalid UTF-8.) So we look for all chars and then all bytes, and + // don't handle anything else. + if let Some(singletons) = singleton_chars(&new) { + let it = singletons + .into_iter() + .map(|ch| ClassUnicodeRange { start: ch, end: ch }); + return Hir::class(Class::Unicode(ClassUnicode::new(it))); + } + if let Some(singletons) = singleton_bytes(&new) { + let it = singletons + .into_iter() + .map(|b| ClassBytesRange { start: b, end: b }); + return Hir::class(Class::Bytes(ClassBytes::new(it))); + } + // Similar to singleton chars, we can also look for alternations of + // classes. Those can be smushed into a single class. 
+ if let Some(cls) = class_chars(&new) { + return Hir::class(cls); + } + if let Some(cls) = class_bytes(&new) { + return Hir::class(cls); + } + // Factor out a common prefix if we can, which might potentially + // simplify the expression and unlock other optimizations downstream. + // It also might generally make NFA matching and DFA construction + // faster by reducing the scope of branching in the regex. + new = match lift_common_prefix(new) { + Ok(hir) => return hir, + Err(unchanged) => unchanged, + }; + let props = Properties::alternation(&new); + Hir { kind: HirKind::Alternation(new), props } + } + + /// Returns an HIR expression for `.`. + /// + /// * [`Dot::AnyChar`] maps to `(?su-R:.)`. + /// * [`Dot::AnyByte`] maps to `(?s-Ru:.)`. + /// * [`Dot::AnyCharExceptLF`] maps to `(?u-Rs:.)`. + /// * [`Dot::AnyCharExceptCRLF`] maps to `(?Ru-s:.)`. + /// * [`Dot::AnyByteExceptLF`] maps to `(?-Rsu:.)`. + /// * [`Dot::AnyByteExceptCRLF`] maps to `(?R-su:.)`. + /// + /// # Example + /// + /// Note that this is a convenience routine for constructing the correct + /// character class based on the value of `Dot`. There is no explicit "dot" + /// HIR value. It is just an abbreviation for a common character class. 
+ /// + /// ``` + /// use regex_syntax::hir::{Hir, Dot, Class, ClassBytes, ClassBytesRange}; + /// + /// let hir = Hir::dot(Dot::AnyByte); + /// let expected = Hir::class(Class::Bytes(ClassBytes::new([ + /// ClassBytesRange::new(0x00, 0xFF), + /// ]))); + /// assert_eq!(expected, hir); + /// ``` + #[inline] + pub fn dot(dot: Dot) -> Hir { + match dot { + Dot::AnyChar => Hir::class(Class::Unicode(ClassUnicode::new([ + ClassUnicodeRange::new('\0', '\u{10FFFF}'), + ]))), + Dot::AnyByte => Hir::class(Class::Bytes(ClassBytes::new([ + ClassBytesRange::new(b'\0', b'\xFF'), + ]))), + Dot::AnyCharExcept(ch) => { + let mut cls = + ClassUnicode::new([ClassUnicodeRange::new(ch, ch)]); + cls.negate(); + Hir::class(Class::Unicode(cls)) + } + Dot::AnyCharExceptLF => { + Hir::class(Class::Unicode(ClassUnicode::new([ + ClassUnicodeRange::new('\0', '\x09'), + ClassUnicodeRange::new('\x0B', '\u{10FFFF}'), + ]))) + } + Dot::AnyCharExceptCRLF => { + Hir::class(Class::Unicode(ClassUnicode::new([ + ClassUnicodeRange::new('\0', '\x09'), + ClassUnicodeRange::new('\x0B', '\x0C'), + ClassUnicodeRange::new('\x0E', '\u{10FFFF}'), + ]))) + } + Dot::AnyByteExcept(byte) => { + let mut cls = + ClassBytes::new([ClassBytesRange::new(byte, byte)]); + cls.negate(); + Hir::class(Class::Bytes(cls)) + } + Dot::AnyByteExceptLF => { + Hir::class(Class::Bytes(ClassBytes::new([ + ClassBytesRange::new(b'\0', b'\x09'), + ClassBytesRange::new(b'\x0B', b'\xFF'), + ]))) + } + Dot::AnyByteExceptCRLF => { + Hir::class(Class::Bytes(ClassBytes::new([ + ClassBytesRange::new(b'\0', b'\x09'), + ClassBytesRange::new(b'\x0B', b'\x0C'), + ClassBytesRange::new(b'\x0E', b'\xFF'), + ]))) + } + } + } +} + +/// The underlying kind of an arbitrary [`Hir`] expression. +/// +/// An `HirKind` is principally useful for doing case analysis on the type +/// of a regular expression. 
If you're looking to build new `Hir` values, +/// then you _must_ use the smart constructors defined on `Hir`, like +/// [`Hir::repetition`], to build new `Hir` values. The API intentionally does +/// not expose any way of building an `Hir` directly from an `HirKind`. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum HirKind { + /// The empty regular expression, which matches everything, including the + /// empty string. + Empty, + /// A literal string that matches exactly these bytes. + Literal(Literal), + /// A single character class that matches any of the characters in the + /// class. A class can either consist of Unicode scalar values as + /// characters, or it can use bytes. + /// + /// A class may be empty. In which case, it matches nothing. + Class(Class), + /// A look-around assertion. A look-around match always has zero length. + Look(Look), + /// A repetition operation applied to a sub-expression. + Repetition(Repetition), + /// A capturing group, which contains a sub-expression. + Capture(Capture), + /// A concatenation of expressions. + /// + /// A concatenation matches only if each of its sub-expressions match one + /// after the other. + /// + /// Concatenations are guaranteed by `Hir`'s smart constructors to always + /// have at least two sub-expressions. + Concat(Vec), + /// An alternation of expressions. + /// + /// An alternation matches only if at least one of its sub-expressions + /// match. If multiple sub-expressions match, then the leftmost is + /// preferred. + /// + /// Alternations are guaranteed by `Hir`'s smart constructors to always + /// have at least two sub-expressions. + Alternation(Vec), +} + +impl HirKind { + /// Returns a slice of this kind's sub-expressions, if any. + pub fn subs(&self) -> &[Hir] { + use core::slice::from_ref; + + match *self { + HirKind::Empty + | HirKind::Literal(_) + | HirKind::Class(_) + | HirKind::Look(_) => &[], + HirKind::Repetition(Repetition { ref sub, .. 
}) => from_ref(sub), + HirKind::Capture(Capture { ref sub, .. }) => from_ref(sub), + HirKind::Concat(ref subs) => subs, + HirKind::Alternation(ref subs) => subs, + } + } +} + +impl core::fmt::Debug for Hir { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + self.kind.fmt(f) + } +} + +/// Print a display representation of this Hir. +/// +/// The result of this is a valid regular expression pattern string. +/// +/// This implementation uses constant stack space and heap space proportional +/// to the size of the `Hir`. +impl core::fmt::Display for Hir { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + crate::hir::print::Printer::new().print(self, f) + } +} + +/// The high-level intermediate representation of a literal. +/// +/// A literal corresponds to `0` or more bytes that should be matched +/// literally. The smart constructors defined on `Hir` will automatically +/// concatenate adjacent literals into one literal, and will even automatically +/// replace empty literals with `Hir::empty()`. +/// +/// Note that despite a literal being represented by a sequence of bytes, its +/// `Debug` implementation will attempt to print it as a normal string. (That +/// is, not a sequence of decimal numbers.) +#[derive(Clone, Eq, PartialEq)] +pub struct Literal(pub Box<[u8]>); + +impl core::fmt::Debug for Literal { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + crate::debug::Bytes(&self.0).fmt(f) + } +} + +/// The high-level intermediate representation of a character class. +/// +/// A character class corresponds to a set of characters. A character is either +/// defined by a Unicode scalar value or a byte. +/// +/// A character class, regardless of its character type, is represented by a +/// sequence of non-overlapping non-adjacent ranges of characters. +/// +/// There are no guarantees about which class variant is used. 
Generally +/// speaking, the Unicode variant is used whenever a class needs to contain +/// non-ASCII Unicode scalar values. But the Unicode variant can be used even +/// when Unicode mode is disabled. For example, at the time of writing, the +/// regex `(?-u:a|\xc2\xa0)` will compile down to HIR for the Unicode class +/// `[a\u00A0]` due to optimizations. +/// +/// Note that `Bytes` variant may be produced even when it exclusively matches +/// valid UTF-8. This is because a `Bytes` variant represents an intention by +/// the author of the regular expression to disable Unicode mode, which in turn +/// impacts the semantics of case insensitive matching. For example, `(?i)k` +/// and `(?i-u)k` will not match the same set of strings. +#[derive(Clone, Eq, PartialEq)] +pub enum Class { + /// A set of characters represented by Unicode scalar values. + Unicode(ClassUnicode), + /// A set of characters represented by arbitrary bytes (one byte per + /// character). + Bytes(ClassBytes), +} + +impl Class { + /// Apply Unicode simple case folding to this character class, in place. + /// The character class will be expanded to include all simple case folded + /// character variants. + /// + /// If this is a byte oriented character class, then this will be limited + /// to the ASCII ranges `A-Z` and `a-z`. + /// + /// # Panics + /// + /// This routine panics when the case mapping data necessary for this + /// routine to complete is unavailable. This occurs when the `unicode-case` + /// feature is not enabled and the underlying class is Unicode oriented. + /// + /// Callers should prefer using `try_case_fold_simple` instead, which will + /// return an error instead of panicking. + pub fn case_fold_simple(&mut self) { + match *self { + Class::Unicode(ref mut x) => x.case_fold_simple(), + Class::Bytes(ref mut x) => x.case_fold_simple(), + } + } + + /// Apply Unicode simple case folding to this character class, in place. 
+ /// The character class will be expanded to include all simple case folded + /// character variants. + /// + /// If this is a byte oriented character class, then this will be limited + /// to the ASCII ranges `A-Z` and `a-z`. + /// + /// # Error + /// + /// This routine returns an error when the case mapping data necessary + /// for this routine to complete is unavailable. This occurs when the + /// `unicode-case` feature is not enabled and the underlying class is + /// Unicode oriented. + pub fn try_case_fold_simple( + &mut self, + ) -> core::result::Result<(), CaseFoldError> { + match *self { + Class::Unicode(ref mut x) => x.try_case_fold_simple()?, + Class::Bytes(ref mut x) => x.case_fold_simple(), + } + Ok(()) + } + + /// Negate this character class in place. + /// + /// After completion, this character class will contain precisely the + /// characters that weren't previously in the class. + pub fn negate(&mut self) { + match *self { + Class::Unicode(ref mut x) => x.negate(), + Class::Bytes(ref mut x) => x.negate(), + } + } + + /// Returns true if and only if this character class will only ever match + /// valid UTF-8. + /// + /// A character class can match invalid UTF-8 only when the following + /// conditions are met: + /// + /// 1. The translator was configured to permit generating an expression + /// that can match invalid UTF-8. (By default, this is disabled.) + /// 2. Unicode mode (via the `u` flag) was disabled either in the concrete + /// syntax or in the parser builder. By default, Unicode mode is + /// enabled. + pub fn is_utf8(&self) -> bool { + match *self { + Class::Unicode(_) => true, + Class::Bytes(ref x) => x.is_ascii(), + } + } + + /// Returns the length, in bytes, of the smallest string matched by this + /// character class. + /// + /// For non-empty byte oriented classes, this always returns `1`. For + /// non-empty Unicode oriented classes, this can return `1`, `2`, `3` or + /// `4`. For empty classes, `None` is returned. 
It is impossible for `0` to + /// be returned. + /// + /// # Example + /// + /// This example shows some examples of regexes and their corresponding + /// minimum length, if any. + /// + /// ``` + /// use regex_syntax::{hir::Properties, parse}; + /// + /// // The empty string has a min length of 0. + /// let hir = parse(r"")?; + /// assert_eq!(Some(0), hir.properties().minimum_len()); + /// // As do other types of regexes that only match the empty string. + /// let hir = parse(r"^$\b\B")?; + /// assert_eq!(Some(0), hir.properties().minimum_len()); + /// // A regex that can match the empty string but match more is still 0. + /// let hir = parse(r"a*")?; + /// assert_eq!(Some(0), hir.properties().minimum_len()); + /// // A regex that matches nothing has no minimum defined. + /// let hir = parse(r"[a&&b]")?; + /// assert_eq!(None, hir.properties().minimum_len()); + /// // Character classes usually have a minimum length of 1. + /// let hir = parse(r"\w")?; + /// assert_eq!(Some(1), hir.properties().minimum_len()); + /// // But sometimes Unicode classes might be bigger! + /// let hir = parse(r"\p{Cyrillic}")?; + /// assert_eq!(Some(2), hir.properties().minimum_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn minimum_len(&self) -> Option { + match *self { + Class::Unicode(ref x) => x.minimum_len(), + Class::Bytes(ref x) => x.minimum_len(), + } + } + + /// Returns the length, in bytes, of the longest string matched by this + /// character class. + /// + /// For non-empty byte oriented classes, this always returns `1`. For + /// non-empty Unicode oriented classes, this can return `1`, `2`, `3` or + /// `4`. For empty classes, `None` is returned. It is impossible for `0` to + /// be returned. + /// + /// # Example + /// + /// This example shows some examples of regexes and their corresponding + /// maximum length, if any. + /// + /// ``` + /// use regex_syntax::{hir::Properties, parse}; + /// + /// // The empty string has a max length of 0. 
+ /// let hir = parse(r"")?; + /// assert_eq!(Some(0), hir.properties().maximum_len()); + /// // As do other types of regexes that only match the empty string. + /// let hir = parse(r"^$\b\B")?; + /// assert_eq!(Some(0), hir.properties().maximum_len()); + /// // A regex that matches nothing has no maximum defined. + /// let hir = parse(r"[a&&b]")?; + /// assert_eq!(None, hir.properties().maximum_len()); + /// // Bounded repeats work as you expect. + /// let hir = parse(r"x{2,10}")?; + /// assert_eq!(Some(10), hir.properties().maximum_len()); + /// // An unbounded repeat means there is no maximum. + /// let hir = parse(r"x{2,}")?; + /// assert_eq!(None, hir.properties().maximum_len()); + /// // With Unicode enabled, \w can match up to 4 bytes! + /// let hir = parse(r"\w")?; + /// assert_eq!(Some(4), hir.properties().maximum_len()); + /// // Without Unicode enabled, \w matches at most 1 byte. + /// let hir = parse(r"(?-u)\w")?; + /// assert_eq!(Some(1), hir.properties().maximum_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn maximum_len(&self) -> Option { + match *self { + Class::Unicode(ref x) => x.maximum_len(), + Class::Bytes(ref x) => x.maximum_len(), + } + } + + /// Returns true if and only if this character class is empty. That is, + /// it has no elements. + /// + /// An empty character can never match anything, including an empty string. + pub fn is_empty(&self) -> bool { + match *self { + Class::Unicode(ref x) => x.ranges().is_empty(), + Class::Bytes(ref x) => x.ranges().is_empty(), + } + } + + /// If this class consists of exactly one element (whether a codepoint or a + /// byte), then return it as a literal byte string. + /// + /// If this class is empty or contains more than one element, then `None` + /// is returned. 
+ pub fn literal(&self) -> Option> { + match *self { + Class::Unicode(ref x) => x.literal(), + Class::Bytes(ref x) => x.literal(), + } + } +} + +impl core::fmt::Debug for Class { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + use crate::debug::Byte; + + let mut fmter = f.debug_set(); + match *self { + Class::Unicode(ref cls) => { + for r in cls.ranges().iter() { + fmter.entry(&(r.start..=r.end)); + } + } + Class::Bytes(ref cls) => { + for r in cls.ranges().iter() { + fmter.entry(&(Byte(r.start)..=Byte(r.end))); + } + } + } + fmter.finish() + } +} + +/// A set of characters represented by Unicode scalar values. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct ClassUnicode { + set: IntervalSet, +} + +impl ClassUnicode { + /// Create a new class from a sequence of ranges. + /// + /// The given ranges do not need to be in any specific order, and ranges + /// may overlap. Ranges will automatically be sorted into a canonical + /// non-overlapping order. + pub fn new(ranges: I) -> ClassUnicode + where + I: IntoIterator, + { + ClassUnicode { set: IntervalSet::new(ranges) } + } + + /// Create a new class with no ranges. + /// + /// An empty class matches nothing. That is, it is equivalent to + /// [`Hir::fail`]. + pub fn empty() -> ClassUnicode { + ClassUnicode::new(vec![]) + } + + /// Add a new range to this set. + pub fn push(&mut self, range: ClassUnicodeRange) { + self.set.push(range); + } + + /// Return an iterator over all ranges in this class. + /// + /// The iterator yields ranges in ascending order. + pub fn iter(&self) -> ClassUnicodeIter<'_> { + ClassUnicodeIter(self.set.iter()) + } + + /// Return the underlying ranges as a slice. + pub fn ranges(&self) -> &[ClassUnicodeRange] { + self.set.intervals() + } + + /// Expand this character class such that it contains all case folded + /// characters, according to Unicode's "simple" mapping. 
For example, if + /// this class consists of the range `a-z`, then applying case folding will + /// result in the class containing both the ranges `a-z` and `A-Z`. + /// + /// # Panics + /// + /// This routine panics when the case mapping data necessary for this + /// routine to complete is unavailable. This occurs when the `unicode-case` + /// feature is not enabled. + /// + /// Callers should prefer using `try_case_fold_simple` instead, which will + /// return an error instead of panicking. + pub fn case_fold_simple(&mut self) { + self.set + .case_fold_simple() + .expect("unicode-case feature must be enabled"); + } + + /// Expand this character class such that it contains all case folded + /// characters, according to Unicode's "simple" mapping. For example, if + /// this class consists of the range `a-z`, then applying case folding will + /// result in the class containing both the ranges `a-z` and `A-Z`. + /// + /// # Error + /// + /// This routine returns an error when the case mapping data necessary + /// for this routine to complete is unavailable. This occurs when the + /// `unicode-case` feature is not enabled. + pub fn try_case_fold_simple( + &mut self, + ) -> core::result::Result<(), CaseFoldError> { + self.set.case_fold_simple() + } + + /// Negate this character class. + /// + /// For all `c` where `c` is a Unicode scalar value, if `c` was in this + /// set, then it will not be in this set after negation. + pub fn negate(&mut self) { + self.set.negate(); + } + + /// Union this character class with the given character class, in place. + pub fn union(&mut self, other: &ClassUnicode) { + self.set.union(&other.set); + } + + /// Intersect this character class with the given character class, in + /// place. + pub fn intersect(&mut self, other: &ClassUnicode) { + self.set.intersect(&other.set); + } + + /// Subtract the given character class from this character class, in place. 
+ pub fn difference(&mut self, other: &ClassUnicode) { + self.set.difference(&other.set); + } + + /// Compute the symmetric difference of the given character classes, in + /// place. + /// + /// This computes the symmetric difference of two character classes. This + /// removes all elements in this class that are also in the given class, + /// but all adds all elements from the given class that aren't in this + /// class. That is, the class will contain all elements in either class, + /// but will not contain any elements that are in both classes. + pub fn symmetric_difference(&mut self, other: &ClassUnicode) { + self.set.symmetric_difference(&other.set); + } + + /// Returns true if and only if this character class will either match + /// nothing or only ASCII bytes. Stated differently, this returns false + /// if and only if this class contains a non-ASCII codepoint. + pub fn is_ascii(&self) -> bool { + self.set.intervals().last().map_or(true, |r| r.end <= '\x7F') + } + + /// Returns the length, in bytes, of the smallest string matched by this + /// character class. + /// + /// Returns `None` when the class is empty. + pub fn minimum_len(&self) -> Option { + let first = self.ranges().get(0)?; + // Correct because c1 < c2 implies c1.len_utf8() < c2.len_utf8(). + Some(first.start.len_utf8()) + } + + /// Returns the length, in bytes, of the longest string matched by this + /// character class. + /// + /// Returns `None` when the class is empty. + pub fn maximum_len(&self) -> Option { + let last = self.ranges().last()?; + // Correct because c1 < c2 implies c1.len_utf8() < c2.len_utf8(). + Some(last.end.len_utf8()) + } + + /// If this class consists of exactly one codepoint, then return it as + /// a literal byte string. + /// + /// If this class is empty or contains more than one codepoint, then `None` + /// is returned. 
+ pub fn literal(&self) -> Option> { + let rs = self.ranges(); + if rs.len() == 1 && rs[0].start == rs[0].end { + Some(rs[0].start.encode_utf8(&mut [0; 4]).to_string().into_bytes()) + } else { + None + } + } + + /// If this class consists of only ASCII ranges, then return its + /// corresponding and equivalent byte class. + pub fn to_byte_class(&self) -> Option { + if !self.is_ascii() { + return None; + } + Some(ClassBytes::new(self.ranges().iter().map(|r| { + // Since we are guaranteed that our codepoint range is ASCII, the + // 'u8::try_from' calls below are guaranteed to be correct. + ClassBytesRange { + start: u8::try_from(r.start).unwrap(), + end: u8::try_from(r.end).unwrap(), + } + }))) + } +} + +/// An iterator over all ranges in a Unicode character class. +/// +/// The lifetime `'a` refers to the lifetime of the underlying class. +#[derive(Debug)] +pub struct ClassUnicodeIter<'a>(IntervalSetIter<'a, ClassUnicodeRange>); + +impl<'a> Iterator for ClassUnicodeIter<'a> { + type Item = &'a ClassUnicodeRange; + + fn next(&mut self) -> Option<&'a ClassUnicodeRange> { + self.0.next() + } +} + +/// A single range of characters represented by Unicode scalar values. +/// +/// The range is closed. That is, the start and end of the range are included +/// in the range. 
+#[derive(Clone, Copy, Default, Eq, PartialEq, PartialOrd, Ord)] +pub struct ClassUnicodeRange { + start: char, + end: char, +} + +impl core::fmt::Debug for ClassUnicodeRange { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let start = if !self.start.is_whitespace() && !self.start.is_control() + { + self.start.to_string() + } else { + format!("0x{:X}", u32::from(self.start)) + }; + let end = if !self.end.is_whitespace() && !self.end.is_control() { + self.end.to_string() + } else { + format!("0x{:X}", u32::from(self.end)) + }; + f.debug_struct("ClassUnicodeRange") + .field("start", &start) + .field("end", &end) + .finish() + } +} + +impl Interval for ClassUnicodeRange { + type Bound = char; + + #[inline] + fn lower(&self) -> char { + self.start + } + #[inline] + fn upper(&self) -> char { + self.end + } + #[inline] + fn set_lower(&mut self, bound: char) { + self.start = bound; + } + #[inline] + fn set_upper(&mut self, bound: char) { + self.end = bound; + } + + /// Apply simple case folding to this Unicode scalar value range. + /// + /// Additional ranges are appended to the given vector. Canonical ordering + /// is *not* maintained in the given vector. + fn case_fold_simple( + &self, + ranges: &mut Vec, + ) -> Result<(), unicode::CaseFoldError> { + let mut folder = unicode::SimpleCaseFolder::new()?; + if !folder.overlaps(self.start, self.end) { + return Ok(()); + } + let (start, end) = (u32::from(self.start), u32::from(self.end)); + for cp in (start..=end).filter_map(char::from_u32) { + for &cp_folded in folder.mapping(cp) { + ranges.push(ClassUnicodeRange::new(cp_folded, cp_folded)); + } + } + Ok(()) + } +} + +impl ClassUnicodeRange { + /// Create a new Unicode scalar value range for a character class. + /// + /// The returned range is always in a canonical form. That is, the range + /// returned always satisfies the invariant that `start <= end`. 
+ pub fn new(start: char, end: char) -> ClassUnicodeRange { + ClassUnicodeRange::create(start, end) + } + + /// Return the start of this range. + /// + /// The start of a range is always less than or equal to the end of the + /// range. + pub fn start(&self) -> char { + self.start + } + + /// Return the end of this range. + /// + /// The end of a range is always greater than or equal to the start of the + /// range. + pub fn end(&self) -> char { + self.end + } + + /// Returns the number of codepoints in this range. + pub fn len(&self) -> usize { + let diff = 1 + u32::from(self.end) - u32::from(self.start); + // This is likely to panic in 16-bit targets since a usize can only fit + // 2^16. It's not clear what to do here, other than to return an error + // when building a Unicode class that contains a range whose length + // overflows usize. (Which, to be honest, is probably quite common on + // 16-bit targets. For example, this would imply that '.' and '\p{any}' + // would be impossible to build.) + usize::try_from(diff).expect("char class len fits in usize") + } +} + +/// A set of characters represented by arbitrary bytes. +/// +/// Each byte corresponds to one character. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct ClassBytes { + set: IntervalSet, +} + +impl ClassBytes { + /// Create a new class from a sequence of ranges. + /// + /// The given ranges do not need to be in any specific order, and ranges + /// may overlap. Ranges will automatically be sorted into a canonical + /// non-overlapping order. + pub fn new(ranges: I) -> ClassBytes + where + I: IntoIterator, + { + ClassBytes { set: IntervalSet::new(ranges) } + } + + /// Create a new class with no ranges. + /// + /// An empty class matches nothing. That is, it is equivalent to + /// [`Hir::fail`]. + pub fn empty() -> ClassBytes { + ClassBytes::new(vec![]) + } + + /// Add a new range to this set. 
+ pub fn push(&mut self, range: ClassBytesRange) { + self.set.push(range); + } + + /// Return an iterator over all ranges in this class. + /// + /// The iterator yields ranges in ascending order. + pub fn iter(&self) -> ClassBytesIter<'_> { + ClassBytesIter(self.set.iter()) + } + + /// Return the underlying ranges as a slice. + pub fn ranges(&self) -> &[ClassBytesRange] { + self.set.intervals() + } + + /// Expand this character class such that it contains all case folded + /// characters. For example, if this class consists of the range `a-z`, + /// then applying case folding will result in the class containing both the + /// ranges `a-z` and `A-Z`. + /// + /// Note that this only applies ASCII case folding, which is limited to the + /// characters `a-z` and `A-Z`. + pub fn case_fold_simple(&mut self) { + self.set.case_fold_simple().expect("ASCII case folding never fails"); + } + + /// Negate this byte class. + /// + /// For all `b` where `b` is a any byte, if `b` was in this set, then it + /// will not be in this set after negation. + pub fn negate(&mut self) { + self.set.negate(); + } + + /// Union this byte class with the given byte class, in place. + pub fn union(&mut self, other: &ClassBytes) { + self.set.union(&other.set); + } + + /// Intersect this byte class with the given byte class, in place. + pub fn intersect(&mut self, other: &ClassBytes) { + self.set.intersect(&other.set); + } + + /// Subtract the given byte class from this byte class, in place. + pub fn difference(&mut self, other: &ClassBytes) { + self.set.difference(&other.set); + } + + /// Compute the symmetric difference of the given byte classes, in place. + /// + /// This computes the symmetric difference of two byte classes. This + /// removes all elements in this class that are also in the given class, + /// but all adds all elements from the given class that aren't in this + /// class. 
That is, the class will contain all elements in either class, + /// but will not contain any elements that are in both classes. + pub fn symmetric_difference(&mut self, other: &ClassBytes) { + self.set.symmetric_difference(&other.set); + } + + /// Returns true if and only if this character class will either match + /// nothing or only ASCII bytes. Stated differently, this returns false + /// if and only if this class contains a non-ASCII byte. + pub fn is_ascii(&self) -> bool { + self.set.intervals().last().map_or(true, |r| r.end <= 0x7F) + } + + /// Returns the length, in bytes, of the smallest string matched by this + /// character class. + /// + /// Returns `None` when the class is empty. + pub fn minimum_len(&self) -> Option { + if self.ranges().is_empty() { + None + } else { + Some(1) + } + } + + /// Returns the length, in bytes, of the longest string matched by this + /// character class. + /// + /// Returns `None` when the class is empty. + pub fn maximum_len(&self) -> Option { + if self.ranges().is_empty() { + None + } else { + Some(1) + } + } + + /// If this class consists of exactly one byte, then return it as + /// a literal byte string. + /// + /// If this class is empty or contains more than one byte, then `None` + /// is returned. + pub fn literal(&self) -> Option> { + let rs = self.ranges(); + if rs.len() == 1 && rs[0].start == rs[0].end { + Some(vec![rs[0].start]) + } else { + None + } + } + + /// If this class consists of only ASCII ranges, then return its + /// corresponding and equivalent Unicode class. + pub fn to_unicode_class(&self) -> Option { + if !self.is_ascii() { + return None; + } + Some(ClassUnicode::new(self.ranges().iter().map(|r| { + // Since we are guaranteed that our byte range is ASCII, the + // 'char::from' calls below are correct and will not erroneously + // convert a raw byte value into its corresponding codepoint. 
+ ClassUnicodeRange { + start: char::from(r.start), + end: char::from(r.end), + } + }))) + } +} + +/// An iterator over all ranges in a byte character class. +/// +/// The lifetime `'a` refers to the lifetime of the underlying class. +#[derive(Debug)] +pub struct ClassBytesIter<'a>(IntervalSetIter<'a, ClassBytesRange>); + +impl<'a> Iterator for ClassBytesIter<'a> { + type Item = &'a ClassBytesRange; + + fn next(&mut self) -> Option<&'a ClassBytesRange> { + self.0.next() + } +} + +/// A single range of characters represented by arbitrary bytes. +/// +/// The range is closed. That is, the start and end of the range are included +/// in the range. +#[derive(Clone, Copy, Default, Eq, PartialEq, PartialOrd, Ord)] +pub struct ClassBytesRange { + start: u8, + end: u8, +} + +impl Interval for ClassBytesRange { + type Bound = u8; + + #[inline] + fn lower(&self) -> u8 { + self.start + } + #[inline] + fn upper(&self) -> u8 { + self.end + } + #[inline] + fn set_lower(&mut self, bound: u8) { + self.start = bound; + } + #[inline] + fn set_upper(&mut self, bound: u8) { + self.end = bound; + } + + /// Apply simple case folding to this byte range. Only ASCII case mappings + /// (for a-z) are applied. + /// + /// Additional ranges are appended to the given vector. Canonical ordering + /// is *not* maintained in the given vector. + fn case_fold_simple( + &self, + ranges: &mut Vec, + ) -> Result<(), unicode::CaseFoldError> { + if !ClassBytesRange::new(b'a', b'z').is_intersection_empty(self) { + let lower = cmp::max(self.start, b'a'); + let upper = cmp::min(self.end, b'z'); + ranges.push(ClassBytesRange::new(lower - 32, upper - 32)); + } + if !ClassBytesRange::new(b'A', b'Z').is_intersection_empty(self) { + let lower = cmp::max(self.start, b'A'); + let upper = cmp::min(self.end, b'Z'); + ranges.push(ClassBytesRange::new(lower + 32, upper + 32)); + } + Ok(()) + } +} + +impl ClassBytesRange { + /// Create a new byte range for a character class. 
+ /// + /// The returned range is always in a canonical form. That is, the range + /// returned always satisfies the invariant that `start <= end`. + pub fn new(start: u8, end: u8) -> ClassBytesRange { + ClassBytesRange::create(start, end) + } + + /// Return the start of this range. + /// + /// The start of a range is always less than or equal to the end of the + /// range. + pub fn start(&self) -> u8 { + self.start + } + + /// Return the end of this range. + /// + /// The end of a range is always greater than or equal to the start of the + /// range. + pub fn end(&self) -> u8 { + self.end + } + + /// Returns the number of bytes in this range. + pub fn len(&self) -> usize { + usize::from(self.end.checked_sub(self.start).unwrap()) + .checked_add(1) + .unwrap() + } +} + +impl core::fmt::Debug for ClassBytesRange { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("ClassBytesRange") + .field("start", &crate::debug::Byte(self.start)) + .field("end", &crate::debug::Byte(self.end)) + .finish() + } +} + +/// The high-level intermediate representation for a look-around assertion. +/// +/// An assertion match is always zero-length. Also called an "empty match." +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Look { + /// Match the beginning of text. Specifically, this matches at the starting + /// position of the input. + Start = 1 << 0, + /// Match the end of text. Specifically, this matches at the ending + /// position of the input. + End = 1 << 1, + /// Match the beginning of a line or the beginning of text. Specifically, + /// this matches at the starting position of the input, or at the position + /// immediately following a `\n` character. + StartLF = 1 << 2, + /// Match the end of a line or the end of text. Specifically, this matches + /// at the end position of the input, or at the position immediately + /// preceding a `\n` character. + EndLF = 1 << 3, + /// Match the beginning of a line or the beginning of text. 
Specifically, + /// this matches at the starting position of the input, or at the position + /// immediately following either a `\r` or `\n` character, but never after + /// a `\r` when a `\n` follows. + StartCRLF = 1 << 4, + /// Match the end of a line or the end of text. Specifically, this matches + /// at the end position of the input, or at the position immediately + /// preceding a `\r` or `\n` character, but never before a `\n` when a `\r` + /// precedes it. + EndCRLF = 1 << 5, + /// Match an ASCII-only word boundary. That is, this matches a position + /// where the left adjacent character and right adjacent character + /// correspond to a word and non-word or a non-word and word character. + WordAscii = 1 << 6, + /// Match an ASCII-only negation of a word boundary. + WordAsciiNegate = 1 << 7, + /// Match a Unicode-aware word boundary. That is, this matches a position + /// where the left adjacent character and right adjacent character + /// correspond to a word and non-word or a non-word and word character. + WordUnicode = 1 << 8, + /// Match a Unicode-aware negation of a word boundary. + WordUnicodeNegate = 1 << 9, + /// Match the start of an ASCII-only word boundary. That is, this matches a + /// position at either the beginning of the haystack or where the previous + /// character is not a word character and the following character is a word + /// character. + WordStartAscii = 1 << 10, + /// Match the end of an ASCII-only word boundary. That is, this matches + /// a position at either the end of the haystack or where the previous + /// character is a word character and the following character is not a word + /// character. + WordEndAscii = 1 << 11, + /// Match the start of a Unicode word boundary. That is, this matches a + /// position at either the beginning of the haystack or where the previous + /// character is not a word character and the following character is a word + /// character. 
+ WordStartUnicode = 1 << 12, + /// Match the end of a Unicode word boundary. That is, this matches a + /// position at either the end of the haystack or where the previous + /// character is a word character and the following character is not a word + /// character. + WordEndUnicode = 1 << 13, + /// Match the start half of an ASCII-only word boundary. That is, this + /// matches a position at either the beginning of the haystack or where the + /// previous character is not a word character. + WordStartHalfAscii = 1 << 14, + /// Match the end half of an ASCII-only word boundary. That is, this + /// matches a position at either the end of the haystack or where the + /// following character is not a word character. + WordEndHalfAscii = 1 << 15, + /// Match the start half of a Unicode word boundary. That is, this matches + /// a position at either the beginning of the haystack or where the + /// previous character is not a word character. + WordStartHalfUnicode = 1 << 16, + /// Match the end half of a Unicode word boundary. That is, this matches + /// a position at either the end of the haystack or where the following + /// character is not a word character. + WordEndHalfUnicode = 1 << 17, +} + +impl Look { + /// Flip the look-around assertion to its equivalent for reverse searches. + /// For example, `StartLF` gets translated to `EndLF`. + /// + /// Some assertions, such as `WordUnicode`, remain the same since they + /// match the same positions regardless of the direction of the search. 
+ #[inline] + pub const fn reversed(self) -> Look { + match self { + Look::Start => Look::End, + Look::End => Look::Start, + Look::StartLF => Look::EndLF, + Look::EndLF => Look::StartLF, + Look::StartCRLF => Look::EndCRLF, + Look::EndCRLF => Look::StartCRLF, + Look::WordAscii => Look::WordAscii, + Look::WordAsciiNegate => Look::WordAsciiNegate, + Look::WordUnicode => Look::WordUnicode, + Look::WordUnicodeNegate => Look::WordUnicodeNegate, + Look::WordStartAscii => Look::WordEndAscii, + Look::WordEndAscii => Look::WordStartAscii, + Look::WordStartUnicode => Look::WordEndUnicode, + Look::WordEndUnicode => Look::WordStartUnicode, + Look::WordStartHalfAscii => Look::WordEndHalfAscii, + Look::WordEndHalfAscii => Look::WordStartHalfAscii, + Look::WordStartHalfUnicode => Look::WordEndHalfUnicode, + Look::WordEndHalfUnicode => Look::WordStartHalfUnicode, + } + } + + /// Return the underlying representation of this look-around enumeration + /// as an integer. Giving the return value to the [`Look::from_repr`] + /// constructor is guaranteed to return the same look-around variant that + /// one started with within a semver compatible release of this crate. + #[inline] + pub const fn as_repr(self) -> u32 { + // AFAIK, 'as' is the only way to zero-cost convert an int enum to an + // actual int. + self as u32 + } + + /// Given the underlying representation of a `Look` value, return the + /// corresponding `Look` value if the representation is valid. Otherwise + /// `None` is returned. 
+ #[inline] + pub const fn from_repr(repr: u32) -> Option { + match repr { + 0b00_0000_0000_0000_0001 => Some(Look::Start), + 0b00_0000_0000_0000_0010 => Some(Look::End), + 0b00_0000_0000_0000_0100 => Some(Look::StartLF), + 0b00_0000_0000_0000_1000 => Some(Look::EndLF), + 0b00_0000_0000_0001_0000 => Some(Look::StartCRLF), + 0b00_0000_0000_0010_0000 => Some(Look::EndCRLF), + 0b00_0000_0000_0100_0000 => Some(Look::WordAscii), + 0b00_0000_0000_1000_0000 => Some(Look::WordAsciiNegate), + 0b00_0000_0001_0000_0000 => Some(Look::WordUnicode), + 0b00_0000_0010_0000_0000 => Some(Look::WordUnicodeNegate), + 0b00_0000_0100_0000_0000 => Some(Look::WordStartAscii), + 0b00_0000_1000_0000_0000 => Some(Look::WordEndAscii), + 0b00_0001_0000_0000_0000 => Some(Look::WordStartUnicode), + 0b00_0010_0000_0000_0000 => Some(Look::WordEndUnicode), + 0b00_0100_0000_0000_0000 => Some(Look::WordStartHalfAscii), + 0b00_1000_0000_0000_0000 => Some(Look::WordEndHalfAscii), + 0b01_0000_0000_0000_0000 => Some(Look::WordStartHalfUnicode), + 0b10_0000_0000_0000_0000 => Some(Look::WordEndHalfUnicode), + _ => None, + } + } + + /// Returns a convenient single codepoint representation of this + /// look-around assertion. Each assertion is guaranteed to be represented + /// by a distinct character. + /// + /// This is useful for succinctly representing a look-around assertion in + /// human friendly but succinct output intended for a programmer working on + /// regex internals. 
+ #[inline] + pub const fn as_char(self) -> char { + match self { + Look::Start => 'A', + Look::End => 'z', + Look::StartLF => '^', + Look::EndLF => '$', + Look::StartCRLF => 'r', + Look::EndCRLF => 'R', + Look::WordAscii => 'b', + Look::WordAsciiNegate => 'B', + Look::WordUnicode => '𝛃', + Look::WordUnicodeNegate => '𝚩', + Look::WordStartAscii => '<', + Look::WordEndAscii => '>', + Look::WordStartUnicode => '〈', + Look::WordEndUnicode => '〉', + Look::WordStartHalfAscii => '◁', + Look::WordEndHalfAscii => '▷', + Look::WordStartHalfUnicode => '◀', + Look::WordEndHalfUnicode => '▶', + } + } +} + +/// The high-level intermediate representation for a capturing group. +/// +/// A capturing group always has an index and a child expression. It may +/// also have a name associated with it (e.g., `(?P\w)`), but it's not +/// necessary. +/// +/// Note that there is no explicit representation of a non-capturing group +/// in a `Hir`. Instead, non-capturing grouping is handled automatically by +/// the recursive structure of the `Hir` itself. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Capture { + /// The capture index of the capture. + pub index: u32, + /// The name of the capture, if it exists. + pub name: Option>, + /// The expression inside the capturing group, which may be empty. + pub sub: Box, +} + +/// The high-level intermediate representation of a repetition operator. +/// +/// A repetition operator permits the repetition of an arbitrary +/// sub-expression. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Repetition { + /// The minimum range of the repetition. + /// + /// Note that special cases like `?`, `+` and `*` all get translated into + /// the ranges `{0,1}`, `{1,}` and `{0,}`, respectively. + /// + /// When `min` is zero, this expression can match the empty string + /// regardless of what its sub-expression is. + pub min: u32, + /// The maximum range of the repetition. 
+ /// + /// Note that when `max` is `None`, `min` acts as a lower bound but where + /// there is no upper bound. For something like `x{5}` where the min and + /// max are equivalent, `min` will be set to `5` and `max` will be set to + /// `Some(5)`. + pub max: Option, + /// Whether this repetition operator is greedy or not. A greedy operator + /// will match as much as it can. A non-greedy operator will match as + /// little as it can. + /// + /// Typically, operators are greedy by default and are only non-greedy when + /// a `?` suffix is used, e.g., `(expr)*` is greedy while `(expr)*?` is + /// not. However, this can be inverted via the `U` "ungreedy" flag. + pub greedy: bool, + /// The expression being repeated. + pub sub: Box, +} + +impl Repetition { + /// Returns a new repetition with the same `min`, `max` and `greedy` + /// values, but with its sub-expression replaced with the one given. + pub fn with(&self, sub: Hir) -> Repetition { + Repetition { + min: self.min, + max: self.max, + greedy: self.greedy, + sub: Box::new(sub), + } + } +} + +/// A type describing the different flavors of `.`. +/// +/// This type is meant to be used with [`Hir::dot`], which is a convenience +/// routine for building HIR values derived from the `.` regex. +#[non_exhaustive] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Dot { + /// Matches the UTF-8 encoding of any Unicode scalar value. + /// + /// This is equivalent to `(?su:.)` and also `\p{any}`. + AnyChar, + /// Matches any byte value. + /// + /// This is equivalent to `(?s-u:.)` and also `(?-u:[\x00-\xFF])`. + AnyByte, + /// Matches the UTF-8 encoding of any Unicode scalar value except for the + /// `char` given. + /// + /// This is equivalent to using `(?u-s:.)` with the line terminator set + /// to a particular ASCII byte. (Because of peculiarities in the regex + /// engines, a line terminator must be a single byte. 
It follows that when + /// UTF-8 mode is enabled, this single byte must also be a Unicode scalar + /// value. That is, ti must be ASCII.) + /// + /// (This and `AnyCharExceptLF` both exist because of legacy reasons. + /// `AnyCharExceptLF` will be dropped in the next breaking change release.) + AnyCharExcept(char), + /// Matches the UTF-8 encoding of any Unicode scalar value except for `\n`. + /// + /// This is equivalent to `(?u-s:.)` and also `[\p{any}--\n]`. + AnyCharExceptLF, + /// Matches the UTF-8 encoding of any Unicode scalar value except for `\r` + /// and `\n`. + /// + /// This is equivalent to `(?uR-s:.)` and also `[\p{any}--\r\n]`. + AnyCharExceptCRLF, + /// Matches any byte value except for the `u8` given. + /// + /// This is equivalent to using `(?-us:.)` with the line terminator set + /// to a particular ASCII byte. (Because of peculiarities in the regex + /// engines, a line terminator must be a single byte. It follows that when + /// UTF-8 mode is enabled, this single byte must also be a Unicode scalar + /// value. That is, ti must be ASCII.) + /// + /// (This and `AnyByteExceptLF` both exist because of legacy reasons. + /// `AnyByteExceptLF` will be dropped in the next breaking change release.) + AnyByteExcept(u8), + /// Matches any byte value except for `\n`. + /// + /// This is equivalent to `(?-su:.)` and also `(?-u:[[\x00-\xFF]--\n])`. + AnyByteExceptLF, + /// Matches any byte value except for `\r` and `\n`. + /// + /// This is equivalent to `(?R-su:.)` and also `(?-u:[[\x00-\xFF]--\r\n])`. + AnyByteExceptCRLF, +} + +/// A custom `Drop` impl is used for `HirKind` such that it uses constant stack +/// space but heap space proportional to the depth of the total `Hir`. 
+impl Drop for Hir { + fn drop(&mut self) { + use core::mem; + + match *self.kind() { + HirKind::Empty + | HirKind::Literal(_) + | HirKind::Class(_) + | HirKind::Look(_) => return, + HirKind::Capture(ref x) if x.sub.kind.subs().is_empty() => return, + HirKind::Repetition(ref x) if x.sub.kind.subs().is_empty() => { + return + } + HirKind::Concat(ref x) if x.is_empty() => return, + HirKind::Alternation(ref x) if x.is_empty() => return, + _ => {} + } + + let mut stack = vec![mem::replace(self, Hir::empty())]; + while let Some(mut expr) = stack.pop() { + match expr.kind { + HirKind::Empty + | HirKind::Literal(_) + | HirKind::Class(_) + | HirKind::Look(_) => {} + HirKind::Capture(ref mut x) => { + stack.push(mem::replace(&mut x.sub, Hir::empty())); + } + HirKind::Repetition(ref mut x) => { + stack.push(mem::replace(&mut x.sub, Hir::empty())); + } + HirKind::Concat(ref mut x) => { + stack.extend(x.drain(..)); + } + HirKind::Alternation(ref mut x) => { + stack.extend(x.drain(..)); + } + } + } + } +} + +/// A type that collects various properties of an HIR value. +/// +/// Properties are always scalar values and represent meta data that is +/// computed inductively on an HIR value. Properties are defined for all +/// HIR values. +/// +/// All methods on a `Properties` value take constant time and are meant to +/// be cheap to call. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Properties(Box); + +/// The property definition. It is split out so that we can box it, and +/// there by make `Properties` use less stack size. This is kind-of important +/// because every HIR value has a `Properties` attached to it. +/// +/// This does have the unfortunate consequence that creating any HIR value +/// always leads to at least one alloc for properties, but this is generally +/// true anyway (for pretty much all HirKinds except for look-arounds). 
#[derive(Clone, Debug, Eq, PartialEq)]
struct PropertiesI {
    minimum_len: Option<usize>,
    maximum_len: Option<usize>,
    look_set: LookSet,
    look_set_prefix: LookSet,
    look_set_suffix: LookSet,
    look_set_prefix_any: LookSet,
    look_set_suffix_any: LookSet,
    utf8: bool,
    explicit_captures_len: usize,
    static_explicit_captures_len: Option<usize>,
    literal: bool,
    alternation_literal: bool,
}

impl Properties {
    /// Returns the length (in bytes) of the smallest string matched by this
    /// HIR.
    ///
    /// A return value of `0` is possible and occurs when the HIR can match an
    /// empty string.
    ///
    /// `None` is returned when there is no minimum length. This occurs in
    /// precisely the cases where the HIR matches nothing. i.e., The language
    /// the regex matches is empty. An example of such a regex is `\P{any}`.
    #[inline]
    pub fn minimum_len(&self) -> Option<usize> {
        self.0.minimum_len
    }

    /// Returns the length (in bytes) of the longest string matched by this
    /// HIR.
    ///
    /// A return value of `0` is possible and occurs when nothing longer than
    /// the empty string is in the language described by this HIR.
    ///
    /// `None` is returned when there is no longest matching string. This
    /// occurs when the HIR matches nothing or when there is no upper bound on
    /// the length of matching strings. Example of such regexes are `\P{any}`
    /// (matches nothing) and `a+` (has no upper bound).
    #[inline]
    pub fn maximum_len(&self) -> Option<usize> {
        self.0.maximum_len
    }

    /// Returns a set of all look-around assertions that appear at least once
    /// in this HIR value.
    #[inline]
    pub fn look_set(&self) -> LookSet {
        self.0.look_set
    }

    /// Returns a set of all look-around assertions that appear as a prefix for
    /// this HIR value. That is, the set returned corresponds to the set of
    /// assertions that must be passed before matching any bytes in a haystack.
    ///
    /// For example, `hir.look_set_prefix().contains(Look::Start)` returns true
    /// if and only if the HIR is fully anchored at the start.
    #[inline]
    pub fn look_set_prefix(&self) -> LookSet {
        self.0.look_set_prefix
    }

    /// Returns a set of all look-around assertions that appear as a _possible_
    /// prefix for this HIR value. That is, the set returned corresponds to the
    /// set of assertions that _may_ be passed before matching any bytes in a
    /// haystack.
    ///
    /// For example, `hir.look_set_prefix_any().contains(Look::Start)` returns
    /// true if and only if it's possible for the regex to match through a
    /// anchored assertion before consuming any input.
    #[inline]
    pub fn look_set_prefix_any(&self) -> LookSet {
        self.0.look_set_prefix_any
    }

    /// Returns a set of all look-around assertions that appear as a suffix for
    /// this HIR value. That is, the set returned corresponds to the set of
    /// assertions that must be passed in order to be considered a match after
    /// all other consuming HIR expressions.
    ///
    /// For example, `hir.look_set_suffix().contains(Look::End)` returns true
    /// if and only if the HIR is fully anchored at the end.
    #[inline]
    pub fn look_set_suffix(&self) -> LookSet {
        self.0.look_set_suffix
    }

    /// Returns a set of all look-around assertions that appear as a _possible_
    /// suffix for this HIR value. That is, the set returned corresponds to the
    /// set of assertions that _may_ be passed before matching any bytes in a
    /// haystack.
    ///
    /// For example, `hir.look_set_suffix_any().contains(Look::End)` returns
    /// true if and only if it's possible for the regex to match through a
    /// anchored assertion at the end of a match without consuming any input.
    #[inline]
    pub fn look_set_suffix_any(&self) -> LookSet {
        self.0.look_set_suffix_any
    }

    /// Return true if and only if the corresponding HIR will always match
    /// valid UTF-8.
    ///
    /// When this returns false, then it is possible for this HIR expression to
    /// match invalid UTF-8, including by matching between the code units of
    /// a single UTF-8 encoded codepoint.
    ///
    /// Note that this returns true even when the corresponding HIR can match
    /// the empty string. Since an empty string can technically appear between
    /// UTF-8 code units, it is possible for a match to be reported that splits
    /// a codepoint which could in turn be considered matching invalid UTF-8.
    /// However, it is generally assumed that such empty matches are handled
    /// specially by the search routine if it is absolutely required that
    /// matches not split a codepoint.
    ///
    /// # Example
    ///
    /// This code example shows the UTF-8 property of a variety of patterns.
    ///
    /// ```
    /// use regex_syntax::{ParserBuilder, parse};
    ///
    /// // Examples of 'is_utf8() == true'.
    /// assert!(parse(r"a")?.properties().is_utf8());
    /// assert!(parse(r"[^a]")?.properties().is_utf8());
    /// assert!(parse(r".")?.properties().is_utf8());
    /// assert!(parse(r"\W")?.properties().is_utf8());
    /// assert!(parse(r"\b")?.properties().is_utf8());
    /// assert!(parse(r"\B")?.properties().is_utf8());
    /// assert!(parse(r"(?-u)\b")?.properties().is_utf8());
    /// assert!(parse(r"(?-u)\B")?.properties().is_utf8());
    /// // Unicode mode is enabled by default, and in
    /// // that mode, all \x hex escapes are treated as
    /// // codepoints. So this actually matches the UTF-8
    /// // encoding of U+00FF.
    /// assert!(parse(r"\xFF")?.properties().is_utf8());
    ///
    /// // Now we show examples of 'is_utf8() == false'.
    /// // The only way to do this is to force the parser
    /// // to permit invalid UTF-8, otherwise all of these
    /// // would fail to parse!
    /// let parse = |pattern| {
    ///     ParserBuilder::new().utf8(false).build().parse(pattern)
    /// };
    /// assert!(!parse(r"(?-u)[^a]")?.properties().is_utf8());
    /// assert!(!parse(r"(?-u).")?.properties().is_utf8());
    /// assert!(!parse(r"(?-u)\W")?.properties().is_utf8());
    /// // Conversely to the equivalent example above,
    /// // when Unicode mode is disabled, \x hex escapes
    /// // are treated as their raw byte values.
    /// assert!(!parse(r"(?-u)\xFF")?.properties().is_utf8());
    /// // Note that just because we disabled UTF-8 in the
    /// // parser doesn't mean we still can't use Unicode.
    /// // It is enabled by default, so \xFF is still
    /// // equivalent to matching the UTF-8 encoding of
    /// // U+00FF by default.
    /// assert!(parse(r"\xFF")?.properties().is_utf8());
    /// // Even though we use raw bytes that individually
    /// // are not valid UTF-8, when combined together, the
    /// // overall expression *does* match valid UTF-8!
    /// assert!(parse(r"(?-u)\xE2\x98\x83")?.properties().is_utf8());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn is_utf8(&self) -> bool {
        self.0.utf8
    }

    /// Returns the total number of explicit capturing groups in the
    /// corresponding HIR.
    ///
    /// Note that this does not include the implicit capturing group
    /// corresponding to the entire match that is typically included by regex
    /// engines.
    ///
    /// # Example
    ///
    /// This method will return `0` for `a` and `1` for `(a)`:
    ///
    /// ```
    /// use regex_syntax::parse;
    ///
    /// assert_eq!(0, parse("a")?.properties().explicit_captures_len());
    /// assert_eq!(1, parse("(a)")?.properties().explicit_captures_len());
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn explicit_captures_len(&self) -> usize {
        self.0.explicit_captures_len
    }

    /// Returns the total number of explicit capturing groups that appear in
    /// every possible match.
    ///
    /// If the number of capture groups can vary depending on the match, then
    /// this returns `None`. That is, a value is only returned when the number
    /// of matching groups is invariant or "static."
    ///
    /// Note that this does not include the implicit capturing group
    /// corresponding to the entire match.
    ///
    /// # Example
    ///
    /// This shows a few cases where a static number of capture groups is
    /// available and a few cases where it is not.
    ///
    /// ```
    /// use regex_syntax::parse;
    ///
    /// let len = |pattern| {
    ///     parse(pattern).map(|h| {
    ///         h.properties().static_explicit_captures_len()
    ///     })
    /// };
    ///
    /// assert_eq!(Some(0), len("a")?);
    /// assert_eq!(Some(1), len("(a)")?);
    /// assert_eq!(Some(1), len("(a)|(b)")?);
    /// assert_eq!(Some(2), len("(a)(b)|(c)(d)")?);
    /// assert_eq!(None, len("(a)|b")?);
    /// assert_eq!(None, len("a|(b)")?);
    /// assert_eq!(None, len("(b)*")?);
    /// assert_eq!(Some(1), len("(b)+")?);
    ///
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[inline]
    pub fn static_explicit_captures_len(&self) -> Option<usize> {
        self.0.static_explicit_captures_len
    }

    /// Return true if and only if this HIR is a simple literal. This is
    /// only true when this HIR expression is either itself a `Literal` or a
    /// concatenation of only `Literal`s.
    ///
    /// For example, `f` and `foo` are literals, but `f+`, `(foo)`, `foo()` and
    /// the empty string are not (even though they contain sub-expressions that
    /// are literals).
    #[inline]
    pub fn is_literal(&self) -> bool {
        self.0.literal
    }

    /// Return true if and only if this HIR is either a simple literal or an
    /// alternation of simple literals. This is only
    /// true when this HIR expression is either itself a `Literal` or a
    /// concatenation of only `Literal`s or an alternation of only `Literal`s.
    ///
    /// For example, `f`, `foo`, `a|b|c`, and `foo|bar|baz` are alternation
    /// literals, but `f+`, `(foo)`, `foo()`, and the empty pattern are not
    /// (even though they contain sub-expressions that are literals).
    #[inline]
    pub fn is_alternation_literal(&self) -> bool {
        self.0.alternation_literal
    }

    /// Returns the total amount of heap memory usage, in bytes, used by this
    /// `Properties` value.
    #[inline]
    pub fn memory_usage(&self) -> usize {
        core::mem::size_of::<PropertiesI>()
    }

    /// Returns a new set of properties that corresponds to the union of the
    /// iterator of properties given.
    ///
    /// This is useful when one has multiple `Hir` expressions and wants
    /// to combine them into a single alternation without constructing the
    /// corresponding `Hir`. This routine provides a way of combining the
    /// properties of each `Hir` expression into one set of properties
    /// representing the union of those expressions.
    ///
    /// # Example: union with HIRs that never match
    ///
    /// This example shows that unioning properties together with one that
    /// represents a regex that never matches will "poison" certain attributes,
    /// like the minimum and maximum lengths.
+ /// + /// ``` + /// use regex_syntax::{hir::Properties, parse}; + /// + /// let hir1 = parse("ab?c?")?; + /// assert_eq!(Some(1), hir1.properties().minimum_len()); + /// assert_eq!(Some(3), hir1.properties().maximum_len()); + /// + /// let hir2 = parse(r"[a&&b]")?; + /// assert_eq!(None, hir2.properties().minimum_len()); + /// assert_eq!(None, hir2.properties().maximum_len()); + /// + /// let hir3 = parse(r"wxy?z?")?; + /// assert_eq!(Some(2), hir3.properties().minimum_len()); + /// assert_eq!(Some(4), hir3.properties().maximum_len()); + /// + /// let unioned = Properties::union([ + /// hir1.properties(), + /// hir2.properties(), + /// hir3.properties(), + /// ]); + /// assert_eq!(None, unioned.minimum_len()); + /// assert_eq!(None, unioned.maximum_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + /// + /// The maximum length can also be "poisoned" by a pattern that has no + /// upper bound on the length of a match. The minimum length remains + /// unaffected: + /// + /// ``` + /// use regex_syntax::{hir::Properties, parse}; + /// + /// let hir1 = parse("ab?c?")?; + /// assert_eq!(Some(1), hir1.properties().minimum_len()); + /// assert_eq!(Some(3), hir1.properties().maximum_len()); + /// + /// let hir2 = parse(r"a+")?; + /// assert_eq!(Some(1), hir2.properties().minimum_len()); + /// assert_eq!(None, hir2.properties().maximum_len()); + /// + /// let hir3 = parse(r"wxy?z?")?; + /// assert_eq!(Some(2), hir3.properties().minimum_len()); + /// assert_eq!(Some(4), hir3.properties().maximum_len()); + /// + /// let unioned = Properties::union([ + /// hir1.properties(), + /// hir2.properties(), + /// hir3.properties(), + /// ]); + /// assert_eq!(Some(1), unioned.minimum_len()); + /// assert_eq!(None, unioned.maximum_len()); + /// + /// # Ok::<(), Box>(()) + /// ``` + pub fn union(props: I) -> Properties + where + I: IntoIterator, + P: core::borrow::Borrow, + { + let mut it = props.into_iter().peekable(); + // While empty alternations aren't possible, we still behave as 
if they + // are. When we have an empty alternate, then clearly the look-around + // prefix and suffix is empty. Otherwise, it is the intersection of all + // prefixes and suffixes (respectively) of the branches. + let fix = if it.peek().is_none() { + LookSet::empty() + } else { + LookSet::full() + }; + // And also, an empty alternate means we have 0 static capture groups, + // but we otherwise start with the number corresponding to the first + // alternate. If any subsequent alternate has a different number of + // static capture groups, then we overall have a variation and not a + // static number of groups. + let static_explicit_captures_len = + it.peek().and_then(|p| p.borrow().static_explicit_captures_len()); + // The base case is an empty alternation, which matches nothing. + // Note though that empty alternations aren't possible, because the + // Hir::alternation smart constructor rewrites those as empty character + // classes. + let mut props = PropertiesI { + minimum_len: None, + maximum_len: None, + look_set: LookSet::empty(), + look_set_prefix: fix, + look_set_suffix: fix, + look_set_prefix_any: LookSet::empty(), + look_set_suffix_any: LookSet::empty(), + utf8: true, + explicit_captures_len: 0, + static_explicit_captures_len, + literal: false, + alternation_literal: true, + }; + let (mut min_poisoned, mut max_poisoned) = (false, false); + // Handle properties that need to visit every child hir. 
+ for prop in it { + let p = prop.borrow(); + props.look_set.set_union(p.look_set()); + props.look_set_prefix.set_intersect(p.look_set_prefix()); + props.look_set_suffix.set_intersect(p.look_set_suffix()); + props.look_set_prefix_any.set_union(p.look_set_prefix_any()); + props.look_set_suffix_any.set_union(p.look_set_suffix_any()); + props.utf8 = props.utf8 && p.is_utf8(); + props.explicit_captures_len = props + .explicit_captures_len + .saturating_add(p.explicit_captures_len()); + if props.static_explicit_captures_len + != p.static_explicit_captures_len() + { + props.static_explicit_captures_len = None; + } + props.alternation_literal = + props.alternation_literal && p.is_literal(); + if !min_poisoned { + if let Some(xmin) = p.minimum_len() { + if props.minimum_len.map_or(true, |pmin| xmin < pmin) { + props.minimum_len = Some(xmin); + } + } else { + props.minimum_len = None; + min_poisoned = true; + } + } + if !max_poisoned { + if let Some(xmax) = p.maximum_len() { + if props.maximum_len.map_or(true, |pmax| xmax > pmax) { + props.maximum_len = Some(xmax); + } + } else { + props.maximum_len = None; + max_poisoned = true; + } + } + } + Properties(Box::new(props)) + } +} + +impl Properties { + /// Create a new set of HIR properties for an empty regex. + fn empty() -> Properties { + let inner = PropertiesI { + minimum_len: Some(0), + maximum_len: Some(0), + look_set: LookSet::empty(), + look_set_prefix: LookSet::empty(), + look_set_suffix: LookSet::empty(), + look_set_prefix_any: LookSet::empty(), + look_set_suffix_any: LookSet::empty(), + // It is debatable whether an empty regex always matches at valid + // UTF-8 boundaries. Strictly speaking, at a byte oriented view, + // it is clearly false. There are, for example, many empty strings + // between the bytes encoding a '☃'. + // + // However, when Unicode mode is enabled, the fundamental atom + // of matching is really a codepoint. 
And in that scenario, an + // empty regex is defined to only match at valid UTF-8 boundaries + // and to never split a codepoint. It just so happens that this + // enforcement is somewhat tricky to do for regexes that match + // the empty string inside regex engines themselves. It usually + // requires some layer above the regex engine to filter out such + // matches. + // + // In any case, 'true' is really the only coherent option. If it + // were false, for example, then 'a*' would also need to be false + // since it too can match the empty string. + utf8: true, + explicit_captures_len: 0, + static_explicit_captures_len: Some(0), + literal: false, + alternation_literal: false, + }; + Properties(Box::new(inner)) + } + + /// Create a new set of HIR properties for a literal regex. + fn literal(lit: &Literal) -> Properties { + let inner = PropertiesI { + minimum_len: Some(lit.0.len()), + maximum_len: Some(lit.0.len()), + look_set: LookSet::empty(), + look_set_prefix: LookSet::empty(), + look_set_suffix: LookSet::empty(), + look_set_prefix_any: LookSet::empty(), + look_set_suffix_any: LookSet::empty(), + utf8: core::str::from_utf8(&lit.0).is_ok(), + explicit_captures_len: 0, + static_explicit_captures_len: Some(0), + literal: true, + alternation_literal: true, + }; + Properties(Box::new(inner)) + } + + /// Create a new set of HIR properties for a character class. + fn class(class: &Class) -> Properties { + let inner = PropertiesI { + minimum_len: class.minimum_len(), + maximum_len: class.maximum_len(), + look_set: LookSet::empty(), + look_set_prefix: LookSet::empty(), + look_set_suffix: LookSet::empty(), + look_set_prefix_any: LookSet::empty(), + look_set_suffix_any: LookSet::empty(), + utf8: class.is_utf8(), + explicit_captures_len: 0, + static_explicit_captures_len: Some(0), + literal: false, + alternation_literal: false, + }; + Properties(Box::new(inner)) + } + + /// Create a new set of HIR properties for a look-around assertion. 
    fn look(look: Look) -> Properties {
        let inner = PropertiesI {
            // An assertion consumes no bytes, so min/max lengths are zero.
            minimum_len: Some(0),
            maximum_len: Some(0),
            look_set: LookSet::singleton(look),
            look_set_prefix: LookSet::singleton(look),
            look_set_suffix: LookSet::singleton(look),
            look_set_prefix_any: LookSet::singleton(look),
            look_set_suffix_any: LookSet::singleton(look),
            // This requires a little explanation. Basically, we don't consider
            // matching an empty string to be equivalent to matching invalid
            // UTF-8, even though technically matching every empty string will
            // split the UTF-8 encoding of a single codepoint when treating a
            // UTF-8 encoded string as a sequence of bytes. Our defense here is
            // that in such a case, a codepoint should logically be treated as
            // the fundamental atom for matching, and thus the only valid match
            // points are between codepoints and not bytes.
            //
            // More practically, this is true here because it's also true
            // for 'Hir::empty()', otherwise something like 'a*' would be
            // considered to match invalid UTF-8. That in turn makes this
            // property borderline useless.
            utf8: true,
            explicit_captures_len: 0,
            static_explicit_captures_len: Some(0),
            literal: false,
            alternation_literal: false,
        };
        Properties(Box::new(inner))
    }

    /// Create a new set of HIR properties for a repetition.
    fn repetition(rep: &Repetition) -> Properties {
        let p = rep.sub.properties();
        // min length scales by the minimum repetition count; saturating
        // because the minimum is only a lower bound.
        let minimum_len = p.minimum_len().map(|child_min| {
            let rep_min = usize::try_from(rep.min).unwrap_or(usize::MAX);
            child_min.saturating_mul(rep_min)
        });
        // max length is only known when both the repetition and the child
        // have an upper bound, and their product doesn't overflow.
        let maximum_len = rep.max.and_then(|rep_max| {
            let rep_max = usize::try_from(rep_max).ok()?;
            let child_max = p.maximum_len()?;
            child_max.checked_mul(rep_max)
        });

        let mut inner = PropertiesI {
            minimum_len,
            maximum_len,
            look_set: p.look_set(),
            look_set_prefix: LookSet::empty(),
            look_set_suffix: LookSet::empty(),
            look_set_prefix_any: p.look_set_prefix_any(),
            look_set_suffix_any: p.look_set_suffix_any(),
            utf8: p.is_utf8(),
            explicit_captures_len: p.explicit_captures_len(),
            static_explicit_captures_len: p.static_explicit_captures_len(),
            literal: false,
            alternation_literal: false,
        };
        // If the repetition operator can match the empty string, then its
        // lookset prefix and suffixes themselves remain empty since they are
        // no longer required to match.
        if rep.min > 0 {
            inner.look_set_prefix = p.look_set_prefix();
            inner.look_set_suffix = p.look_set_suffix();
        }
        // If the static captures len of the sub-expression is not known or
        // is greater than zero, then it automatically propagates to the
        // repetition, regardless of the repetition. Otherwise, it might
        // change, but only when the repetition can match 0 times.
        if rep.min == 0
            && inner.static_explicit_captures_len.map_or(false, |len| len > 0)
        {
            // If we require a match 0 times, then our captures len is
            // guaranteed to be zero. Otherwise, if we *can* match the empty
            // string, then it's impossible to know how many captures will be
            // in the resulting match.
            if rep.max == Some(0) {
                inner.static_explicit_captures_len = Some(0);
            } else {
                inner.static_explicit_captures_len = None;
            }
        }
        Properties(Box::new(inner))
    }

    /// Create a new set of HIR properties for a capture.
    fn capture(capture: &Capture) -> Properties {
        let p = capture.sub.properties();
        // A capture group adds one to both capture counts and inherits every
        // other property from its child unchanged.
        Properties(Box::new(PropertiesI {
            explicit_captures_len: p.explicit_captures_len().saturating_add(1),
            static_explicit_captures_len: p
                .static_explicit_captures_len()
                .map(|len| len.saturating_add(1)),
            literal: false,
            alternation_literal: false,
            ..*p.0.clone()
        }))
    }

    /// Create a new set of HIR properties for a concatenation.
    fn concat(concat: &[Hir]) -> Properties {
        // The base case is an empty concatenation, which matches the empty
        // string. Note though that empty concatenations aren't possible,
        // because the Hir::concat smart constructor rewrites those as
        // Hir::empty.
        let mut props = PropertiesI {
            minimum_len: Some(0),
            maximum_len: Some(0),
            look_set: LookSet::empty(),
            look_set_prefix: LookSet::empty(),
            look_set_suffix: LookSet::empty(),
            look_set_prefix_any: LookSet::empty(),
            look_set_suffix_any: LookSet::empty(),
            utf8: true,
            explicit_captures_len: 0,
            static_explicit_captures_len: Some(0),
            literal: true,
            alternation_literal: true,
        };
        // Handle properties that need to visit every child hir.
        for x in concat.iter() {
            let p = x.properties();
            props.look_set.set_union(p.look_set());
            props.utf8 = props.utf8 && p.is_utf8();
            props.explicit_captures_len = props
                .explicit_captures_len
                .saturating_add(p.explicit_captures_len());
            // Static capture counts add up; if any child's count is unknown,
            // the concatenation's is unknown too.
            props.static_explicit_captures_len = p
                .static_explicit_captures_len()
                .and_then(|len1| {
                    Some((len1, props.static_explicit_captures_len?))
                })
                .and_then(|(len1, len2)| Some(len1.saturating_add(len2)));
            props.literal = props.literal && p.is_literal();
            props.alternation_literal =
                props.alternation_literal && p.is_alternation_literal();
            if let Some(minimum_len) = props.minimum_len {
                match p.minimum_len() {
                    None => props.minimum_len = None,
                    Some(len) => {
                        // We use saturating arithmetic here because the
                        // minimum is just a lower bound. We can't go any
                        // higher than what our number types permit.
                        props.minimum_len =
                            Some(minimum_len.saturating_add(len));
                    }
                }
            }
            if let Some(maximum_len) = props.maximum_len {
                match p.maximum_len() {
                    None => props.maximum_len = None,
                    Some(len) => {
                        props.maximum_len = maximum_len.checked_add(len)
                    }
                }
            }
        }
        // Handle the prefix properties, which only requires visiting
        // child exprs until one matches more than the empty string.
        let mut it = concat.iter();
        while let Some(x) = it.next() {
            props.look_set_prefix.set_union(x.properties().look_set_prefix());
            props
                .look_set_prefix_any
                .set_union(x.properties().look_set_prefix_any());
            if x.properties().maximum_len().map_or(true, |x| x > 0) {
                break;
            }
        }
        // Same thing for the suffix properties, but in reverse.
        let mut it = concat.iter().rev();
        while let Some(x) = it.next() {
            props.look_set_suffix.set_union(x.properties().look_set_suffix());
            props
                .look_set_suffix_any
                .set_union(x.properties().look_set_suffix_any());
            if x.properties().maximum_len().map_or(true, |x| x > 0) {
                break;
            }
        }
        Properties(Box::new(props))
    }

    /// Create a new set of HIR properties for an alternation.
    fn alternation(alts: &[Hir]) -> Properties {
        Properties::union(alts.iter().map(|hir| hir.properties()))
    }
}

/// A set of look-around assertions.
///
/// This is useful for efficiently tracking look-around assertions. For
/// example, an [`Hir`] provides properties that return `LookSet`s.
#[derive(Clone, Copy, Default, Eq, PartialEq)]
pub struct LookSet {
    /// The underlying representation this set is exposed to make it possible
    /// to store it somewhere efficiently. The representation is that
    /// of a bitset, where each assertion occupies bit `i` where `i =
    /// Look::as_repr()`.
    ///
    /// Note that users of this internal representation must permit the full
    /// range of `u16` values to be represented.
For example, even if the + /// current implementation only makes use of the 10 least significant bits, + /// it may use more bits in a future semver compatible release. + pub bits: u32, +} + +impl LookSet { + /// Create an empty set of look-around assertions. + #[inline] + pub fn empty() -> LookSet { + LookSet { bits: 0 } + } + + /// Create a full set of look-around assertions. + /// + /// This set contains all possible look-around assertions. + #[inline] + pub fn full() -> LookSet { + LookSet { bits: !0 } + } + + /// Create a look-around set containing the look-around assertion given. + /// + /// This is a convenience routine for creating an empty set and inserting + /// one look-around assertions. + #[inline] + pub fn singleton(look: Look) -> LookSet { + LookSet::empty().insert(look) + } + + /// Returns the total number of look-around assertions in this set. + #[inline] + pub fn len(self) -> usize { + // OK because max value always fits in a u8, which in turn always + // fits in a usize, regardless of target. + usize::try_from(self.bits.count_ones()).unwrap() + } + + /// Returns true if and only if this set is empty. + #[inline] + pub fn is_empty(self) -> bool { + self.len() == 0 + } + + /// Returns true if and only if the given look-around assertion is in this + /// set. + #[inline] + pub fn contains(self, look: Look) -> bool { + self.bits & look.as_repr() != 0 + } + + /// Returns true if and only if this set contains any anchor assertions. + /// This includes both "start/end of haystack" and "start/end of line." + #[inline] + pub fn contains_anchor(&self) -> bool { + self.contains_anchor_haystack() || self.contains_anchor_line() + } + + /// Returns true if and only if this set contains any "start/end of + /// haystack" anchors. This doesn't include "start/end of line" anchors. 
+ #[inline] + pub fn contains_anchor_haystack(&self) -> bool { + self.contains(Look::Start) || self.contains(Look::End) + } + + /// Returns true if and only if this set contains any "start/end of line" + /// anchors. This doesn't include "start/end of haystack" anchors. This + /// includes both `\n` line anchors and CRLF (`\r\n`) aware line anchors. + #[inline] + pub fn contains_anchor_line(&self) -> bool { + self.contains(Look::StartLF) + || self.contains(Look::EndLF) + || self.contains(Look::StartCRLF) + || self.contains(Look::EndCRLF) + } + + /// Returns true if and only if this set contains any "start/end of line" + /// anchors that only treat `\n` as line terminators. This does not include + /// haystack anchors or CRLF aware line anchors. + #[inline] + pub fn contains_anchor_lf(&self) -> bool { + self.contains(Look::StartLF) || self.contains(Look::EndLF) + } + + /// Returns true if and only if this set contains any "start/end of line" + /// anchors that are CRLF-aware. This doesn't include "start/end of + /// haystack" or "start/end of line-feed" anchors. + #[inline] + pub fn contains_anchor_crlf(&self) -> bool { + self.contains(Look::StartCRLF) || self.contains(Look::EndCRLF) + } + + /// Returns true if and only if this set contains any word boundary or + /// negated word boundary assertions. This include both Unicode and ASCII + /// word boundaries. + #[inline] + pub fn contains_word(self) -> bool { + self.contains_word_unicode() || self.contains_word_ascii() + } + + /// Returns true if and only if this set contains any Unicode word boundary + /// or negated Unicode word boundary assertions. 
+ #[inline] + pub fn contains_word_unicode(self) -> bool { + self.contains(Look::WordUnicode) + || self.contains(Look::WordUnicodeNegate) + || self.contains(Look::WordStartUnicode) + || self.contains(Look::WordEndUnicode) + || self.contains(Look::WordStartHalfUnicode) + || self.contains(Look::WordEndHalfUnicode) + } + + /// Returns true if and only if this set contains any ASCII word boundary + /// or negated ASCII word boundary assertions. + #[inline] + pub fn contains_word_ascii(self) -> bool { + self.contains(Look::WordAscii) + || self.contains(Look::WordAsciiNegate) + || self.contains(Look::WordStartAscii) + || self.contains(Look::WordEndAscii) + || self.contains(Look::WordStartHalfAscii) + || self.contains(Look::WordEndHalfAscii) + } + + /// Returns an iterator over all of the look-around assertions in this set. + #[inline] + pub fn iter(self) -> LookSetIter { + LookSetIter { set: self } + } + + /// Return a new set that is equivalent to the original, but with the given + /// assertion added to it. If the assertion is already in the set, then the + /// returned set is equivalent to the original. + #[inline] + pub fn insert(self, look: Look) -> LookSet { + LookSet { bits: self.bits | look.as_repr() } + } + + /// Updates this set in place with the result of inserting the given + /// assertion into this set. + #[inline] + pub fn set_insert(&mut self, look: Look) { + *self = self.insert(look); + } + + /// Return a new set that is equivalent to the original, but with the given + /// assertion removed from it. If the assertion is not in the set, then the + /// returned set is equivalent to the original. + #[inline] + pub fn remove(self, look: Look) -> LookSet { + LookSet { bits: self.bits & !look.as_repr() } + } + + /// Updates this set in place with the result of removing the given + /// assertion from this set. 
+ #[inline] + pub fn set_remove(&mut self, look: Look) { + *self = self.remove(look); + } + + /// Returns a new set that is the result of subtracting the given set from + /// this set. + #[inline] + pub fn subtract(self, other: LookSet) -> LookSet { + LookSet { bits: self.bits & !other.bits } + } + + /// Updates this set in place with the result of subtracting the given set + /// from this set. + #[inline] + pub fn set_subtract(&mut self, other: LookSet) { + *self = self.subtract(other); + } + + /// Returns a new set that is the union of this and the one given. + #[inline] + pub fn union(self, other: LookSet) -> LookSet { + LookSet { bits: self.bits | other.bits } + } + + /// Updates this set in place with the result of unioning it with the one + /// given. + #[inline] + pub fn set_union(&mut self, other: LookSet) { + *self = self.union(other); + } + + /// Returns a new set that is the intersection of this and the one given. + #[inline] + pub fn intersect(self, other: LookSet) -> LookSet { + LookSet { bits: self.bits & other.bits } + } + + /// Updates this set in place with the result of intersecting it with the + /// one given. + #[inline] + pub fn set_intersect(&mut self, other: LookSet) { + *self = self.intersect(other); + } + + /// Return a `LookSet` from the slice given as a native endian 32-bit + /// integer. + /// + /// # Panics + /// + /// This panics if `slice.len() < 4`. + #[inline] + pub fn read_repr(slice: &[u8]) -> LookSet { + let bits = u32::from_ne_bytes(slice[..4].try_into().unwrap()); + LookSet { bits } + } + + /// Write a `LookSet` as a native endian 32-bit integer to the beginning + /// of the slice given. + /// + /// # Panics + /// + /// This panics if `slice.len() < 4`. 
+ #[inline] + pub fn write_repr(self, slice: &mut [u8]) { + let raw = self.bits.to_ne_bytes(); + slice[0] = raw[0]; + slice[1] = raw[1]; + slice[2] = raw[2]; + slice[3] = raw[3]; + } +} + +impl core::fmt::Debug for LookSet { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if self.is_empty() { + return write!(f, "∅"); + } + for look in self.iter() { + write!(f, "{}", look.as_char())?; + } + Ok(()) + } +} + +/// An iterator over all look-around assertions in a [`LookSet`]. +/// +/// This iterator is created by [`LookSet::iter`]. +#[derive(Clone, Debug)] +pub struct LookSetIter { + set: LookSet, +} + +impl Iterator for LookSetIter { + type Item = Look; + + #[inline] + fn next(&mut self) -> Option { + if self.set.is_empty() { + return None; + } + // We'll never have more than u8::MAX distinct look-around assertions, + // so 'bit' will always fit into a u16. + let bit = u16::try_from(self.set.bits.trailing_zeros()).unwrap(); + let look = Look::from_repr(1 << bit)?; + self.set = self.set.remove(look); + Some(look) + } +} + +/// Given a sequence of HIR values where each value corresponds to a Unicode +/// class (or an all-ASCII byte class), return a single Unicode class +/// corresponding to the union of the classes found. +fn class_chars(hirs: &[Hir]) -> Option { + let mut cls = ClassUnicode::new(vec![]); + for hir in hirs.iter() { + match *hir.kind() { + HirKind::Class(Class::Unicode(ref cls2)) => { + cls.union(cls2); + } + HirKind::Class(Class::Bytes(ref cls2)) => { + cls.union(&cls2.to_unicode_class()?); + } + _ => return None, + }; + } + Some(Class::Unicode(cls)) +} + +/// Given a sequence of HIR values where each value corresponds to a byte class +/// (or an all-ASCII Unicode class), return a single byte class corresponding +/// to the union of the classes found. 
+fn class_bytes(hirs: &[Hir]) -> Option { + let mut cls = ClassBytes::new(vec![]); + for hir in hirs.iter() { + match *hir.kind() { + HirKind::Class(Class::Unicode(ref cls2)) => { + cls.union(&cls2.to_byte_class()?); + } + HirKind::Class(Class::Bytes(ref cls2)) => { + cls.union(cls2); + } + _ => return None, + }; + } + Some(Class::Bytes(cls)) +} + +/// Given a sequence of HIR values where each value corresponds to a literal +/// that is a single `char`, return that sequence of `char`s. Otherwise return +/// None. No deduplication is done. +fn singleton_chars(hirs: &[Hir]) -> Option> { + let mut singletons = vec![]; + for hir in hirs.iter() { + let literal = match *hir.kind() { + HirKind::Literal(Literal(ref bytes)) => bytes, + _ => return None, + }; + let ch = match crate::debug::utf8_decode(literal) { + None => return None, + Some(Err(_)) => return None, + Some(Ok(ch)) => ch, + }; + if literal.len() != ch.len_utf8() { + return None; + } + singletons.push(ch); + } + Some(singletons) +} + +/// Given a sequence of HIR values where each value corresponds to a literal +/// that is a single byte, return that sequence of bytes. Otherwise return +/// None. No deduplication is done. +fn singleton_bytes(hirs: &[Hir]) -> Option> { + let mut singletons = vec![]; + for hir in hirs.iter() { + let literal = match *hir.kind() { + HirKind::Literal(Literal(ref bytes)) => bytes, + _ => return None, + }; + if literal.len() != 1 { + return None; + } + singletons.push(literal[0]); + } + Some(singletons) +} + +/// Looks for a common prefix in the list of alternation branches given. If one +/// is found, then an equivalent but (hopefully) simplified Hir is returned. +/// Otherwise, the original given list of branches is returned unmodified. +/// +/// This is not quite as good as it could be. Right now, it requires that +/// all branches are 'Concat' expressions. It also doesn't do well with +/// literals. 
For example, given 'foofoo|foobar', it will not refactor it to +/// 'foo(?:foo|bar)' because literals are flattened into their own special +/// concatenation. (One wonders if perhaps 'Literal' should be a single atom +/// instead of a string of bytes because of this. Otherwise, handling the +/// current representation in this routine will be pretty gnarly. Sigh.) +fn lift_common_prefix(hirs: Vec) -> Result> { + if hirs.len() <= 1 { + return Err(hirs); + } + let mut prefix = match hirs[0].kind() { + HirKind::Concat(ref xs) => &**xs, + _ => return Err(hirs), + }; + if prefix.is_empty() { + return Err(hirs); + } + for h in hirs.iter().skip(1) { + let concat = match h.kind() { + HirKind::Concat(ref xs) => xs, + _ => return Err(hirs), + }; + let common_len = prefix + .iter() + .zip(concat.iter()) + .take_while(|(x, y)| x == y) + .count(); + prefix = &prefix[..common_len]; + if prefix.is_empty() { + return Err(hirs); + } + } + let len = prefix.len(); + assert_ne!(0, len); + let mut prefix_concat = vec![]; + let mut suffix_alts = vec![]; + for h in hirs { + let mut concat = match h.into_kind() { + HirKind::Concat(xs) => xs, + // We required all sub-expressions to be + // concats above, so we're only here if we + // have a concat. 
+ _ => unreachable!(), + }; + suffix_alts.push(Hir::concat(concat.split_off(len))); + if prefix_concat.is_empty() { + prefix_concat = concat; + } + } + let mut concat = prefix_concat; + concat.push(Hir::alternation(suffix_alts)); + Ok(Hir::concat(concat)) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn uclass(ranges: &[(char, char)]) -> ClassUnicode { + let ranges: Vec = ranges + .iter() + .map(|&(s, e)| ClassUnicodeRange::new(s, e)) + .collect(); + ClassUnicode::new(ranges) + } + + fn bclass(ranges: &[(u8, u8)]) -> ClassBytes { + let ranges: Vec = + ranges.iter().map(|&(s, e)| ClassBytesRange::new(s, e)).collect(); + ClassBytes::new(ranges) + } + + fn uranges(cls: &ClassUnicode) -> Vec<(char, char)> { + cls.iter().map(|x| (x.start(), x.end())).collect() + } + + #[cfg(feature = "unicode-case")] + fn ucasefold(cls: &ClassUnicode) -> ClassUnicode { + let mut cls_ = cls.clone(); + cls_.case_fold_simple(); + cls_ + } + + fn uunion(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { + let mut cls_ = cls1.clone(); + cls_.union(cls2); + cls_ + } + + fn uintersect(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { + let mut cls_ = cls1.clone(); + cls_.intersect(cls2); + cls_ + } + + fn udifference(cls1: &ClassUnicode, cls2: &ClassUnicode) -> ClassUnicode { + let mut cls_ = cls1.clone(); + cls_.difference(cls2); + cls_ + } + + fn usymdifference( + cls1: &ClassUnicode, + cls2: &ClassUnicode, + ) -> ClassUnicode { + let mut cls_ = cls1.clone(); + cls_.symmetric_difference(cls2); + cls_ + } + + fn unegate(cls: &ClassUnicode) -> ClassUnicode { + let mut cls_ = cls.clone(); + cls_.negate(); + cls_ + } + + fn branges(cls: &ClassBytes) -> Vec<(u8, u8)> { + cls.iter().map(|x| (x.start(), x.end())).collect() + } + + fn bcasefold(cls: &ClassBytes) -> ClassBytes { + let mut cls_ = cls.clone(); + cls_.case_fold_simple(); + cls_ + } + + fn bunion(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { + let mut cls_ = cls1.clone(); + cls_.union(cls2); + cls_ + } + 
+ fn bintersect(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { + let mut cls_ = cls1.clone(); + cls_.intersect(cls2); + cls_ + } + + fn bdifference(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { + let mut cls_ = cls1.clone(); + cls_.difference(cls2); + cls_ + } + + fn bsymdifference(cls1: &ClassBytes, cls2: &ClassBytes) -> ClassBytes { + let mut cls_ = cls1.clone(); + cls_.symmetric_difference(cls2); + cls_ + } + + fn bnegate(cls: &ClassBytes) -> ClassBytes { + let mut cls_ = cls.clone(); + cls_.negate(); + cls_ + } + + #[test] + fn class_range_canonical_unicode() { + let range = ClassUnicodeRange::new('\u{00FF}', '\0'); + assert_eq!('\0', range.start()); + assert_eq!('\u{00FF}', range.end()); + } + + #[test] + fn class_range_canonical_bytes() { + let range = ClassBytesRange::new(b'\xFF', b'\0'); + assert_eq!(b'\0', range.start()); + assert_eq!(b'\xFF', range.end()); + } + + #[test] + fn class_canonicalize_unicode() { + let cls = uclass(&[('a', 'c'), ('x', 'z')]); + let expected = vec![('a', 'c'), ('x', 'z')]; + assert_eq!(expected, uranges(&cls)); + + let cls = uclass(&[('x', 'z'), ('a', 'c')]); + let expected = vec![('a', 'c'), ('x', 'z')]; + assert_eq!(expected, uranges(&cls)); + + let cls = uclass(&[('x', 'z'), ('w', 'y')]); + let expected = vec![('w', 'z')]; + assert_eq!(expected, uranges(&cls)); + + let cls = uclass(&[ + ('c', 'f'), + ('a', 'g'), + ('d', 'j'), + ('a', 'c'), + ('m', 'p'), + ('l', 's'), + ]); + let expected = vec![('a', 'j'), ('l', 's')]; + assert_eq!(expected, uranges(&cls)); + + let cls = uclass(&[('x', 'z'), ('u', 'w')]); + let expected = vec![('u', 'z')]; + assert_eq!(expected, uranges(&cls)); + + let cls = uclass(&[('\x00', '\u{10FFFF}'), ('\x00', '\u{10FFFF}')]); + let expected = vec![('\x00', '\u{10FFFF}')]; + assert_eq!(expected, uranges(&cls)); + + let cls = uclass(&[('a', 'a'), ('b', 'b')]); + let expected = vec![('a', 'b')]; + assert_eq!(expected, uranges(&cls)); + } + + #[test] + fn class_canonicalize_bytes() { + let 
cls = bclass(&[(b'a', b'c'), (b'x', b'z')]); + let expected = vec![(b'a', b'c'), (b'x', b'z')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[(b'x', b'z'), (b'a', b'c')]); + let expected = vec![(b'a', b'c'), (b'x', b'z')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[(b'x', b'z'), (b'w', b'y')]); + let expected = vec![(b'w', b'z')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[ + (b'c', b'f'), + (b'a', b'g'), + (b'd', b'j'), + (b'a', b'c'), + (b'm', b'p'), + (b'l', b's'), + ]); + let expected = vec![(b'a', b'j'), (b'l', b's')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[(b'x', b'z'), (b'u', b'w')]); + let expected = vec![(b'u', b'z')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[(b'\x00', b'\xFF'), (b'\x00', b'\xFF')]); + let expected = vec![(b'\x00', b'\xFF')]; + assert_eq!(expected, branges(&cls)); + + let cls = bclass(&[(b'a', b'a'), (b'b', b'b')]); + let expected = vec![(b'a', b'b')]; + assert_eq!(expected, branges(&cls)); + } + + #[test] + #[cfg(feature = "unicode-case")] + fn class_case_fold_unicode() { + let cls = uclass(&[ + ('C', 'F'), + ('A', 'G'), + ('D', 'J'), + ('A', 'C'), + ('M', 'P'), + ('L', 'S'), + ('c', 'f'), + ]); + let expected = uclass(&[ + ('A', 'J'), + ('L', 'S'), + ('a', 'j'), + ('l', 's'), + ('\u{17F}', '\u{17F}'), + ]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('A', 'Z')]); + let expected = uclass(&[ + ('A', 'Z'), + ('a', 'z'), + ('\u{17F}', '\u{17F}'), + ('\u{212A}', '\u{212A}'), + ]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('a', 'z')]); + let expected = uclass(&[ + ('A', 'Z'), + ('a', 'z'), + ('\u{17F}', '\u{17F}'), + ('\u{212A}', '\u{212A}'), + ]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('A', 'A'), ('_', '_')]); + let expected = uclass(&[('A', 'A'), ('_', '_'), ('a', 'a')]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('A', 'A'), ('=', '=')]); + let expected = 
uclass(&[('=', '='), ('A', 'A'), ('a', 'a')]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('\x00', '\x10')]); + assert_eq!(cls, ucasefold(&cls)); + + let cls = uclass(&[('k', 'k')]); + let expected = + uclass(&[('K', 'K'), ('k', 'k'), ('\u{212A}', '\u{212A}')]); + assert_eq!(expected, ucasefold(&cls)); + + let cls = uclass(&[('@', '@')]); + assert_eq!(cls, ucasefold(&cls)); + } + + #[test] + #[cfg(not(feature = "unicode-case"))] + fn class_case_fold_unicode_disabled() { + let mut cls = uclass(&[ + ('C', 'F'), + ('A', 'G'), + ('D', 'J'), + ('A', 'C'), + ('M', 'P'), + ('L', 'S'), + ('c', 'f'), + ]); + assert!(cls.try_case_fold_simple().is_err()); + } + + #[test] + #[should_panic] + #[cfg(not(feature = "unicode-case"))] + fn class_case_fold_unicode_disabled_panics() { + let mut cls = uclass(&[ + ('C', 'F'), + ('A', 'G'), + ('D', 'J'), + ('A', 'C'), + ('M', 'P'), + ('L', 'S'), + ('c', 'f'), + ]); + cls.case_fold_simple(); + } + + #[test] + fn class_case_fold_bytes() { + let cls = bclass(&[ + (b'C', b'F'), + (b'A', b'G'), + (b'D', b'J'), + (b'A', b'C'), + (b'M', b'P'), + (b'L', b'S'), + (b'c', b'f'), + ]); + let expected = + bclass(&[(b'A', b'J'), (b'L', b'S'), (b'a', b'j'), (b'l', b's')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'A', b'Z')]); + let expected = bclass(&[(b'A', b'Z'), (b'a', b'z')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'a', b'z')]); + let expected = bclass(&[(b'A', b'Z'), (b'a', b'z')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'A', b'A'), (b'_', b'_')]); + let expected = bclass(&[(b'A', b'A'), (b'_', b'_'), (b'a', b'a')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'A', b'A'), (b'=', b'=')]); + let expected = bclass(&[(b'=', b'='), (b'A', b'A'), (b'a', b'a')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'\x00', b'\x10')]); + assert_eq!(cls, bcasefold(&cls)); + + let cls = bclass(&[(b'k', b'k')]); + let 
expected = bclass(&[(b'K', b'K'), (b'k', b'k')]); + assert_eq!(expected, bcasefold(&cls)); + + let cls = bclass(&[(b'@', b'@')]); + assert_eq!(cls, bcasefold(&cls)); + } + + #[test] + fn class_negate_unicode() { + let cls = uclass(&[('a', 'a')]); + let expected = uclass(&[('\x00', '\x60'), ('\x62', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('a', 'a'), ('b', 'b')]); + let expected = uclass(&[('\x00', '\x60'), ('\x63', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('a', 'c'), ('x', 'z')]); + let expected = uclass(&[ + ('\x00', '\x60'), + ('\x64', '\x77'), + ('\x7B', '\u{10FFFF}'), + ]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\x00', 'a')]); + let expected = uclass(&[('\x62', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('a', '\u{10FFFF}')]); + let expected = uclass(&[('\x00', '\x60')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\x00', '\u{10FFFF}')]); + let expected = uclass(&[]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[]); + let expected = uclass(&[('\x00', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = + uclass(&[('\x00', '\u{10FFFD}'), ('\u{10FFFF}', '\u{10FFFF}')]); + let expected = uclass(&[('\u{10FFFE}', '\u{10FFFE}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\x00', '\u{D7FF}')]); + let expected = uclass(&[('\u{E000}', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\x00', '\u{D7FE}')]); + let expected = uclass(&[('\u{D7FF}', '\u{10FFFF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\u{E000}', '\u{10FFFF}')]); + let expected = uclass(&[('\x00', '\u{D7FF}')]); + assert_eq!(expected, unegate(&cls)); + + let cls = uclass(&[('\u{E001}', '\u{10FFFF}')]); + let expected = uclass(&[('\x00', '\u{E000}')]); + assert_eq!(expected, unegate(&cls)); + } + + #[test] + fn class_negate_bytes() { + let cls 
= bclass(&[(b'a', b'a')]); + let expected = bclass(&[(b'\x00', b'\x60'), (b'\x62', b'\xFF')]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'a', b'a'), (b'b', b'b')]); + let expected = bclass(&[(b'\x00', b'\x60'), (b'\x63', b'\xFF')]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'a', b'c'), (b'x', b'z')]); + let expected = bclass(&[ + (b'\x00', b'\x60'), + (b'\x64', b'\x77'), + (b'\x7B', b'\xFF'), + ]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'\x00', b'a')]); + let expected = bclass(&[(b'\x62', b'\xFF')]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'a', b'\xFF')]); + let expected = bclass(&[(b'\x00', b'\x60')]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'\x00', b'\xFF')]); + let expected = bclass(&[]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[]); + let expected = bclass(&[(b'\x00', b'\xFF')]); + assert_eq!(expected, bnegate(&cls)); + + let cls = bclass(&[(b'\x00', b'\xFD'), (b'\xFF', b'\xFF')]); + let expected = bclass(&[(b'\xFE', b'\xFE')]); + assert_eq!(expected, bnegate(&cls)); + } + + #[test] + fn class_union_unicode() { + let cls1 = uclass(&[('a', 'g'), ('m', 't'), ('A', 'C')]); + let cls2 = uclass(&[('a', 'z')]); + let expected = uclass(&[('a', 'z'), ('A', 'C')]); + assert_eq!(expected, uunion(&cls1, &cls2)); + } + + #[test] + fn class_union_bytes() { + let cls1 = bclass(&[(b'a', b'g'), (b'm', b't'), (b'A', b'C')]); + let cls2 = bclass(&[(b'a', b'z')]); + let expected = bclass(&[(b'a', b'z'), (b'A', b'C')]); + assert_eq!(expected, bunion(&cls1, &cls2)); + } + + #[test] + fn class_intersect_unicode() { + let cls1 = uclass(&[]); + let cls2 = uclass(&[('a', 'a')]); + let expected = uclass(&[]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'a')]); + let cls2 = uclass(&[('a', 'a')]); + let expected = uclass(&[('a', 'a')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 
'a')]); + let cls2 = uclass(&[('b', 'b')]); + let expected = uclass(&[]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'a')]); + let cls2 = uclass(&[('a', 'c')]); + let expected = uclass(&[('a', 'a')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b')]); + let cls2 = uclass(&[('a', 'c')]); + let expected = uclass(&[('a', 'b')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b')]); + let cls2 = uclass(&[('b', 'c')]); + let expected = uclass(&[('b', 'b')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b')]); + let cls2 = uclass(&[('c', 'd')]); + let expected = uclass(&[]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('b', 'c')]); + let cls2 = uclass(&[('a', 'd')]); + let expected = uclass(&[('b', 'c')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + let cls2 = uclass(&[('a', 'h')]); + let expected = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + let cls2 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + let expected = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('g', 'h')]); + let cls2 = uclass(&[('d', 'e'), ('k', 'l')]); + let expected = uclass(&[]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('d', 'e'), ('g', 'h')]); + let cls2 = uclass(&[('h', 'h')]); + let expected = uclass(&[('h', 'h')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('e', 'f'), ('i', 'j')]); + let cls2 = uclass(&[('c', 'd'), ('g', 'h'), ('k', 'l')]); + let expected = uclass(&[]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'b'), ('c', 'd'), 
('e', 'f')]); + let cls2 = uclass(&[('b', 'c'), ('d', 'e'), ('f', 'g')]); + let expected = uclass(&[('b', 'f')]); + assert_eq!(expected, uintersect(&cls1, &cls2)); + } + + #[test] + fn class_intersect_bytes() { + let cls1 = bclass(&[]); + let cls2 = bclass(&[(b'a', b'a')]); + let expected = bclass(&[]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'a')]); + let cls2 = bclass(&[(b'a', b'a')]); + let expected = bclass(&[(b'a', b'a')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'a')]); + let cls2 = bclass(&[(b'b', b'b')]); + let expected = bclass(&[]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'a')]); + let cls2 = bclass(&[(b'a', b'c')]); + let expected = bclass(&[(b'a', b'a')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b')]); + let cls2 = bclass(&[(b'a', b'c')]); + let expected = bclass(&[(b'a', b'b')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b')]); + let cls2 = bclass(&[(b'b', b'c')]); + let expected = bclass(&[(b'b', b'b')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b')]); + let cls2 = bclass(&[(b'c', b'd')]); + let expected = bclass(&[]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'b', b'c')]); + let cls2 = bclass(&[(b'a', b'd')]); + let expected = bclass(&[(b'b', b'c')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + let cls2 = bclass(&[(b'a', b'h')]); + let expected = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + let cls2 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + let expected = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + assert_eq!(expected, bintersect(&cls1, 
&cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'g', b'h')]); + let cls2 = bclass(&[(b'd', b'e'), (b'k', b'l')]); + let expected = bclass(&[]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'd', b'e'), (b'g', b'h')]); + let cls2 = bclass(&[(b'h', b'h')]); + let expected = bclass(&[(b'h', b'h')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'e', b'f'), (b'i', b'j')]); + let cls2 = bclass(&[(b'c', b'd'), (b'g', b'h'), (b'k', b'l')]); + let expected = bclass(&[]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'b'), (b'c', b'd'), (b'e', b'f')]); + let cls2 = bclass(&[(b'b', b'c'), (b'd', b'e'), (b'f', b'g')]); + let expected = bclass(&[(b'b', b'f')]); + assert_eq!(expected, bintersect(&cls1, &cls2)); + } + + #[test] + fn class_difference_unicode() { + let cls1 = uclass(&[('a', 'a')]); + let cls2 = uclass(&[('a', 'a')]); + let expected = uclass(&[]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'a')]); + let cls2 = uclass(&[]); + let expected = uclass(&[('a', 'a')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[]); + let cls2 = uclass(&[('a', 'a')]); + let expected = uclass(&[]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'z')]); + let cls2 = uclass(&[('a', 'a')]); + let expected = uclass(&[('b', 'z')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'z')]); + let cls2 = uclass(&[('z', 'z')]); + let expected = uclass(&[('a', 'y')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'z')]); + let cls2 = uclass(&[('m', 'm')]); + let expected = uclass(&[('a', 'l'), ('n', 'z')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); + let cls2 = uclass(&[('a', 'z')]); + let expected = uclass(&[]); + assert_eq!(expected, 
udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); + let cls2 = uclass(&[('d', 'v')]); + let expected = uclass(&[('a', 'c')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); + let cls2 = uclass(&[('b', 'g'), ('s', 'u')]); + let expected = uclass(&[('a', 'a'), ('h', 'i'), ('r', 'r')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'c'), ('g', 'i'), ('r', 't')]); + let cls2 = uclass(&[('b', 'd'), ('e', 'g'), ('s', 'u')]); + let expected = uclass(&[('a', 'a'), ('h', 'i'), ('r', 'r')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('x', 'z')]); + let cls2 = uclass(&[('a', 'c'), ('e', 'g'), ('s', 'u')]); + let expected = uclass(&[('x', 'z')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + + let cls1 = uclass(&[('a', 'z')]); + let cls2 = uclass(&[('a', 'c'), ('e', 'g'), ('s', 'u')]); + let expected = uclass(&[('d', 'd'), ('h', 'r'), ('v', 'z')]); + assert_eq!(expected, udifference(&cls1, &cls2)); + } + + #[test] + fn class_difference_bytes() { + let cls1 = bclass(&[(b'a', b'a')]); + let cls2 = bclass(&[(b'a', b'a')]); + let expected = bclass(&[]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'a')]); + let cls2 = bclass(&[]); + let expected = bclass(&[(b'a', b'a')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[]); + let cls2 = bclass(&[(b'a', b'a')]); + let expected = bclass(&[]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'z')]); + let cls2 = bclass(&[(b'a', b'a')]); + let expected = bclass(&[(b'b', b'z')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'z')]); + let cls2 = bclass(&[(b'z', b'z')]); + let expected = bclass(&[(b'a', b'y')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'z')]); + let cls2 = 
bclass(&[(b'm', b'm')]); + let expected = bclass(&[(b'a', b'l'), (b'n', b'z')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); + let cls2 = bclass(&[(b'a', b'z')]); + let expected = bclass(&[]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); + let cls2 = bclass(&[(b'd', b'v')]); + let expected = bclass(&[(b'a', b'c')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); + let cls2 = bclass(&[(b'b', b'g'), (b's', b'u')]); + let expected = bclass(&[(b'a', b'a'), (b'h', b'i'), (b'r', b'r')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'c'), (b'g', b'i'), (b'r', b't')]); + let cls2 = bclass(&[(b'b', b'd'), (b'e', b'g'), (b's', b'u')]); + let expected = bclass(&[(b'a', b'a'), (b'h', b'i'), (b'r', b'r')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'x', b'z')]); + let cls2 = bclass(&[(b'a', b'c'), (b'e', b'g'), (b's', b'u')]); + let expected = bclass(&[(b'x', b'z')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + + let cls1 = bclass(&[(b'a', b'z')]); + let cls2 = bclass(&[(b'a', b'c'), (b'e', b'g'), (b's', b'u')]); + let expected = bclass(&[(b'd', b'd'), (b'h', b'r'), (b'v', b'z')]); + assert_eq!(expected, bdifference(&cls1, &cls2)); + } + + #[test] + fn class_symmetric_difference_unicode() { + let cls1 = uclass(&[('a', 'm')]); + let cls2 = uclass(&[('g', 't')]); + let expected = uclass(&[('a', 'f'), ('n', 't')]); + assert_eq!(expected, usymdifference(&cls1, &cls2)); + } + + #[test] + fn class_symmetric_difference_bytes() { + let cls1 = bclass(&[(b'a', b'm')]); + let cls2 = bclass(&[(b'g', b't')]); + let expected = bclass(&[(b'a', b'f'), (b'n', b't')]); + assert_eq!(expected, bsymdifference(&cls1, &cls2)); + } + + // We use a thread with an explicit stack size to test that our 
destructor + // for Hir can handle arbitrarily sized expressions in constant stack + // space. In case we run on a platform without threads (WASM?), we limit + // this test to Windows/Unix. + #[test] + #[cfg(any(unix, windows))] + fn no_stack_overflow_on_drop() { + use std::thread; + + let run = || { + let mut expr = Hir::empty(); + for _ in 0..100 { + expr = Hir::capture(Capture { + index: 1, + name: None, + sub: Box::new(expr), + }); + expr = Hir::repetition(Repetition { + min: 0, + max: Some(1), + greedy: true, + sub: Box::new(expr), + }); + + expr = Hir { + kind: HirKind::Concat(vec![expr]), + props: Properties::empty(), + }; + expr = Hir { + kind: HirKind::Alternation(vec![expr]), + props: Properties::empty(), + }; + } + assert!(!matches!(*expr.kind(), HirKind::Empty)); + }; + + // We run our test on a thread with a small stack size so we can + // force the issue more easily. + // + // NOTE(2023-03-21): See the corresponding test in 'crate::ast::tests' + // for context on the specific stack size chosen here. 
+ thread::Builder::new() + .stack_size(16 << 10) + .spawn(run) + .unwrap() + .join() + .unwrap(); + } + + #[test] + fn look_set_iter() { + let set = LookSet::empty(); + assert_eq!(0, set.iter().count()); + + let set = LookSet::full(); + assert_eq!(18, set.iter().count()); + + let set = + LookSet::empty().insert(Look::StartLF).insert(Look::WordUnicode); + assert_eq!(2, set.iter().count()); + + let set = LookSet::empty().insert(Look::StartLF); + assert_eq!(1, set.iter().count()); + + let set = LookSet::empty().insert(Look::WordAsciiNegate); + assert_eq!(1, set.iter().count()); + } + + #[test] + fn look_set_debug() { + let res = format!("{:?}", LookSet::empty()); + assert_eq!("∅", res); + let res = format!("{:?}", LookSet::full()); + assert_eq!("Az^$rRbB𝛃𝚩<>〈〉◁▷◀▶", res); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/print.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/print.rs new file mode 100644 index 0000000000000000000000000000000000000000..89db08c25bfaf8ef726be9ce28e5167a75abfc1a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/print.rs @@ -0,0 +1,608 @@ +/*! +This module provides a regular expression printer for `Hir`. +*/ + +use core::fmt; + +use crate::{ + hir::{ + self, + visitor::{self, Visitor}, + Hir, HirKind, + }, + is_meta_character, +}; + +/// A builder for constructing a printer. +/// +/// Note that since a printer doesn't have any configuration knobs, this type +/// remains unexported. +#[derive(Clone, Debug)] +struct PrinterBuilder { + _priv: (), +} + +impl Default for PrinterBuilder { + fn default() -> PrinterBuilder { + PrinterBuilder::new() + } +} + +impl PrinterBuilder { + fn new() -> PrinterBuilder { + PrinterBuilder { _priv: () } + } + + fn build(&self) -> Printer { + Printer { _priv: () } + } +} + +/// A printer for a regular expression's high-level intermediate +/// representation. 
+/// +/// A printer converts a high-level intermediate representation (HIR) to a +/// regular expression pattern string. This particular printer uses constant +/// stack space and heap space proportional to the size of the HIR. +/// +/// Since this printer is only using the HIR, the pattern it prints will likely +/// not resemble the original pattern at all. For example, a pattern like +/// `\pL` will have its entire class written out. +/// +/// The purpose of this printer is to provide a means to mutate an HIR and then +/// build a regular expression from the result of that mutation. (A regex +/// library could provide a constructor from this HIR explicitly, but that +/// creates an unnecessary public coupling between the regex library and this +/// specific HIR representation.) +#[derive(Debug)] +pub struct Printer { + _priv: (), +} + +impl Printer { + /// Create a new printer. + pub fn new() -> Printer { + PrinterBuilder::new().build() + } + + /// Print the given `Ast` to the given writer. The writer must implement + /// `fmt::Write`. Typical implementations of `fmt::Write` that can be used + /// here are a `fmt::Formatter` (which is available in `fmt::Display` + /// implementations) or a `&mut String`. + pub fn print(&mut self, hir: &Hir, wtr: W) -> fmt::Result { + visitor::visit(hir, Writer { wtr }) + } +} + +#[derive(Debug)] +struct Writer { + wtr: W, +} + +impl Visitor for Writer { + type Output = (); + type Err = fmt::Error; + + fn finish(self) -> fmt::Result { + Ok(()) + } + + fn visit_pre(&mut self, hir: &Hir) -> fmt::Result { + match *hir.kind() { + HirKind::Empty => { + // Technically an empty sub-expression could be "printed" by + // just ignoring it, but in practice, you could have a + // repetition operator attached to an empty expression, and you + // really need something in the concrete syntax to make that + // work as you'd expect. + self.wtr.write_str(r"(?:)")?; + } + // Repetition operators are strictly suffix oriented. 
+ HirKind::Repetition(_) => {} + HirKind::Literal(hir::Literal(ref bytes)) => { + // See the comment on the 'Concat' and 'Alternation' case below + // for why we put parens here. Literals are, conceptually, + // a special case of concatenation where each element is a + // character. The HIR flattens this into a Box<[u8]>, but we + // still need to treat it like a concatenation for correct + // printing. As a special case, we don't write parens if there + // is only one character. One character means there is no + // concat so we don't need parens. Adding parens would still be + // correct, but we drop them here because it tends to create + // rather noisy regexes even in simple cases. + let result = core::str::from_utf8(bytes); + let len = result.map_or(bytes.len(), |s| s.chars().count()); + if len > 1 { + self.wtr.write_str(r"(?:")?; + } + match result { + Ok(string) => { + for c in string.chars() { + self.write_literal_char(c)?; + } + } + Err(_) => { + for &b in bytes.iter() { + self.write_literal_byte(b)?; + } + } + } + if len > 1 { + self.wtr.write_str(r")")?; + } + } + HirKind::Class(hir::Class::Unicode(ref cls)) => { + if cls.ranges().is_empty() { + return self.wtr.write_str("[a&&b]"); + } + self.wtr.write_str("[")?; + for range in cls.iter() { + if range.start() == range.end() { + self.write_literal_char(range.start())?; + } else if u32::from(range.start()) + 1 + == u32::from(range.end()) + { + self.write_literal_char(range.start())?; + self.write_literal_char(range.end())?; + } else { + self.write_literal_char(range.start())?; + self.wtr.write_str("-")?; + self.write_literal_char(range.end())?; + } + } + self.wtr.write_str("]")?; + } + HirKind::Class(hir::Class::Bytes(ref cls)) => { + if cls.ranges().is_empty() { + return self.wtr.write_str("[a&&b]"); + } + self.wtr.write_str("(?-u:[")?; + for range in cls.iter() { + if range.start() == range.end() { + self.write_literal_class_byte(range.start())?; + } else if range.start() + 1 == range.end() { + 
self.write_literal_class_byte(range.start())?; + self.write_literal_class_byte(range.end())?; + } else { + self.write_literal_class_byte(range.start())?; + self.wtr.write_str("-")?; + self.write_literal_class_byte(range.end())?; + } + } + self.wtr.write_str("])")?; + } + HirKind::Look(ref look) => match *look { + hir::Look::Start => { + self.wtr.write_str(r"\A")?; + } + hir::Look::End => { + self.wtr.write_str(r"\z")?; + } + hir::Look::StartLF => { + self.wtr.write_str("(?m:^)")?; + } + hir::Look::EndLF => { + self.wtr.write_str("(?m:$)")?; + } + hir::Look::StartCRLF => { + self.wtr.write_str("(?mR:^)")?; + } + hir::Look::EndCRLF => { + self.wtr.write_str("(?mR:$)")?; + } + hir::Look::WordAscii => { + self.wtr.write_str(r"(?-u:\b)")?; + } + hir::Look::WordAsciiNegate => { + self.wtr.write_str(r"(?-u:\B)")?; + } + hir::Look::WordUnicode => { + self.wtr.write_str(r"\b")?; + } + hir::Look::WordUnicodeNegate => { + self.wtr.write_str(r"\B")?; + } + hir::Look::WordStartAscii => { + self.wtr.write_str(r"(?-u:\b{start})")?; + } + hir::Look::WordEndAscii => { + self.wtr.write_str(r"(?-u:\b{end})")?; + } + hir::Look::WordStartUnicode => { + self.wtr.write_str(r"\b{start}")?; + } + hir::Look::WordEndUnicode => { + self.wtr.write_str(r"\b{end}")?; + } + hir::Look::WordStartHalfAscii => { + self.wtr.write_str(r"(?-u:\b{start-half})")?; + } + hir::Look::WordEndHalfAscii => { + self.wtr.write_str(r"(?-u:\b{end-half})")?; + } + hir::Look::WordStartHalfUnicode => { + self.wtr.write_str(r"\b{start-half}")?; + } + hir::Look::WordEndHalfUnicode => { + self.wtr.write_str(r"\b{end-half}")?; + } + }, + HirKind::Capture(hir::Capture { ref name, .. }) => { + self.wtr.write_str("(")?; + if let Some(ref name) = *name { + write!(self.wtr, "?P<{name}>")?; + } + } + // Why do this? Wrapping concats and alts in non-capturing groups + // is not *always* necessary, but is sometimes necessary. For + // example, 'concat(a, alt(b, c))' should be written as 'a(?:b|c)' + // and not 'ab|c'. 
The former is clearly the intended meaning, but + // the latter is actually 'alt(concat(a, b), c)'. + // + // It would be possible to only group these things in cases where + // it's strictly necessary, but it requires knowing the parent + // expression. And since this technique is simpler and always + // correct, we take this route. More to the point, it is a non-goal + // of an HIR printer to show a nice easy-to-read regex. Indeed, + // its construction forbids it from doing so. Therefore, inserting + // extra groups where they aren't necessary is perfectly okay. + HirKind::Concat(_) | HirKind::Alternation(_) => { + self.wtr.write_str(r"(?:")?; + } + } + Ok(()) + } + + fn visit_post(&mut self, hir: &Hir) -> fmt::Result { + match *hir.kind() { + // Handled during visit_pre + HirKind::Empty + | HirKind::Literal(_) + | HirKind::Class(_) + | HirKind::Look(_) => {} + HirKind::Repetition(ref x) => { + match (x.min, x.max) { + (0, Some(1)) => { + self.wtr.write_str("?")?; + } + (0, None) => { + self.wtr.write_str("*")?; + } + (1, None) => { + self.wtr.write_str("+")?; + } + (1, Some(1)) => { + // 'a{1}' and 'a{1}?' are exactly equivalent to 'a'. + return Ok(()); + } + (m, None) => { + write!(self.wtr, "{{{m},}}")?; + } + (m, Some(n)) if m == n => { + write!(self.wtr, "{{{m}}}")?; + // a{m} and a{m}? are always exactly equivalent. 
+ return Ok(()); + } + (m, Some(n)) => { + write!(self.wtr, "{{{m},{n}}}")?; + } + } + if !x.greedy { + self.wtr.write_str("?")?; + } + } + HirKind::Capture(_) + | HirKind::Concat(_) + | HirKind::Alternation(_) => { + self.wtr.write_str(r")")?; + } + } + Ok(()) + } + + fn visit_alternation_in(&mut self) -> fmt::Result { + self.wtr.write_str("|") + } +} + +impl Writer { + fn write_literal_char(&mut self, c: char) -> fmt::Result { + if is_meta_character(c) { + self.wtr.write_str("\\")?; + } + self.wtr.write_char(c) + } + + fn write_literal_byte(&mut self, b: u8) -> fmt::Result { + if b <= 0x7F && !b.is_ascii_control() && !b.is_ascii_whitespace() { + self.write_literal_char(char::try_from(b).unwrap()) + } else { + write!(self.wtr, "(?-u:\\x{b:02X})") + } + } + + fn write_literal_class_byte(&mut self, b: u8) -> fmt::Result { + if b <= 0x7F && !b.is_ascii_control() && !b.is_ascii_whitespace() { + self.write_literal_char(char::try_from(b).unwrap()) + } else { + write!(self.wtr, "\\x{b:02X}") + } + } +} + +#[cfg(test)] +mod tests { + use alloc::{ + boxed::Box, + string::{String, ToString}, + }; + + use crate::ParserBuilder; + + use super::*; + + fn roundtrip(given: &str, expected: &str) { + roundtrip_with(|b| b, given, expected); + } + + fn roundtrip_bytes(given: &str, expected: &str) { + roundtrip_with(|b| b.utf8(false), given, expected); + } + + fn roundtrip_with(mut f: F, given: &str, expected: &str) + where + F: FnMut(&mut ParserBuilder) -> &mut ParserBuilder, + { + let mut builder = ParserBuilder::new(); + f(&mut builder); + let hir = builder.build().parse(given).unwrap(); + + let mut printer = Printer::new(); + let mut dst = String::new(); + printer.print(&hir, &mut dst).unwrap(); + + // Check that the result is actually valid. 
+ builder.build().parse(&dst).unwrap(); + + assert_eq!(expected, dst); + } + + #[test] + fn print_literal() { + roundtrip("a", "a"); + roundtrip(r"\xff", "\u{FF}"); + roundtrip_bytes(r"\xff", "\u{FF}"); + roundtrip_bytes(r"(?-u)\xff", r"(?-u:\xFF)"); + roundtrip("☃", "☃"); + } + + #[test] + fn print_class() { + roundtrip(r"[a]", r"a"); + roundtrip(r"[ab]", r"[ab]"); + roundtrip(r"[a-z]", r"[a-z]"); + roundtrip(r"[a-z--b-c--x-y]", r"[ad-wz]"); + roundtrip(r"[^\x01-\u{10FFFF}]", "\u{0}"); + roundtrip(r"[-]", r"\-"); + roundtrip(r"[☃-⛄]", r"[☃-⛄]"); + + roundtrip(r"(?-u)[a]", r"a"); + roundtrip(r"(?-u)[ab]", r"(?-u:[ab])"); + roundtrip(r"(?-u)[a-z]", r"(?-u:[a-z])"); + roundtrip_bytes(r"(?-u)[a-\xFF]", r"(?-u:[a-\xFF])"); + + // The following test that the printer escapes meta characters + // in character classes. + roundtrip(r"[\[]", r"\["); + roundtrip(r"[Z-_]", r"[Z-_]"); + roundtrip(r"[Z-_--Z]", r"[\[-_]"); + + // The following test that the printer escapes meta characters + // in byte oriented character classes. + roundtrip_bytes(r"(?-u)[\[]", r"\["); + roundtrip_bytes(r"(?-u)[Z-_]", r"(?-u:[Z-_])"); + roundtrip_bytes(r"(?-u)[Z-_--Z]", r"(?-u:[\[-_])"); + + // This tests that an empty character class is correctly roundtripped. 
+ #[cfg(feature = "unicode-gencat")] + roundtrip(r"\P{any}", r"[a&&b]"); + roundtrip_bytes(r"(?-u)[^\x00-\xFF]", r"[a&&b]"); + } + + #[test] + fn print_anchor() { + roundtrip(r"^", r"\A"); + roundtrip(r"$", r"\z"); + roundtrip(r"(?m)^", r"(?m:^)"); + roundtrip(r"(?m)$", r"(?m:$)"); + } + + #[test] + fn print_word_boundary() { + roundtrip(r"\b", r"\b"); + roundtrip(r"\B", r"\B"); + roundtrip(r"(?-u)\b", r"(?-u:\b)"); + roundtrip_bytes(r"(?-u)\B", r"(?-u:\B)"); + } + + #[test] + fn print_repetition() { + roundtrip("a?", "a?"); + roundtrip("a??", "a??"); + roundtrip("(?U)a?", "a??"); + + roundtrip("a*", "a*"); + roundtrip("a*?", "a*?"); + roundtrip("(?U)a*", "a*?"); + + roundtrip("a+", "a+"); + roundtrip("a+?", "a+?"); + roundtrip("(?U)a+", "a+?"); + + roundtrip("a{1}", "a"); + roundtrip("a{2}", "a{2}"); + roundtrip("a{1,}", "a+"); + roundtrip("a{1,5}", "a{1,5}"); + roundtrip("a{1}?", "a"); + roundtrip("a{2}?", "a{2}"); + roundtrip("a{1,}?", "a+?"); + roundtrip("a{1,5}?", "a{1,5}?"); + roundtrip("(?U)a{1}", "a"); + roundtrip("(?U)a{2}", "a{2}"); + roundtrip("(?U)a{1,}", "a+?"); + roundtrip("(?U)a{1,5}", "a{1,5}?"); + + // Test that various zero-length repetitions always translate to an + // empty regex. This is more a property of HIR's smart constructors + // than the printer though. 
+ roundtrip("a{0}", "(?:)"); + roundtrip("(?:ab){0}", "(?:)"); + #[cfg(feature = "unicode-gencat")] + { + roundtrip(r"\p{any}{0}", "(?:)"); + roundtrip(r"\P{any}{0}", "(?:)"); + } + } + + #[test] + fn print_group() { + roundtrip("()", "((?:))"); + roundtrip("(?P)", "(?P(?:))"); + roundtrip("(?:)", "(?:)"); + + roundtrip("(a)", "(a)"); + roundtrip("(?Pa)", "(?Pa)"); + roundtrip("(?:a)", "a"); + + roundtrip("((((a))))", "((((a))))"); + } + + #[test] + fn print_alternation() { + roundtrip("|", "(?:(?:)|(?:))"); + roundtrip("||", "(?:(?:)|(?:)|(?:))"); + + roundtrip("a|b", "[ab]"); + roundtrip("ab|cd", "(?:(?:ab)|(?:cd))"); + roundtrip("a|b|c", "[a-c]"); + roundtrip("ab|cd|ef", "(?:(?:ab)|(?:cd)|(?:ef))"); + roundtrip("foo|bar|quux", "(?:(?:foo)|(?:bar)|(?:quux))"); + } + + // This is a regression test that stresses a peculiarity of how the HIR + // is both constructed and printed. Namely, it is legal for a repetition + // to directly contain a concatenation. This particular construct isn't + // really possible to build from the concrete syntax directly, since you'd + // be forced to put the concatenation into (at least) a non-capturing + // group. Concurrently, the printer doesn't consider this case and just + // kind of naively prints the child expression and tacks on the repetition + // operator. + // + // As a result, if you attached '+' to a 'concat(a, b)', the printer gives + // you 'ab+', but clearly it really should be '(?:ab)+'. + // + // This bug isn't easy to surface because most ways of building an HIR + // come directly from the concrete syntax, and as mentioned above, it just + // isn't possible to build this kind of HIR from the concrete syntax. + // Nevertheless, this is definitely a bug. 
+ // + // See: https://github.com/rust-lang/regex/issues/731 + #[test] + fn regression_repetition_concat() { + let expr = Hir::concat(alloc::vec![ + Hir::literal("x".as_bytes()), + Hir::repetition(hir::Repetition { + min: 1, + max: None, + greedy: true, + sub: Box::new(Hir::literal("ab".as_bytes())), + }), + Hir::literal("y".as_bytes()), + ]); + assert_eq!(r"(?:x(?:ab)+y)", expr.to_string()); + + let expr = Hir::concat(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::repetition(hir::Repetition { + min: 1, + max: None, + greedy: true, + sub: Box::new(Hir::concat(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::look(hir::Look::End), + ])), + }), + Hir::look(hir::Look::End), + ]); + assert_eq!(r"(?:\A\A\z\z)", expr.to_string()); + } + + // Just like regression_repetition_concat, but with the repetition using + // an alternation as a child expression instead. + // + // See: https://github.com/rust-lang/regex/issues/731 + #[test] + fn regression_repetition_alternation() { + let expr = Hir::concat(alloc::vec![ + Hir::literal("ab".as_bytes()), + Hir::repetition(hir::Repetition { + min: 1, + max: None, + greedy: true, + sub: Box::new(Hir::alternation(alloc::vec![ + Hir::literal("cd".as_bytes()), + Hir::literal("ef".as_bytes()), + ])), + }), + Hir::literal("gh".as_bytes()), + ]); + assert_eq!(r"(?:(?:ab)(?:(?:cd)|(?:ef))+(?:gh))", expr.to_string()); + + let expr = Hir::concat(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::repetition(hir::Repetition { + min: 1, + max: None, + greedy: true, + sub: Box::new(Hir::alternation(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::look(hir::Look::End), + ])), + }), + Hir::look(hir::Look::End), + ]); + assert_eq!(r"(?:\A(?:\A|\z)\z)", expr.to_string()); + } + + // This regression test is very similar in flavor to + // regression_repetition_concat in that the root of the issue lies in a + // peculiarity of how the HIR is represented and how the printer writes it + // out. 
Like the other regression, this one is also rooted in the fact that + // you can't produce the peculiar HIR from the concrete syntax. Namely, you + // just can't have a 'concat(a, alt(b, c))' because the 'alt' will normally + // be in (at least) a non-capturing group. Why? Because the '|' has very + // low precedence (lower that concatenation), and so something like 'ab|c' + // is actually 'alt(ab, c)'. + // + // See: https://github.com/rust-lang/regex/issues/516 + #[test] + fn regression_alternation_concat() { + let expr = Hir::concat(alloc::vec![ + Hir::literal("ab".as_bytes()), + Hir::alternation(alloc::vec![ + Hir::literal("mn".as_bytes()), + Hir::literal("xy".as_bytes()), + ]), + ]); + assert_eq!(r"(?:(?:ab)(?:(?:mn)|(?:xy)))", expr.to_string()); + + let expr = Hir::concat(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::alternation(alloc::vec![ + Hir::look(hir::Look::Start), + Hir::look(hir::Look::End), + ]), + ]); + assert_eq!(r"(?:\A(?:\A|\z))", expr.to_string()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/translate.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/translate.rs new file mode 100644 index 0000000000000000000000000000000000000000..48469f9e1615d0edf457469ac9a2fabdbbefce80 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/hir/translate.rs @@ -0,0 +1,3740 @@ +/*! +Defines a translator that converts an `Ast` to an `Hir`. +*/ + +use core::cell::{Cell, RefCell}; + +use alloc::{boxed::Box, string::ToString, vec, vec::Vec}; + +use crate::{ + ast::{self, Ast, Span, Visitor}, + either::Either, + hir::{self, Error, ErrorKind, Hir, HirKind}, + unicode::{self, ClassQuery}, +}; + +type Result = core::result::Result; + +/// A builder for constructing an AST->HIR translator. 
+#[derive(Clone, Debug)] +pub struct TranslatorBuilder { + utf8: bool, + line_terminator: u8, + flags: Flags, +} + +impl Default for TranslatorBuilder { + fn default() -> TranslatorBuilder { + TranslatorBuilder::new() + } +} + +impl TranslatorBuilder { + /// Create a new translator builder with a default configuration. + pub fn new() -> TranslatorBuilder { + TranslatorBuilder { + utf8: true, + line_terminator: b'\n', + flags: Flags::default(), + } + } + + /// Build a translator using the current configuration. + pub fn build(&self) -> Translator { + Translator { + stack: RefCell::new(vec![]), + flags: Cell::new(self.flags), + utf8: self.utf8, + line_terminator: self.line_terminator, + } + } + + /// When disabled, translation will permit the construction of a regular + /// expression that may match invalid UTF-8. + /// + /// When enabled (the default), the translator is guaranteed to produce an + /// expression that, for non-empty matches, will only ever produce spans + /// that are entirely valid UTF-8 (otherwise, the translator will return an + /// error). + /// + /// Perhaps surprisingly, when UTF-8 is enabled, an empty regex or even + /// a negated ASCII word boundary (uttered as `(?-u:\B)` in the concrete + /// syntax) will be allowed even though they can produce matches that split + /// a UTF-8 encoded codepoint. This only applies to zero-width or "empty" + /// matches, and it is expected that the regex engine itself must handle + /// these cases if necessary (perhaps by suppressing any zero-width matches + /// that split a codepoint). + pub fn utf8(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.utf8 = yes; + self + } + + /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`. + /// + /// Namely, instead of `.` (by default) matching everything except for `\n`, + /// this will cause `.` to match everything except for the byte given. 
+ /// + /// If `.` is used in a context where Unicode mode is enabled and this byte + /// isn't ASCII, then an error will be returned. When Unicode mode is + /// disabled, then any byte is permitted, but will return an error if UTF-8 + /// mode is enabled and it is a non-ASCII byte. + /// + /// In short, any ASCII value for a line terminator is always okay. But a + /// non-ASCII byte might result in an error depending on whether Unicode + /// mode or UTF-8 mode are enabled. + /// + /// Note that if `R` mode is enabled then it always takes precedence and + /// the line terminator will be treated as `\r` and `\n` simultaneously. + /// + /// Note also that this *doesn't* impact the look-around assertions + /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional + /// configuration in the regex engine itself. + pub fn line_terminator(&mut self, byte: u8) -> &mut TranslatorBuilder { + self.line_terminator = byte; + self + } + + /// Enable or disable the case insensitive flag (`i`) by default. + pub fn case_insensitive(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.flags.case_insensitive = if yes { Some(true) } else { None }; + self + } + + /// Enable or disable the multi-line matching flag (`m`) by default. + pub fn multi_line(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.flags.multi_line = if yes { Some(true) } else { None }; + self + } + + /// Enable or disable the "dot matches any character" flag (`s`) by + /// default. + pub fn dot_matches_new_line( + &mut self, + yes: bool, + ) -> &mut TranslatorBuilder { + self.flags.dot_matches_new_line = if yes { Some(true) } else { None }; + self + } + + /// Enable or disable the CRLF mode flag (`R`) by default. + pub fn crlf(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.flags.crlf = if yes { Some(true) } else { None }; + self + } + + /// Enable or disable the "swap greed" flag (`U`) by default. 
+ pub fn swap_greed(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.flags.swap_greed = if yes { Some(true) } else { None }; + self + } + + /// Enable or disable the Unicode flag (`u`) by default. + pub fn unicode(&mut self, yes: bool) -> &mut TranslatorBuilder { + self.flags.unicode = if yes { None } else { Some(false) }; + self + } +} + +/// A translator maps abstract syntax to a high level intermediate +/// representation. +/// +/// A translator may be benefit from reuse. That is, a translator can translate +/// many abstract syntax trees. +/// +/// A `Translator` can be configured in more detail via a +/// [`TranslatorBuilder`]. +#[derive(Clone, Debug)] +pub struct Translator { + /// Our call stack, but on the heap. + stack: RefCell>, + /// The current flag settings. + flags: Cell, + /// Whether we're allowed to produce HIR that can match arbitrary bytes. + utf8: bool, + /// The line terminator to use for `.`. + line_terminator: u8, +} + +impl Translator { + /// Create a new translator using the default configuration. + pub fn new() -> Translator { + TranslatorBuilder::new().build() + } + + /// Translate the given abstract syntax tree (AST) into a high level + /// intermediate representation (HIR). + /// + /// If there was a problem doing the translation, then an HIR-specific + /// error is returned. + /// + /// The original pattern string used to produce the `Ast` *must* also be + /// provided. The translator does not use the pattern string during any + /// correct translation, but is used for error reporting. + pub fn translate(&mut self, pattern: &str, ast: &Ast) -> Result { + ast::visit(ast, TranslatorI::new(self, pattern)) + } +} + +/// An HirFrame is a single stack frame, represented explicitly, which is +/// created for each item in the Ast that we traverse. +/// +/// Note that technically, this type doesn't represent our entire stack +/// frame. In particular, the Ast visitor represents any state associated with +/// traversing the Ast itself. 
+#[derive(Clone, Debug)] +enum HirFrame { + /// An arbitrary HIR expression. These get pushed whenever we hit a base + /// case in the Ast. They get popped after an inductive (i.e., recursive) + /// step is complete. + Expr(Hir), + /// A literal that is being constructed, character by character, from the + /// AST. We need this because the AST gives each individual character its + /// own node. So as we see characters, we peek at the top-most HirFrame. + /// If it's a literal, then we add to it. Otherwise, we push a new literal. + /// When it comes time to pop it, we convert it to an Hir via Hir::literal. + Literal(Vec), + /// A Unicode character class. This frame is mutated as we descend into + /// the Ast of a character class (which is itself its own mini recursive + /// structure). + ClassUnicode(hir::ClassUnicode), + /// A byte-oriented character class. This frame is mutated as we descend + /// into the Ast of a character class (which is itself its own mini + /// recursive structure). + /// + /// Byte character classes are created when Unicode mode (`u`) is disabled. + /// If `utf8` is enabled (the default), then a byte character is only + /// permitted to match ASCII text. + ClassBytes(hir::ClassBytes), + /// This is pushed whenever a repetition is observed. After visiting every + /// sub-expression in the repetition, the translator's stack is expected to + /// have this sentinel at the top. + /// + /// This sentinel only exists to stop other things (like flattening + /// literals) from reaching across repetition operators. + Repetition, + /// This is pushed on to the stack upon first seeing any kind of capture, + /// indicated by parentheses (including non-capturing groups). It is popped + /// upon leaving a group. + Group { + /// The old active flags when this group was opened. + /// + /// If this group sets flags, then the new active flags are set to the + /// result of merging the old flags with the flags introduced by this + /// group. 
If the group doesn't set any flags, then this is simply + /// equivalent to whatever flags were set when the group was opened. + /// + /// When this group is popped, the active flags should be restored to + /// the flags set here. + /// + /// The "active" flags correspond to whatever flags are set in the + /// Translator. + old_flags: Flags, + }, + /// This is pushed whenever a concatenation is observed. After visiting + /// every sub-expression in the concatenation, the translator's stack is + /// popped until it sees a Concat frame. + Concat, + /// This is pushed whenever an alternation is observed. After visiting + /// every sub-expression in the alternation, the translator's stack is + /// popped until it sees an Alternation frame. + Alternation, + /// This is pushed immediately before each sub-expression in an + /// alternation. This separates the branches of an alternation on the + /// stack and prevents literal flattening from reaching across alternation + /// branches. + /// + /// It is popped after each expression in a branch until an 'Alternation' + /// frame is observed when doing a post visit on an alternation. + AlternationBranch, +} + +impl HirFrame { + /// Assert that the current stack frame is an Hir expression and return it. + fn unwrap_expr(self) -> Hir { + match self { + HirFrame::Expr(expr) => expr, + HirFrame::Literal(lit) => Hir::literal(lit), + _ => panic!("tried to unwrap expr from HirFrame, got: {self:?}"), + } + } + + /// Assert that the current stack frame is a Unicode class expression and + /// return it. + fn unwrap_class_unicode(self) -> hir::ClassUnicode { + match self { + HirFrame::ClassUnicode(cls) => cls, + _ => panic!( + "tried to unwrap Unicode class \ + from HirFrame, got: {:?}", + self + ), + } + } + + /// Assert that the current stack frame is a byte class expression and + /// return it. 
    fn unwrap_class_bytes(self) -> hir::ClassBytes {
        match self {
            HirFrame::ClassBytes(cls) => cls,
            _ => panic!(
                "tried to unwrap byte class \
                 from HirFrame, got: {:?}",
                self
            ),
        }
    }

    /// Assert that the current stack frame is a repetition sentinel. If it
    /// isn't, then panic.
    fn unwrap_repetition(self) {
        match self {
            HirFrame::Repetition => {}
            _ => {
                panic!(
                    "tried to unwrap repetition from HirFrame, got: {self:?}"
                )
            }
        }
    }

    /// Assert that the current stack frame is a group indicator and return
    /// its corresponding flags (the flags that were active at the time the
    /// group was entered).
    fn unwrap_group(self) -> Flags {
        match self {
            HirFrame::Group { old_flags } => old_flags,
            _ => {
                panic!("tried to unwrap group from HirFrame, got: {self:?}")
            }
        }
    }

    /// Assert that the current stack frame is an alternation pipe sentinel. If
    /// it isn't, then panic.
    fn unwrap_alternation_pipe(self) {
        match self {
            HirFrame::AlternationBranch => {}
            _ => {
                panic!("tried to unwrap alt pipe from HirFrame, got: {self:?}")
            }
        }
    }
}

impl<'t, 'p> Visitor for TranslatorI<'t, 'p> {
    type Output = Hir;
    type Err = Error;

    fn finish(self) -> Result<Hir> {
        // ... otherwise, we should have exactly one HIR on the stack.
        assert_eq!(self.trans().stack.borrow().len(), 1);
        Ok(self.pop().unwrap().unwrap_expr())
    }

    /// Push sentinel/accumulator frames for AST nodes with children, so that
    /// `visit_post` can recover its sub-results (and, for groups, the flags
    /// to restore) by popping frames back off the stack.
    fn visit_pre(&mut self, ast: &Ast) -> Result<()> {
        match *ast {
            Ast::ClassBracketed(_) => {
                // The class accumulator's type depends on the Unicode flag
                // at the time the class is entered.
                if self.flags().unicode() {
                    let cls = hir::ClassUnicode::empty();
                    self.push(HirFrame::ClassUnicode(cls));
                } else {
                    let cls = hir::ClassBytes::empty();
                    self.push(HirFrame::ClassBytes(cls));
                }
            }
            Ast::Repetition(_) => self.push(HirFrame::Repetition),
            Ast::Group(ref x) => {
                let old_flags = x
                    .flags()
                    .map(|ast| self.set_flags(ast))
                    .unwrap_or_else(|| self.flags());
                self.push(HirFrame::Group { old_flags });
            }
            Ast::Concat(_) => {
                self.push(HirFrame::Concat);
            }
            Ast::Alternation(ref x) => {
                self.push(HirFrame::Alternation);
                if !x.asts.is_empty() {
                    self.push(HirFrame::AlternationBranch);
                }
            }
            _ => {}
        }
        Ok(())
    }

    /// Pop the results pushed while visiting this node's children, combine
    /// them into a single HIR expression and push that back on the stack.
    fn visit_post(&mut self, ast: &Ast) -> Result<()> {
        match *ast {
            Ast::Empty(_) => {
                self.push(HirFrame::Expr(Hir::empty()));
            }
            Ast::Flags(ref x) => {
                self.set_flags(&x.flags);
                // Flags in the AST are generally considered directives and
                // not actual sub-expressions. However, they can be used in
                // the concrete syntax like `((?i))`, and we need some kind of
                // indication of an expression there, and Empty is the correct
                // choice.
                //
                // There can also be things like `(?i)+`, but we rule those out
                // in the parser. In the future, we might allow them for
                // consistency sake.
                self.push(HirFrame::Expr(Hir::empty()));
            }
            Ast::Literal(ref x) => match self.ast_literal_to_scalar(x)? {
                Either::Right(byte) => self.push_byte(byte),
                Either::Left(ch) => match self.case_fold_char(x.span, ch)? {
                    None => self.push_char(ch),
                    Some(expr) => self.push(HirFrame::Expr(expr)),
                },
            },
            Ast::Dot(ref span) => {
                self.push(HirFrame::Expr(self.hir_dot(**span)?));
            }
            Ast::Assertion(ref x) => {
                self.push(HirFrame::Expr(self.hir_assertion(x)?));
            }
            Ast::ClassPerl(ref x) => {
                if self.flags().unicode() {
                    let cls = self.hir_perl_unicode_class(x)?;
                    let hcls = hir::Class::Unicode(cls);
                    self.push(HirFrame::Expr(Hir::class(hcls)));
                } else {
                    let cls = self.hir_perl_byte_class(x)?;
                    let hcls = hir::Class::Bytes(cls);
                    self.push(HirFrame::Expr(Hir::class(hcls)));
                }
            }
            Ast::ClassUnicode(ref x) => {
                let cls = hir::Class::Unicode(self.hir_unicode_class(x)?);
                self.push(HirFrame::Expr(Hir::class(cls)));
            }
            Ast::ClassBracketed(ref ast) => {
                if self.flags().unicode() {
                    let mut cls = self.pop().unwrap().unwrap_class_unicode();
                    self.unicode_fold_and_negate(
                        &ast.span,
                        ast.negated,
                        &mut cls,
                    )?;
                    let expr = Hir::class(hir::Class::Unicode(cls));
                    self.push(HirFrame::Expr(expr));
                } else {
                    let mut cls = self.pop().unwrap().unwrap_class_bytes();
                    self.bytes_fold_and_negate(
                        &ast.span,
                        ast.negated,
                        &mut cls,
                    )?;
                    let expr = Hir::class(hir::Class::Bytes(cls));
                    self.push(HirFrame::Expr(expr));
                }
            }
            Ast::Repetition(ref x) => {
                // Pop the repeated sub-expression, then the Repetition
                // sentinel pushed by visit_pre.
                let expr = self.pop().unwrap().unwrap_expr();
                self.pop().unwrap().unwrap_repetition();
                self.push(HirFrame::Expr(self.hir_repetition(x, expr)));
            }
            Ast::Group(ref x) => {
                // Restore the flags that were active before the group.
                let expr = self.pop().unwrap().unwrap_expr();
                let old_flags = self.pop().unwrap().unwrap_group();
                self.trans().flags.set(old_flags);
                self.push(HirFrame::Expr(self.hir_capture(x, expr)));
            }
            Ast::Concat(_) => {
                let mut exprs = vec![];
                while let Some(expr) = self.pop_concat_expr() {
                    // Empty sub-expressions contribute nothing to a
                    // concatenation, so drop them.
                    if !matches!(*expr.kind(), HirKind::Empty) {
                        exprs.push(expr);
                    }
                }
                // The stack pops sub-expressions in reverse order.
                exprs.reverse();
                self.push(HirFrame::Expr(Hir::concat(exprs)));
            }
            Ast::Alternation(_) => {
                let mut exprs = vec![];
                while let Some(expr) = self.pop_alt_expr() {
                    self.pop().unwrap().unwrap_alternation_pipe();
                    exprs.push(expr);
                }
                exprs.reverse();
                self.push(HirFrame::Expr(Hir::alternation(exprs)));
            }
        }
        Ok(())
    }

    fn visit_alternation_in(&mut self) -> Result<()> {
        self.push(HirFrame::AlternationBranch);
        Ok(())
    }

    fn visit_class_set_item_pre(
        &mut self,
        ast: &ast::ClassSetItem,
    ) -> Result<()> {
        match *ast {
            ast::ClassSetItem::Bracketed(_) => {
                if self.flags().unicode() {
                    let cls = hir::ClassUnicode::empty();
                    self.push(HirFrame::ClassUnicode(cls));
                } else {
                    let cls = hir::ClassBytes::empty();
                    self.push(HirFrame::ClassBytes(cls));
                }
            }
            // We needn't handle the Union case here since the visitor will
            // do it for us.
            _ => {}
        }
        Ok(())
    }

    /// Union each class set item into the class accumulator frame on top of
    /// the stack.
    fn visit_class_set_item_post(
        &mut self,
        ast: &ast::ClassSetItem,
    ) -> Result<()> {
        match *ast {
            ast::ClassSetItem::Empty(_) => {}
            ast::ClassSetItem::Literal(ref x) => {
                if self.flags().unicode() {
                    let mut cls = self.pop().unwrap().unwrap_class_unicode();
                    cls.push(hir::ClassUnicodeRange::new(x.c, x.c));
                    self.push(HirFrame::ClassUnicode(cls));
                } else {
                    let mut cls = self.pop().unwrap().unwrap_class_bytes();
                    let byte = self.class_literal_byte(x)?;
                    cls.push(hir::ClassBytesRange::new(byte, byte));
                    self.push(HirFrame::ClassBytes(cls));
                }
            }
            ast::ClassSetItem::Range(ref x) => {
                if self.flags().unicode() {
                    let mut cls = self.pop().unwrap().unwrap_class_unicode();
                    cls.push(hir::ClassUnicodeRange::new(x.start.c, x.end.c));
                    self.push(HirFrame::ClassUnicode(cls));
                } else {
                    let mut cls = self.pop().unwrap().unwrap_class_bytes();
                    let start = self.class_literal_byte(&x.start)?;
                    let end = self.class_literal_byte(&x.end)?;
                    cls.push(hir::ClassBytesRange::new(start, end));
                    self.push(HirFrame::ClassBytes(cls));
                }
            }
            ast::ClassSetItem::Ascii(ref x) => {
                if self.flags().unicode() {
                    let xcls = self.hir_ascii_unicode_class(x)?;
                    let mut cls = self.pop().unwrap().unwrap_class_unicode();
                    cls.union(&xcls);
                    self.push(HirFrame::ClassUnicode(cls));
                } else {
                    let xcls = self.hir_ascii_byte_class(x)?;
                    let mut cls = self.pop().unwrap().unwrap_class_bytes();
                    cls.union(&xcls);
                    self.push(HirFrame::ClassBytes(cls));
                }
            }
            ast::ClassSetItem::Unicode(ref x) => {
                let xcls = self.hir_unicode_class(x)?;
                let mut cls = self.pop().unwrap().unwrap_class_unicode();
                cls.union(&xcls);
                self.push(HirFrame::ClassUnicode(cls));
            }
            ast::ClassSetItem::Perl(ref x) => {
                if self.flags().unicode() {
                    let xcls = self.hir_perl_unicode_class(x)?;
                    let mut cls = self.pop().unwrap().unwrap_class_unicode();
                    cls.union(&xcls);
                    self.push(HirFrame::ClassUnicode(cls));
                } else {
                    let xcls = self.hir_perl_byte_class(x)?;
                    let mut cls = self.pop().unwrap().unwrap_class_bytes();
                    cls.union(&xcls);
                    self.push(HirFrame::ClassBytes(cls));
                }
            }
            ast::ClassSetItem::Bracketed(ref ast) => {
                // cls1 is the nested class's accumulator (pushed by
                // visit_class_set_item_pre); cls2 is the enclosing class's
                // accumulator underneath it.
                if self.flags().unicode() {
                    let mut cls1 = self.pop().unwrap().unwrap_class_unicode();
                    self.unicode_fold_and_negate(
                        &ast.span,
                        ast.negated,
                        &mut cls1,
                    )?;

                    let mut cls2 = self.pop().unwrap().unwrap_class_unicode();
                    cls2.union(&cls1);
                    self.push(HirFrame::ClassUnicode(cls2));
                } else {
                    let mut cls1 = self.pop().unwrap().unwrap_class_bytes();
                    self.bytes_fold_and_negate(
                        &ast.span,
                        ast.negated,
                        &mut cls1,
                    )?;

                    let mut cls2 = self.pop().unwrap().unwrap_class_bytes();
                    cls2.union(&cls1);
                    self.push(HirFrame::ClassBytes(cls2));
                }
            }
            // This is handled automatically by the visitor.
            ast::ClassSetItem::Union(_) => {}
        }
        Ok(())
    }

    fn visit_class_set_binary_op_pre(
        &mut self,
        _op: &ast::ClassSetBinaryOp,
    ) -> Result<()> {
        // Accumulator for the operation's left-hand side.
        if self.flags().unicode() {
            let cls = hir::ClassUnicode::empty();
            self.push(HirFrame::ClassUnicode(cls));
        } else {
            let cls = hir::ClassBytes::empty();
            self.push(HirFrame::ClassBytes(cls));
        }
        Ok(())
    }

    fn visit_class_set_binary_op_in(
        &mut self,
        _op: &ast::ClassSetBinaryOp,
    ) -> Result<()> {
        // Accumulator for the operation's right-hand side.
        if self.flags().unicode() {
            let cls = hir::ClassUnicode::empty();
            self.push(HirFrame::ClassUnicode(cls));
        } else {
            let cls = hir::ClassBytes::empty();
            self.push(HirFrame::ClassBytes(cls));
        }
        Ok(())
    }

    fn visit_class_set_binary_op_post(
        &mut self,
        op: &ast::ClassSetBinaryOp,
    ) -> Result<()> {
        use crate::ast::ClassSetBinaryOpKind::*;

        // Pop order mirrors the push order above: the RHS accumulator is on
        // top, then the LHS accumulator, then the enclosing class itself.
        if self.flags().unicode() {
            let mut rhs = self.pop().unwrap().unwrap_class_unicode();
            let mut lhs = self.pop().unwrap().unwrap_class_unicode();
            let mut cls = self.pop().unwrap().unwrap_class_unicode();
            // Case folding is applied to both operands before combining them.
            if self.flags().case_insensitive() {
                rhs.try_case_fold_simple().map_err(|_| {
                    self.error(
                        op.rhs.span().clone(),
                        ErrorKind::UnicodeCaseUnavailable,
                    )
                })?;
                lhs.try_case_fold_simple().map_err(|_| {
                    self.error(
                        op.lhs.span().clone(),
                        ErrorKind::UnicodeCaseUnavailable,
                    )
                })?;
            }
            match op.kind {
                Intersection => lhs.intersect(&rhs),
                Difference => lhs.difference(&rhs),
                SymmetricDifference => lhs.symmetric_difference(&rhs),
            }
            cls.union(&lhs);
            self.push(HirFrame::ClassUnicode(cls));
        } else {
            let mut rhs = self.pop().unwrap().unwrap_class_bytes();
            let mut lhs = self.pop().unwrap().unwrap_class_bytes();
            let mut cls = self.pop().unwrap().unwrap_class_bytes();
            if self.flags().case_insensitive() {
                rhs.case_fold_simple();
                lhs.case_fold_simple();
            }
            match op.kind {
                Intersection => lhs.intersect(&rhs),
                Difference => lhs.difference(&rhs),
                SymmetricDifference => lhs.symmetric_difference(&rhs),
            }
            cls.union(&lhs);
            self.push(HirFrame::ClassBytes(cls));
        }
        Ok(())
    }
}

/// The internal implementation of a translator.
///
/// This type is responsible for carrying around the original pattern string,
/// which is not tied to the internal state of a translator.
///
/// A TranslatorI exists for the time it takes to translate a single Ast.
#[derive(Clone, Debug)]
struct TranslatorI<'t, 'p> {
    trans: &'t Translator,
    pattern: &'p str,
}

impl<'t, 'p> TranslatorI<'t, 'p> {
    /// Build a new internal translator.
    fn new(trans: &'t Translator, pattern: &'p str) -> TranslatorI<'t, 'p> {
        TranslatorI { trans, pattern }
    }

    /// Return a reference to the underlying translator.
    fn trans(&self) -> &Translator {
        &self.trans
    }

    /// Push the given frame on to the call stack.
    fn push(&self, frame: HirFrame) {
        self.trans().stack.borrow_mut().push(frame);
    }

    /// Push the given literal char on to the call stack.
    ///
    /// If the top-most element of the stack is a literal, then the char
    /// is appended to the end of that literal. Otherwise, a new literal
    /// containing just the given char is pushed to the top of the stack.
    fn push_char(&self, ch: char) {
        let mut buf = [0; 4];
        let bytes = ch.encode_utf8(&mut buf).as_bytes();
        let mut stack = self.trans().stack.borrow_mut();
        if let Some(HirFrame::Literal(ref mut literal)) = stack.last_mut() {
            literal.extend_from_slice(bytes);
        } else {
            stack.push(HirFrame::Literal(bytes.to_vec()));
        }
    }

    /// Push the given literal byte on to the call stack.
    ///
    /// If the top-most element of the stack is a literal, then the byte
    /// is appended to the end of that literal. Otherwise, a new literal
    /// containing just the given byte is pushed to the top of the stack.
+ fn push_byte(&self, byte: u8) { + let mut stack = self.trans().stack.borrow_mut(); + if let Some(HirFrame::Literal(ref mut literal)) = stack.last_mut() { + literal.push(byte); + } else { + stack.push(HirFrame::Literal(vec![byte])); + } + } + + /// Pop the top of the call stack. If the call stack is empty, return None. + fn pop(&self) -> Option { + self.trans().stack.borrow_mut().pop() + } + + /// Pop an HIR expression from the top of the stack for a concatenation. + /// + /// This returns None if the stack is empty or when a concat frame is seen. + /// Otherwise, it panics if it could not find an HIR expression. + fn pop_concat_expr(&self) -> Option { + let frame = self.pop()?; + match frame { + HirFrame::Concat => None, + HirFrame::Expr(expr) => Some(expr), + HirFrame::Literal(lit) => Some(Hir::literal(lit)), + HirFrame::ClassUnicode(_) => { + unreachable!("expected expr or concat, got Unicode class") + } + HirFrame::ClassBytes(_) => { + unreachable!("expected expr or concat, got byte class") + } + HirFrame::Repetition => { + unreachable!("expected expr or concat, got repetition") + } + HirFrame::Group { .. } => { + unreachable!("expected expr or concat, got group") + } + HirFrame::Alternation => { + unreachable!("expected expr or concat, got alt marker") + } + HirFrame::AlternationBranch => { + unreachable!("expected expr or concat, got alt branch marker") + } + } + } + + /// Pop an HIR expression from the top of the stack for an alternation. + /// + /// This returns None if the stack is empty or when an alternation frame is + /// seen. Otherwise, it panics if it could not find an HIR expression. 
+ fn pop_alt_expr(&self) -> Option { + let frame = self.pop()?; + match frame { + HirFrame::Alternation => None, + HirFrame::Expr(expr) => Some(expr), + HirFrame::Literal(lit) => Some(Hir::literal(lit)), + HirFrame::ClassUnicode(_) => { + unreachable!("expected expr or alt, got Unicode class") + } + HirFrame::ClassBytes(_) => { + unreachable!("expected expr or alt, got byte class") + } + HirFrame::Repetition => { + unreachable!("expected expr or alt, got repetition") + } + HirFrame::Group { .. } => { + unreachable!("expected expr or alt, got group") + } + HirFrame::Concat => { + unreachable!("expected expr or alt, got concat marker") + } + HirFrame::AlternationBranch => { + unreachable!("expected expr or alt, got alt branch marker") + } + } + } + + /// Create a new error with the given span and error type. + fn error(&self, span: Span, kind: ErrorKind) -> Error { + Error { kind, pattern: self.pattern.to_string(), span } + } + + /// Return a copy of the active flags. + fn flags(&self) -> Flags { + self.trans().flags.get() + } + + /// Set the flags of this translator from the flags set in the given AST. + /// Then, return the old flags. + fn set_flags(&self, ast_flags: &ast::Flags) -> Flags { + let old_flags = self.flags(); + let mut new_flags = Flags::from_ast(ast_flags); + new_flags.merge(&old_flags); + self.trans().flags.set(new_flags); + old_flags + } + + /// Convert an Ast literal to its scalar representation. + /// + /// When Unicode mode is enabled, then this always succeeds and returns a + /// `char` (Unicode scalar value). + /// + /// When Unicode mode is disabled, then a `char` will still be returned + /// whenever possible. A byte is returned only when invalid UTF-8 is + /// allowed and when the byte is not ASCII. Otherwise, a non-ASCII byte + /// will result in an error when invalid UTF-8 is not allowed. 
    fn ast_literal_to_scalar(
        &self,
        lit: &ast::Literal,
    ) -> Result<Either<char, u8>> {
        if self.flags().unicode() {
            return Ok(Either::Left(lit.c));
        }
        let byte = match lit.byte() {
            None => return Ok(Either::Left(lit.c)),
            Some(byte) => byte,
        };
        if byte <= 0x7F {
            return Ok(Either::Left(char::try_from(byte).unwrap()));
        }
        // A non-ASCII byte is only permitted when the translator allows
        // patterns that can match invalid UTF-8.
        if self.trans().utf8 {
            return Err(self.error(lit.span, ErrorKind::InvalidUtf8));
        }
        Ok(Either::Right(byte))
    }

    /// If the case-insensitive flag is set, expand the literal `c` into a
    /// class containing its case-folded equivalents. Returns `Ok(None)` when
    /// no folding is needed (flag off, or folding would be a no-op).
    fn case_fold_char(&self, span: Span, c: char) -> Result<Option<Hir>> {
        if !self.flags().case_insensitive() {
            return Ok(None);
        }
        if self.flags().unicode() {
            // If case folding won't do anything, then don't bother trying.
            let map = unicode::SimpleCaseFolder::new()
                .map(|f| f.overlaps(c, c))
                .map_err(|_| {
                    self.error(span, ErrorKind::UnicodeCaseUnavailable)
                })?;
            if !map {
                return Ok(None);
            }
            let mut cls =
                hir::ClassUnicode::new(vec![hir::ClassUnicodeRange::new(
                    c, c,
                )]);
            cls.try_case_fold_simple().map_err(|_| {
                self.error(span, ErrorKind::UnicodeCaseUnavailable)
            })?;
            Ok(Some(Hir::class(hir::Class::Unicode(cls))))
        } else {
            if !c.is_ascii() {
                return Ok(None);
            }
            // If case folding won't do anything, then don't bother trying.
            match c {
                'A'..='Z' | 'a'..='z' => {}
                _ => return Ok(None),
            }
            let mut cls =
                hir::ClassBytes::new(vec![hir::ClassBytesRange::new(
                    // OK because 'c.len_utf8() == 1' which in turn implies
                    // that 'c' is ASCII.
                    u8::try_from(c).unwrap(),
                    u8::try_from(c).unwrap(),
                )]);
            cls.case_fold_simple();
            Ok(Some(Hir::class(hir::Class::Bytes(cls))))
        }
    }

    /// Translate `.` into the appropriate HIR dot, based on the Unicode,
    /// dot-matches-new-line and CRLF flags and the configured line
    /// terminator.
    fn hir_dot(&self, span: Span) -> Result<Hir> {
        let (utf8, lineterm, flags) =
            (self.trans().utf8, self.trans().line_terminator, self.flags());
        if utf8 && (!flags.unicode() || !lineterm.is_ascii()) {
            return Err(self.error(span, ErrorKind::InvalidUtf8));
        }
        let dot = if flags.dot_matches_new_line() {
            if flags.unicode() {
                hir::Dot::AnyChar
            } else {
                hir::Dot::AnyByte
            }
        } else {
            if flags.unicode() {
                if flags.crlf() {
                    hir::Dot::AnyCharExceptCRLF
                } else {
                    if !lineterm.is_ascii() {
                        return Err(
                            self.error(span, ErrorKind::InvalidLineTerminator)
                        );
                    }
                    hir::Dot::AnyCharExcept(char::from(lineterm))
                }
            } else {
                if flags.crlf() {
                    hir::Dot::AnyByteExceptCRLF
                } else {
                    hir::Dot::AnyByteExcept(lineterm)
                }
            }
        };
        Ok(Hir::dot(dot))
    }

    /// Translate an AST assertion into the corresponding HIR look-around,
    /// honoring the Unicode, multi-line and CRLF flags.
    fn hir_assertion(&self, asst: &ast::Assertion) -> Result<Hir> {
        let unicode = self.flags().unicode();
        let multi_line = self.flags().multi_line();
        let crlf = self.flags().crlf();
        Ok(match asst.kind {
            ast::AssertionKind::StartLine => Hir::look(if multi_line {
                if crlf {
                    hir::Look::StartCRLF
                } else {
                    hir::Look::StartLF
                }
            } else {
                hir::Look::Start
            }),
            ast::AssertionKind::EndLine => Hir::look(if multi_line {
                if crlf {
                    hir::Look::EndCRLF
                } else {
                    hir::Look::EndLF
                }
            } else {
                hir::Look::End
            }),
            ast::AssertionKind::StartText => Hir::look(hir::Look::Start),
            ast::AssertionKind::EndText => Hir::look(hir::Look::End),
            ast::AssertionKind::WordBoundary => Hir::look(if unicode {
                hir::Look::WordUnicode
            } else {
                hir::Look::WordAscii
            }),
            ast::AssertionKind::NotWordBoundary => Hir::look(if unicode {
                hir::Look::WordUnicodeNegate
            } else {
                hir::Look::WordAsciiNegate
            }),
            ast::AssertionKind::WordBoundaryStart
            | ast::AssertionKind::WordBoundaryStartAngle => {
                Hir::look(if unicode {
                    hir::Look::WordStartUnicode
                } else {
                    hir::Look::WordStartAscii
                })
            }
            ast::AssertionKind::WordBoundaryEnd
            | ast::AssertionKind::WordBoundaryEndAngle => {
                Hir::look(if unicode {
                    hir::Look::WordEndUnicode
                } else {
                    hir::Look::WordEndAscii
                })
            }
            ast::AssertionKind::WordBoundaryStartHalf => {
                Hir::look(if unicode {
                    hir::Look::WordStartHalfUnicode
                } else {
                    hir::Look::WordStartHalfAscii
                })
            }
            ast::AssertionKind::WordBoundaryEndHalf => Hir::look(if unicode {
                hir::Look::WordEndHalfUnicode
            } else {
                hir::Look::WordEndHalfAscii
            }),
        })
    }

    /// Wrap `expr` in a capture group. Non-capturing groups return `expr`
    /// unchanged.
    fn hir_capture(&self, group: &ast::Group, expr: Hir) -> Hir {
        let (index, name) = match group.kind {
            ast::GroupKind::CaptureIndex(index) => (index, None),
            ast::GroupKind::CaptureName { ref name, .. } => {
                (name.index, Some(name.name.clone().into_boxed_str()))
            }
            // The HIR doesn't need to use non-capturing groups, since the way
            // in which the data type is defined handles this automatically.
            ast::GroupKind::NonCapturing(_) => return expr,
        };
        Hir::capture(hir::Capture { index, name, sub: Box::new(expr) })
    }

    /// Translate a repetition operator to HIR. The swap-greed flag inverts
    /// the operator's greediness.
    fn hir_repetition(&self, rep: &ast::Repetition, expr: Hir) -> Hir {
        let (min, max) = match rep.op.kind {
            ast::RepetitionKind::ZeroOrOne => (0, Some(1)),
            ast::RepetitionKind::ZeroOrMore => (0, None),
            ast::RepetitionKind::OneOrMore => (1, None),
            ast::RepetitionKind::Range(ast::RepetitionRange::Exactly(m)) => {
                (m, Some(m))
            }
            ast::RepetitionKind::Range(ast::RepetitionRange::AtLeast(m)) => {
                (m, None)
            }
            ast::RepetitionKind::Range(ast::RepetitionRange::Bounded(
                m,
                n,
            )) => (m, Some(n)),
        };
        let greedy =
            if self.flags().swap_greed() { !rep.greedy } else { rep.greedy };
        Hir::repetition(hir::Repetition {
            min,
            max,
            greedy,
            sub: Box::new(expr),
        })
    }

    /// Look up a Unicode class (e.g. `\p{...}`) and apply case folding and
    /// negation. Errors if Unicode mode is disabled.
    fn hir_unicode_class(
        &self,
        ast_class: &ast::ClassUnicode,
    ) -> Result<hir::ClassUnicode> {
        use crate::ast::ClassUnicodeKind::*;

        if !self.flags().unicode() {
            return Err(
                self.error(ast_class.span, ErrorKind::UnicodeNotAllowed)
            );
        }
        let query = match ast_class.kind {
            OneLetter(name) => ClassQuery::OneLetter(name),
            Named(ref name) => ClassQuery::Binary(name),
            NamedValue { ref name, ref value, .. } => ClassQuery::ByValue {
                property_name: name,
                property_value: value,
            },
        };
        let mut result = self.convert_unicode_class_error(
            &ast_class.span,
            unicode::class(query),
        );
        if let Ok(ref mut class) = result {
            self.unicode_fold_and_negate(
                &ast_class.span,
                ast_class.negated,
                class,
            )?;
        }
        result
    }

    /// Build a Unicode class from an ASCII class kind (e.g. `[:alpha:]`),
    /// applying case folding and negation.
    fn hir_ascii_unicode_class(
        &self,
        ast: &ast::ClassAscii,
    ) -> Result<hir::ClassUnicode> {
        let mut cls = hir::ClassUnicode::new(
            ascii_class_as_chars(&ast.kind)
                .map(|(s, e)| hir::ClassUnicodeRange::new(s, e)),
        );
        self.unicode_fold_and_negate(&ast.span, ast.negated, &mut cls)?;
        Ok(cls)
    }

    /// Build a byte class from an ASCII class kind, applying case folding
    /// and negation.
    fn hir_ascii_byte_class(
        &self,
        ast: &ast::ClassAscii,
    ) -> Result<hir::ClassBytes> {
        let mut cls = hir::ClassBytes::new(
            ascii_class(&ast.kind)
                .map(|(s, e)| hir::ClassBytesRange::new(s, e)),
        );
        self.bytes_fold_and_negate(&ast.span, ast.negated, &mut cls)?;
        Ok(cls)
    }

    fn hir_perl_unicode_class(
        &self,
        ast_class: &ast::ClassPerl,
    ) -> Result<hir::ClassUnicode> {
        use crate::ast::ClassPerlKind::*;

        assert!(self.flags().unicode());
        let result = match ast_class.kind {
            Digit => unicode::perl_digit(),
            Space => unicode::perl_space(),
            Word => unicode::perl_word(),
        };
        let mut class =
            self.convert_unicode_class_error(&ast_class.span, result)?;
        // We needn't apply case folding here because the Perl Unicode classes
        // are already closed under Unicode simple case folding.
        if ast_class.negated {
            class.negate();
        }
        Ok(class)
    }

    fn hir_perl_byte_class(
        &self,
        ast_class: &ast::ClassPerl,
    ) -> Result<hir::ClassBytes> {
        use crate::ast::ClassPerlKind::*;

        assert!(!self.flags().unicode());
        let mut class = match ast_class.kind {
            Digit => hir_ascii_class_bytes(&ast::ClassAsciiKind::Digit),
            Space => hir_ascii_class_bytes(&ast::ClassAsciiKind::Space),
            Word => hir_ascii_class_bytes(&ast::ClassAsciiKind::Word),
        };
        // We needn't apply case folding here because the Perl ASCII classes
        // are already closed (under ASCII case folding).
        if ast_class.negated {
            class.negate();
        }
        // Negating a Perl byte class is likely to cause it to match invalid
        // UTF-8. That's only OK if the translator is configured to allow such
        // things.
        if self.trans().utf8 && !class.is_ascii() {
            return Err(self.error(ast_class.span, ErrorKind::InvalidUtf8));
        }
        Ok(class)
    }

    /// Converts the given Unicode specific error to an HIR translation error.
    ///
    /// The span given should approximate the position at which an error would
    /// occur.
    fn convert_unicode_class_error(
        &self,
        span: &Span,
        result: core::result::Result<hir::ClassUnicode, unicode::Error>,
    ) -> Result<hir::ClassUnicode> {
        result.map_err(|err| {
            let sp = span.clone();
            match err {
                unicode::Error::PropertyNotFound => {
                    self.error(sp, ErrorKind::UnicodePropertyNotFound)
                }
                unicode::Error::PropertyValueNotFound => {
                    self.error(sp, ErrorKind::UnicodePropertyValueNotFound)
                }
                unicode::Error::PerlClassNotFound => {
                    self.error(sp, ErrorKind::UnicodePerlClassNotFound)
                }
            }
        })
    }

    fn unicode_fold_and_negate(
        &self,
        span: &Span,
        negated: bool,
        class: &mut hir::ClassUnicode,
    ) -> Result<()> {
        // Note that we must apply case folding before negation!
        // Consider `(?i)[^x]`. If we applied negation first, then
        // the result would be the character class that matched any
        // Unicode scalar value.
        if self.flags().case_insensitive() {
            class.try_case_fold_simple().map_err(|_| {
                self.error(span.clone(), ErrorKind::UnicodeCaseUnavailable)
            })?;
        }
        if negated {
            class.negate();
        }
        Ok(())
    }

    fn bytes_fold_and_negate(
        &self,
        span: &Span,
        negated: bool,
        class: &mut hir::ClassBytes,
    ) -> Result<()> {
        // Note that we must apply case folding before negation!
        // Consider `(?i)[^x]`. If we applied negation first, then
        // the result would be the character class that matched any
        // Unicode scalar value.
        if self.flags().case_insensitive() {
            class.case_fold_simple();
        }
        if negated {
            class.negate();
        }
        // A negated byte class may match invalid UTF-8, which is only
        // allowed when the translator is configured to permit it.
        if self.trans().utf8 && !class.is_ascii() {
            return Err(self.error(span.clone(), ErrorKind::InvalidUtf8));
        }
        Ok(())
    }

    /// Return a scalar byte value suitable for use as a literal in a byte
    /// character class.
    fn class_literal_byte(&self, ast: &ast::Literal) -> Result<u8> {
        match self.ast_literal_to_scalar(ast)? {
            Either::Right(byte) => Ok(byte),
            Either::Left(ch) => {
                if ch.is_ascii() {
                    Ok(u8::try_from(ch).unwrap())
                } else {
                    // We can't feasibly support Unicode in
                    // byte oriented classes. Byte classes don't
                    // do Unicode case folding.
                    Err(self.error(ast.span, ErrorKind::UnicodeNotAllowed))
                }
            }
        }
    }
}

/// A translator's representation of a regular expression's flags at any given
/// moment in time.
///
/// Each flag can be in one of three states: absent, present but disabled or
/// present but enabled.
#[derive(Clone, Copy, Debug, Default)]
struct Flags {
    case_insensitive: Option<bool>,
    multi_line: Option<bool>,
    dot_matches_new_line: Option<bool>,
    swap_greed: Option<bool>,
    unicode: Option<bool>,
    crlf: Option<bool>,
    // Note that `ignore_whitespace` is omitted here because it is handled
    // entirely in the parser.
}

impl Flags {
    /// Build a `Flags` from the items of a flag group in the AST. Items
    /// before a `-` (Negation) are marked enabled; items after it disabled.
    /// Flags not mentioned stay `None` (absent).
    fn from_ast(ast: &ast::Flags) -> Flags {
        let mut flags = Flags::default();
        let mut enable = true;
        for item in &ast.items {
            match item.kind {
                ast::FlagsItemKind::Negation => {
                    enable = false;
                }
                ast::FlagsItemKind::Flag(ast::Flag::CaseInsensitive) => {
                    flags.case_insensitive = Some(enable);
                }
                ast::FlagsItemKind::Flag(ast::Flag::MultiLine) => {
                    flags.multi_line = Some(enable);
                }
                ast::FlagsItemKind::Flag(ast::Flag::DotMatchesNewLine) => {
                    flags.dot_matches_new_line = Some(enable);
                }
                ast::FlagsItemKind::Flag(ast::Flag::SwapGreed) => {
                    flags.swap_greed = Some(enable);
                }
                ast::FlagsItemKind::Flag(ast::Flag::Unicode) => {
                    flags.unicode = Some(enable);
                }
                ast::FlagsItemKind::Flag(ast::Flag::CRLF) => {
                    flags.crlf = Some(enable);
                }
                // Handled entirely in the parser.
                ast::FlagsItemKind::Flag(ast::Flag::IgnoreWhitespace) => {}
            }
        }
        flags
    }

    /// Fill in any flags that are absent here with the corresponding values
    /// from `previous`. Explicitly set flags (enabled or disabled) win.
    fn merge(&mut self, previous: &Flags) {
        if self.case_insensitive.is_none() {
            self.case_insensitive = previous.case_insensitive;
        }
        if self.multi_line.is_none() {
            self.multi_line = previous.multi_line;
        }
        if self.dot_matches_new_line.is_none() {
            self.dot_matches_new_line = previous.dot_matches_new_line;
        }
        if self.swap_greed.is_none() {
            self.swap_greed = previous.swap_greed;
        }
        if self.unicode.is_none() {
            self.unicode = previous.unicode;
        }
        if self.crlf.is_none() {
            self.crlf = previous.crlf;
        }
    }

    fn case_insensitive(&self) -> bool {
        self.case_insensitive.unwrap_or(false)
    }

    fn multi_line(&self) -> bool {
        self.multi_line.unwrap_or(false)
    }

    fn dot_matches_new_line(&self) -> bool {
        self.dot_matches_new_line.unwrap_or(false)
    }

    fn swap_greed(&self) -> bool {
        self.swap_greed.unwrap_or(false)
    }

    // Unicode mode is the only flag that defaults to enabled.
    fn unicode(&self) -> bool {
        self.unicode.unwrap_or(true)
    }

    fn crlf(&self) -> bool {
        self.crlf.unwrap_or(false)
    }
}

/// Build a byte class covering the ranges of the given ASCII class kind.
fn hir_ascii_class_bytes(kind: &ast::ClassAsciiKind) -> hir::ClassBytes {
    let ranges: Vec<_> = ascii_class(kind)
        .map(|(s, e)| hir::ClassBytesRange::new(s, e))
        .collect();
    hir::ClassBytes::new(ranges)
}

/// Return the byte ranges (inclusive) for a POSIX-style ASCII class kind.
fn ascii_class(kind: &ast::ClassAsciiKind) -> impl Iterator<Item = (u8, u8)> {
    use crate::ast::ClassAsciiKind::*;

    let slice: &'static [(u8, u8)] = match *kind {
        Alnum => &[(b'0', b'9'), (b'A', b'Z'), (b'a', b'z')],
        Alpha => &[(b'A', b'Z'), (b'a', b'z')],
        Ascii => &[(b'\x00', b'\x7F')],
        Blank => &[(b'\t', b'\t'), (b' ', b' ')],
        Cntrl => &[(b'\x00', b'\x1F'), (b'\x7F', b'\x7F')],
        Digit => &[(b'0', b'9')],
        Graph => &[(b'!', b'~')],
        Lower => &[(b'a', b'z')],
        Print => &[(b' ', b'~')],
        Punct => &[(b'!', b'/'), (b':', b'@'), (b'[', b'`'), (b'{', b'~')],
        Space => &[
            (b'\t', b'\t'),
            (b'\n', b'\n'),
            (b'\x0B', b'\x0B'),
            (b'\x0C', b'\x0C'),
            (b'\r', b'\r'),
            (b' ', b' '),
        ],
        Upper => &[(b'A', b'Z')],
        Word => &[(b'0', b'9'), (b'A', b'Z'), (b'_', b'_'), (b'a', b'z')],
        Xdigit => &[(b'0', b'9'), (b'A', b'F'), (b'a', b'f')],
    };
    slice.iter().copied()
}

/// Like `ascii_class`, but yields the ranges as `char` pairs.
fn ascii_class_as_chars(
    kind: &ast::ClassAsciiKind,
) -> impl Iterator<Item = (char, char)> {
    ascii_class(kind).map(|(s, e)| (char::from(s), char::from(e)))
}

#[cfg(test)]
mod tests {
    use crate::{
        ast::{parse::ParserBuilder, Position},
        hir::{Look, Properties},
    };

    use super::*;

    // We create these errors to compare with real hir::Errors in the tests.
    // We define equality between TestError and hir::Error to disregard the
    // pattern string in hir::Error, which is annoying to provide in tests.
    #[derive(Clone, Debug)]
    struct TestError {
        span: Span,
        kind: hir::ErrorKind,
    }

    impl PartialEq<hir::Error> for TestError {
        fn eq(&self, other: &hir::Error) -> bool {
            self.span == other.span && self.kind == other.kind
        }
    }

    impl PartialEq<TestError> for hir::Error {
        fn eq(&self, other: &TestError) -> bool {
            self.span == other.span && self.kind == other.kind
        }
    }

    /// Parse `pattern` into an AST (octal escapes enabled), panicking on
    /// failure.
    fn parse(pattern: &str) -> Ast {
        ParserBuilder::new().octal(true).build().parse(pattern).unwrap()
    }

    /// Translate `pattern` with the UTF-8 restriction enabled.
    fn t(pattern: &str) -> Hir {
        TranslatorBuilder::new()
            .utf8(true)
            .build()
            .translate(pattern, &parse(pattern))
            .unwrap()
    }

    /// Translate `pattern` (UTF-8 restricted) and return the expected error.
    fn t_err(pattern: &str) -> hir::Error {
        TranslatorBuilder::new()
            .utf8(true)
            .build()
            .translate(pattern, &parse(pattern))
            .unwrap_err()
    }

    /// Translate `pattern` with the UTF-8 restriction disabled.
    fn t_bytes(pattern: &str) -> Hir {
        TranslatorBuilder::new()
            .utf8(false)
            .build()
            .translate(pattern, &parse(pattern))
            .unwrap()
    }

    fn props(pattern: &str) -> Properties {
        t(pattern).properties().clone()
    }

    fn props_bytes(pattern: &str) -> Properties {
        t_bytes(pattern).properties().clone()
    }

    // Shorthand constructors for building expected HIR values.

    fn hir_lit(s: &str) -> Hir {
        hir_blit(s.as_bytes())
    }

    fn hir_blit(s: &[u8]) -> Hir {
        Hir::literal(s)
    }

    fn hir_capture(index: u32, expr: Hir) -> Hir {
        Hir::capture(hir::Capture { index, name: None, sub: Box::new(expr) })
    }

    fn hir_capture_name(index: u32, name: &str, expr: Hir) -> Hir {
        Hir::capture(hir::Capture {
            index,
            name: Some(name.into()),
            sub: Box::new(expr),
        })
    }

    fn hir_quest(greedy: bool, expr: Hir) -> Hir {
        Hir::repetition(hir::Repetition {
            min: 0,
            max: Some(1),
            greedy,
            sub: Box::new(expr),
        })
    }

    fn hir_star(greedy: bool, expr: Hir) -> Hir {
        Hir::repetition(hir::Repetition {
            min: 0,
            max: None,
            greedy,
            sub: Box::new(expr),
        })
    }

    fn hir_plus(greedy: bool, expr: Hir) -> Hir {
        Hir::repetition(hir::Repetition {
            min: 1,
            max: None,
            greedy,
            sub: Box::new(expr),
        })
    }

    fn hir_range(greedy: bool, min: u32, max: Option<u32>, expr: Hir) -> Hir {
        Hir::repetition(hir::Repetition {
            min,
            max,
            greedy,
            sub: Box::new(expr),
        })
    }

    fn hir_alt(alts: Vec<Hir>) -> Hir {
        Hir::alternation(alts)
    }

    fn hir_cat(exprs: Vec<Hir>) -> Hir {
        Hir::concat(exprs)
    }

    #[allow(dead_code)]
    fn hir_uclass_query(query: ClassQuery<'_>) -> Hir {
        Hir::class(hir::Class::Unicode(unicode::class(query).unwrap()))
    }

    #[allow(dead_code)]
    fn hir_uclass_perl_word() -> Hir {
        Hir::class(hir::Class::Unicode(unicode::perl_word().unwrap()))
    }

    fn hir_ascii_uclass(kind: &ast::ClassAsciiKind) -> Hir {
        Hir::class(hir::Class::Unicode(hir::ClassUnicode::new(
            ascii_class_as_chars(kind)
                .map(|(s, e)| hir::ClassUnicodeRange::new(s, e)),
        )))
    }

    fn hir_ascii_bclass(kind: &ast::ClassAsciiKind) -> Hir {
        Hir::class(hir::Class::Bytes(hir::ClassBytes::new(
            ascii_class(kind).map(|(s, e)| hir::ClassBytesRange::new(s, e)),
        )))
    }

    fn hir_uclass(ranges: &[(char, char)]) -> Hir {
        Hir::class(uclass(ranges))
    }

    fn hir_bclass(ranges: &[(u8, u8)]) -> Hir {
        Hir::class(bclass(ranges))
    }

    fn hir_case_fold(expr: Hir) -> Hir {
        match expr.into_kind() {
            HirKind::Class(mut cls) => {
                cls.case_fold_simple();
                Hir::class(cls)
            }
            _ => panic!("cannot case fold non-class Hir expr"),
        }
    }

    fn hir_negate(expr: Hir) -> Hir {
        match expr.into_kind() {
            HirKind::Class(mut cls) => {
                cls.negate();
                Hir::class(cls)
            }
            _ => panic!("cannot negate non-class Hir expr"),
        }
    }

    fn uclass(ranges: &[(char, char)]) -> hir::Class {
        let ranges: Vec<hir::ClassUnicodeRange> = ranges
            .iter()
            .map(|&(s, e)| hir::ClassUnicodeRange::new(s, e))
            .collect();
        hir::Class::Unicode(hir::ClassUnicode::new(ranges))
    }

    fn bclass(ranges: &[(u8, u8)]) -> hir::Class {
        let ranges: Vec<hir::ClassBytesRange> = ranges
            .iter()
            .map(|&(s, e)| hir::ClassBytesRange::new(s, e))
            .collect();
        hir::Class::Bytes(hir::ClassBytes::new(ranges))
    }

    #[cfg(feature = "unicode-case")]
    fn class_case_fold(mut cls: hir::Class) -> Hir {
        cls.case_fold_simple();
        Hir::class(cls)
    }

    fn class_negate(mut cls: hir::Class) -> Hir {
        cls.negate();
        Hir::class(cls)
    }

    #[allow(dead_code)]
    fn hir_union(expr1: Hir, expr2: Hir) -> Hir {
        use crate::hir::Class::{Bytes, Unicode};

        match (expr1.into_kind(), expr2.into_kind()) {
            (HirKind::Class(Unicode(mut c1)), HirKind::Class(Unicode(c2))) => {
                c1.union(&c2);
                Hir::class(hir::Class::Unicode(c1))
            }
            (HirKind::Class(Bytes(mut c1)), HirKind::Class(Bytes(c2))) => {
                c1.union(&c2);
                Hir::class(hir::Class::Bytes(c1))
            }
            _ => panic!("cannot union non-class Hir exprs"),
        }
    }

    #[allow(dead_code)]
    fn hir_difference(expr1: Hir, expr2: Hir) -> Hir {
        use crate::hir::Class::{Bytes, Unicode};

        match (expr1.into_kind(), expr2.into_kind()) {
            (HirKind::Class(Unicode(mut c1)), HirKind::Class(Unicode(c2))) => {
                c1.difference(&c2);
                Hir::class(hir::Class::Unicode(c1))
            }
            (HirKind::Class(Bytes(mut c1)), HirKind::Class(Bytes(c2))) => {
                c1.difference(&c2);
                Hir::class(hir::Class::Bytes(c1))
            }
            _ => panic!("cannot difference non-class Hir exprs"),
        }
    }

    fn hir_look(look: hir::Look) -> Hir {
        Hir::look(look)
    }

    #[test]
    fn empty() {
        assert_eq!(t(""), Hir::empty());
        assert_eq!(t("(?i)"), Hir::empty());
        assert_eq!(t("()"), hir_capture(1, Hir::empty()));
        assert_eq!(t("(?:)"), Hir::empty());
        assert_eq!(t("(?P<wat>)"), hir_capture_name(1, "wat", Hir::empty()));
        assert_eq!(t("|"), hir_alt(vec![Hir::empty(), Hir::empty()]));
        assert_eq!(
            t("()|()"),
            hir_alt(vec![
                hir_capture(1, Hir::empty()),
                hir_capture(2, Hir::empty()),
            ])
        );
        assert_eq!(
            t("(|b)"),
            hir_capture(1, hir_alt(vec![Hir::empty(), hir_lit("b"),]))
        );
        assert_eq!(
            t("(a|)"),
            hir_capture(1, hir_alt(vec![hir_lit("a"), Hir::empty(),]))
        );
        assert_eq!(
            t("(a||c)"),
            hir_capture(
                1,
                hir_alt(vec![hir_lit("a"), Hir::empty(), hir_lit("c"),])
            )
        );
        assert_eq!(
            t("(||)"),
            hir_capture(
                1,
                hir_alt(vec![Hir::empty(), Hir::empty(), Hir::empty(),])
            )
        );
    }

    #[test]
    fn literal() {
        assert_eq!(t("a"), hir_lit("a"));
        assert_eq!(t("(?-u)a"), hir_lit("a"));
        assert_eq!(t("☃"), hir_lit("☃"));
        assert_eq!(t("abcd"), hir_lit("abcd"));

        assert_eq!(t_bytes("(?-u)a"), hir_lit("a"));
        assert_eq!(t_bytes("(?-u)\x61"), hir_lit("a"));
        assert_eq!(t_bytes(r"(?-u)\x61"), hir_lit("a"));
        assert_eq!(t_bytes(r"(?-u)\xFF"), hir_blit(b"\xFF"));

        assert_eq!(t("(?-u)☃"), hir_lit("☃"));
        assert_eq!(
            t_err(r"(?-u)\xFF"),
            TestError {
                kind: hir::ErrorKind::InvalidUtf8,
                span: Span::new(
                    Position::new(5, 1, 6),
                    Position::new(9, 1, 10)
                ),
            }
        );
    }

    #[test]
    fn literal_case_insensitive() {
        #[cfg(feature = "unicode-case")]
        assert_eq!(t("(?i)a"), hir_uclass(&[('A', 'A'), ('a', 'a'),]));
        #[cfg(feature = "unicode-case")]
        assert_eq!(t("(?i:a)"), hir_uclass(&[('A', 'A'), ('a', 'a')]));
        #[cfg(feature = "unicode-case")]
        assert_eq!(
            t("a(?i)a(?-i)a"),
            hir_cat(vec![
                hir_lit("a"),
                hir_uclass(&[('A', 'A'), ('a', 'a')]),
                hir_lit("a"),
            ])
        );
        #[cfg(feature = "unicode-case")]
        assert_eq!(
            t("(?i)ab@c"),
            hir_cat(vec![
                hir_uclass(&[('A', 'A'), ('a', 'a')]),
                hir_uclass(&[('B', 'B'), ('b', 'b')]),
                hir_lit("@"),
                hir_uclass(&[('C', 'C'), ('c', 'c')]),
            ])
        );
        #[cfg(feature = "unicode-case")]
        assert_eq!(
            t("(?i)β"),
            hir_uclass(&[('Β', 'Β'), ('β', 'β'), ('ϐ', 'ϐ'),])
        );

        assert_eq!(t("(?i-u)a"), hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]));
        #[cfg(feature = "unicode-case")]
        assert_eq!(
            t("(?-u)a(?i)a(?-i)a"),
            hir_cat(vec![
                hir_lit("a"),
                hir_bclass(&[(b'A', b'A'), (b'a', b'a')]),
                hir_lit("a"),
            ])
        );
        assert_eq!(
            t("(?i-u)ab@c"),
            hir_cat(vec![
                hir_bclass(&[(b'A', b'A'), (b'a', b'a')]),
                hir_bclass(&[(b'B', b'B'), (b'b', b'b')]),
                hir_lit("@"),
                hir_bclass(&[(b'C', b'C'), (b'c', b'c')]),
            ])
        );

        assert_eq!(
            t_bytes("(?i-u)a"),
            hir_bclass(&[(b'A', b'A'), (b'a', b'a'),])
        );
+ assert_eq!( + t_bytes("(?i-u)\x61"), + hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]) + ); + assert_eq!( + t_bytes(r"(?i-u)\x61"), + hir_bclass(&[(b'A', b'A'), (b'a', b'a'),]) + ); + assert_eq!(t_bytes(r"(?i-u)\xFF"), hir_blit(b"\xFF")); + + assert_eq!(t("(?i-u)β"), hir_lit("β"),); + } + + #[test] + fn dot() { + assert_eq!( + t("."), + hir_uclass(&[('\0', '\t'), ('\x0B', '\u{10FFFF}')]) + ); + assert_eq!( + t("(?R)."), + hir_uclass(&[ + ('\0', '\t'), + ('\x0B', '\x0C'), + ('\x0E', '\u{10FFFF}'), + ]) + ); + assert_eq!(t("(?s)."), hir_uclass(&[('\0', '\u{10FFFF}')])); + assert_eq!(t("(?Rs)."), hir_uclass(&[('\0', '\u{10FFFF}')])); + assert_eq!( + t_bytes("(?-u)."), + hir_bclass(&[(b'\0', b'\t'), (b'\x0B', b'\xFF')]) + ); + assert_eq!( + t_bytes("(?R-u)."), + hir_bclass(&[ + (b'\0', b'\t'), + (b'\x0B', b'\x0C'), + (b'\x0E', b'\xFF'), + ]) + ); + assert_eq!(t_bytes("(?s-u)."), hir_bclass(&[(b'\0', b'\xFF'),])); + assert_eq!(t_bytes("(?Rs-u)."), hir_bclass(&[(b'\0', b'\xFF'),])); + + // If invalid UTF-8 isn't allowed, then non-Unicode `.` isn't allowed. 
+ assert_eq!( + t_err("(?-u)."), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(5, 1, 6), + Position::new(6, 1, 7) + ), + } + ); + assert_eq!( + t_err("(?R-u)."), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(7, 1, 8) + ), + } + ); + assert_eq!( + t_err("(?s-u)."), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(7, 1, 8) + ), + } + ); + assert_eq!( + t_err("(?Rs-u)."), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(7, 1, 8), + Position::new(8, 1, 9) + ), + } + ); + } + + #[test] + fn assertions() { + assert_eq!(t("^"), hir_look(hir::Look::Start)); + assert_eq!(t("$"), hir_look(hir::Look::End)); + assert_eq!(t(r"\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"\z"), hir_look(hir::Look::End)); + assert_eq!(t("(?m)^"), hir_look(hir::Look::StartLF)); + assert_eq!(t("(?m)$"), hir_look(hir::Look::EndLF)); + assert_eq!(t(r"(?m)\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"(?m)\z"), hir_look(hir::Look::End)); + + assert_eq!(t(r"\b"), hir_look(hir::Look::WordUnicode)); + assert_eq!(t(r"\B"), hir_look(hir::Look::WordUnicodeNegate)); + assert_eq!(t(r"(?-u)\b"), hir_look(hir::Look::WordAscii)); + assert_eq!(t(r"(?-u)\B"), hir_look(hir::Look::WordAsciiNegate)); + } + + #[test] + fn group() { + assert_eq!(t("(a)"), hir_capture(1, hir_lit("a"))); + assert_eq!( + t("(a)(b)"), + hir_cat(vec![ + hir_capture(1, hir_lit("a")), + hir_capture(2, hir_lit("b")), + ]) + ); + assert_eq!( + t("(a)|(b)"), + hir_alt(vec![ + hir_capture(1, hir_lit("a")), + hir_capture(2, hir_lit("b")), + ]) + ); + assert_eq!(t("(?P)"), hir_capture_name(1, "foo", Hir::empty())); + assert_eq!(t("(?Pa)"), hir_capture_name(1, "foo", hir_lit("a"))); + assert_eq!( + t("(?Pa)(?Pb)"), + hir_cat(vec![ + hir_capture_name(1, "foo", hir_lit("a")), + hir_capture_name(2, "bar", hir_lit("b")), + ]) + ); + 
assert_eq!(t("(?:)"), Hir::empty()); + assert_eq!(t("(?:a)"), hir_lit("a")); + assert_eq!( + t("(?:a)(b)"), + hir_cat(vec![hir_lit("a"), hir_capture(1, hir_lit("b")),]) + ); + assert_eq!( + t("(a)(?:b)(c)"), + hir_cat(vec![ + hir_capture(1, hir_lit("a")), + hir_lit("b"), + hir_capture(2, hir_lit("c")), + ]) + ); + assert_eq!( + t("(a)(?Pb)(c)"), + hir_cat(vec![ + hir_capture(1, hir_lit("a")), + hir_capture_name(2, "foo", hir_lit("b")), + hir_capture(3, hir_lit("c")), + ]) + ); + assert_eq!(t("()"), hir_capture(1, Hir::empty())); + assert_eq!(t("((?i))"), hir_capture(1, Hir::empty())); + assert_eq!(t("((?x))"), hir_capture(1, Hir::empty())); + assert_eq!( + t("(((?x)))"), + hir_capture(1, hir_capture(2, Hir::empty())) + ); + } + + #[test] + fn line_anchors() { + assert_eq!(t("^"), hir_look(hir::Look::Start)); + assert_eq!(t("$"), hir_look(hir::Look::End)); + assert_eq!(t(r"\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"\z"), hir_look(hir::Look::End)); + + assert_eq!(t(r"(?m)\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"(?m)\z"), hir_look(hir::Look::End)); + assert_eq!(t("(?m)^"), hir_look(hir::Look::StartLF)); + assert_eq!(t("(?m)$"), hir_look(hir::Look::EndLF)); + + assert_eq!(t(r"(?R)\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"(?R)\z"), hir_look(hir::Look::End)); + assert_eq!(t("(?R)^"), hir_look(hir::Look::Start)); + assert_eq!(t("(?R)$"), hir_look(hir::Look::End)); + + assert_eq!(t(r"(?Rm)\A"), hir_look(hir::Look::Start)); + assert_eq!(t(r"(?Rm)\z"), hir_look(hir::Look::End)); + assert_eq!(t("(?Rm)^"), hir_look(hir::Look::StartCRLF)); + assert_eq!(t("(?Rm)$"), hir_look(hir::Look::EndCRLF)); + } + + #[test] + fn flags() { + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i:a)a"), + hir_cat( + vec![hir_uclass(&[('A', 'A'), ('a', 'a')]), hir_lit("a"),] + ) + ); + assert_eq!( + t("(?i-u:a)β"), + hir_cat(vec![ + hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), + hir_lit("β"), + ]) + ); + assert_eq!( + t("(?:(?i-u)a)b"), + hir_cat(vec![ + 
hir_bclass(&[(b'A', b'A'), (b'a', b'a')]), + hir_lit("b"), + ]) + ); + assert_eq!( + t("((?i-u)a)b"), + hir_cat(vec![ + hir_capture(1, hir_bclass(&[(b'A', b'A'), (b'a', b'a')])), + hir_lit("b"), + ]) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)(?-i:a)a"), + hir_cat( + vec![hir_lit("a"), hir_uclass(&[('A', 'A'), ('a', 'a')]),] + ) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?im)a^"), + hir_cat(vec![ + hir_uclass(&[('A', 'A'), ('a', 'a')]), + hir_look(hir::Look::StartLF), + ]) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?im)a^(?i-m)a^"), + hir_cat(vec![ + hir_uclass(&[('A', 'A'), ('a', 'a')]), + hir_look(hir::Look::StartLF), + hir_uclass(&[('A', 'A'), ('a', 'a')]), + hir_look(hir::Look::Start), + ]) + ); + assert_eq!( + t("(?U)a*a*?(?-U)a*a*?"), + hir_cat(vec![ + hir_star(false, hir_lit("a")), + hir_star(true, hir_lit("a")), + hir_star(true, hir_lit("a")), + hir_star(false, hir_lit("a")), + ]) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?:a(?i)a)a"), + hir_cat(vec![ + hir_cat(vec![ + hir_lit("a"), + hir_uclass(&[('A', 'A'), ('a', 'a')]), + ]), + hir_lit("a"), + ]) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)(?:a(?-i)a)a"), + hir_cat(vec![ + hir_cat(vec![ + hir_uclass(&[('A', 'A'), ('a', 'a')]), + hir_lit("a"), + ]), + hir_uclass(&[('A', 'A'), ('a', 'a')]), + ]) + ); + } + + #[test] + fn escape() { + assert_eq!( + t(r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#"), + hir_lit(r"\.+*?()|[]{}^$#") + ); + } + + #[test] + fn repetition() { + assert_eq!(t("a?"), hir_quest(true, hir_lit("a"))); + assert_eq!(t("a*"), hir_star(true, hir_lit("a"))); + assert_eq!(t("a+"), hir_plus(true, hir_lit("a"))); + assert_eq!(t("a??"), hir_quest(false, hir_lit("a"))); + assert_eq!(t("a*?"), hir_star(false, hir_lit("a"))); + assert_eq!(t("a+?"), hir_plus(false, hir_lit("a"))); + + assert_eq!(t("a{1}"), hir_range(true, 1, Some(1), hir_lit("a"),)); + assert_eq!(t("a{1,}"), hir_range(true, 1, None, hir_lit("a"),)); + 
assert_eq!(t("a{1,2}"), hir_range(true, 1, Some(2), hir_lit("a"),)); + assert_eq!(t("a{1}?"), hir_range(false, 1, Some(1), hir_lit("a"),)); + assert_eq!(t("a{1,}?"), hir_range(false, 1, None, hir_lit("a"),)); + assert_eq!(t("a{1,2}?"), hir_range(false, 1, Some(2), hir_lit("a"),)); + + assert_eq!( + t("ab?"), + hir_cat(vec![hir_lit("a"), hir_quest(true, hir_lit("b")),]) + ); + assert_eq!(t("(ab)?"), hir_quest(true, hir_capture(1, hir_lit("ab")))); + assert_eq!( + t("a|b?"), + hir_alt(vec![hir_lit("a"), hir_quest(true, hir_lit("b")),]) + ); + } + + #[test] + fn cat_alt() { + let a = || hir_look(hir::Look::Start); + let b = || hir_look(hir::Look::End); + let c = || hir_look(hir::Look::WordUnicode); + let d = || hir_look(hir::Look::WordUnicodeNegate); + + assert_eq!(t("(^$)"), hir_capture(1, hir_cat(vec![a(), b()]))); + assert_eq!(t("^|$"), hir_alt(vec![a(), b()])); + assert_eq!(t(r"^|$|\b"), hir_alt(vec![a(), b(), c()])); + assert_eq!( + t(r"^$|$\b|\b\B"), + hir_alt(vec![ + hir_cat(vec![a(), b()]), + hir_cat(vec![b(), c()]), + hir_cat(vec![c(), d()]), + ]) + ); + assert_eq!(t("(^|$)"), hir_capture(1, hir_alt(vec![a(), b()]))); + assert_eq!( + t(r"(^|$|\b)"), + hir_capture(1, hir_alt(vec![a(), b(), c()])) + ); + assert_eq!( + t(r"(^$|$\b|\b\B)"), + hir_capture( + 1, + hir_alt(vec![ + hir_cat(vec![a(), b()]), + hir_cat(vec![b(), c()]), + hir_cat(vec![c(), d()]), + ]) + ) + ); + assert_eq!( + t(r"(^$|($\b|(\b\B)))"), + hir_capture( + 1, + hir_alt(vec![ + hir_cat(vec![a(), b()]), + hir_capture( + 2, + hir_alt(vec![ + hir_cat(vec![b(), c()]), + hir_capture(3, hir_cat(vec![c(), d()])), + ]) + ), + ]) + ) + ); + } + + // Tests the HIR transformation of things like '[a-z]|[A-Z]' into + // '[A-Za-z]'. In other words, an alternation of just classes is always + // equivalent to a single class corresponding to the union of the branches + // in that class. (Unless some branches match invalid UTF-8 and others + // match non-ASCII Unicode.) 
+ #[test] + fn cat_class_flattened() { + assert_eq!(t(r"[a-z]|[A-Z]"), hir_uclass(&[('A', 'Z'), ('a', 'z')])); + // Combining all of the letter properties should give us the one giant + // letter property. + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"(?x) + \p{Lowercase_Letter} + |\p{Uppercase_Letter} + |\p{Titlecase_Letter} + |\p{Modifier_Letter} + |\p{Other_Letter} + "), + hir_uclass_query(ClassQuery::Binary("letter")) + ); + // Byte classes that can truly match invalid UTF-8 cannot be combined + // with Unicode classes. + assert_eq!( + t_bytes(r"[Δδ]|(?-u:[\x90-\xFF])|[Λλ]"), + hir_alt(vec![ + hir_uclass(&[('Δ', 'Δ'), ('δ', 'δ')]), + hir_bclass(&[(b'\x90', b'\xFF')]), + hir_uclass(&[('Λ', 'Λ'), ('λ', 'λ')]), + ]) + ); + // Byte classes on their own can be combined, even if some are ASCII + // and others are invalid UTF-8. + assert_eq!( + t_bytes(r"[a-z]|(?-u:[\x90-\xFF])|[A-Z]"), + hir_bclass(&[(b'A', b'Z'), (b'a', b'z'), (b'\x90', b'\xFF')]), + ); + } + + #[test] + fn class_ascii() { + assert_eq!( + t("[[:alnum:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Alnum) + ); + assert_eq!( + t("[[:alpha:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Alpha) + ); + assert_eq!( + t("[[:ascii:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Ascii) + ); + assert_eq!( + t("[[:blank:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Blank) + ); + assert_eq!( + t("[[:cntrl:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Cntrl) + ); + assert_eq!( + t("[[:digit:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Digit) + ); + assert_eq!( + t("[[:graph:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Graph) + ); + assert_eq!( + t("[[:lower:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Lower) + ); + assert_eq!( + t("[[:print:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Print) + ); + assert_eq!( + t("[[:punct:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Punct) + ); + assert_eq!( + t("[[:space:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Space) + ); + assert_eq!( + 
t("[[:upper:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Upper) + ); + assert_eq!( + t("[[:word:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Word) + ); + assert_eq!( + t("[[:xdigit:]]"), + hir_ascii_uclass(&ast::ClassAsciiKind::Xdigit) + ); + + assert_eq!( + t("[[:^lower:]]"), + hir_negate(hir_ascii_uclass(&ast::ClassAsciiKind::Lower)) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[[:lower:]]"), + hir_uclass(&[ + ('A', 'Z'), + ('a', 'z'), + ('\u{17F}', '\u{17F}'), + ('\u{212A}', '\u{212A}'), + ]) + ); + + assert_eq!( + t("(?-u)[[:lower:]]"), + hir_ascii_bclass(&ast::ClassAsciiKind::Lower) + ); + assert_eq!( + t("(?i-u)[[:lower:]]"), + hir_case_fold(hir_ascii_bclass(&ast::ClassAsciiKind::Lower)) + ); + + assert_eq!( + t_err("(?-u)[[:^lower:]]"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(16, 1, 17) + ), + } + ); + assert_eq!( + t_err("(?i-u)[[:^lower:]]"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(7, 1, 8), + Position::new(17, 1, 18) + ), + } + ); + } + + #[test] + fn class_ascii_multiple() { + // See: https://github.com/rust-lang/regex/issues/680 + assert_eq!( + t("[[:alnum:][:^ascii:]]"), + hir_union( + hir_ascii_uclass(&ast::ClassAsciiKind::Alnum), + hir_uclass(&[('\u{80}', '\u{10FFFF}')]), + ), + ); + assert_eq!( + t_bytes("(?-u)[[:alnum:][:^ascii:]]"), + hir_union( + hir_ascii_bclass(&ast::ClassAsciiKind::Alnum), + hir_bclass(&[(0x80, 0xFF)]), + ), + ); + } + + #[test] + #[cfg(feature = "unicode-perl")] + fn class_perl_unicode() { + // Unicode + assert_eq!(t(r"\d"), hir_uclass_query(ClassQuery::Binary("digit"))); + assert_eq!(t(r"\s"), hir_uclass_query(ClassQuery::Binary("space"))); + assert_eq!(t(r"\w"), hir_uclass_perl_word()); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\d"), + hir_uclass_query(ClassQuery::Binary("digit")) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\s"), + 
hir_uclass_query(ClassQuery::Binary("space")) + ); + #[cfg(feature = "unicode-case")] + assert_eq!(t(r"(?i)\w"), hir_uclass_perl_word()); + + // Unicode, negated + assert_eq!( + t(r"\D"), + hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) + ); + assert_eq!( + t(r"\S"), + hir_negate(hir_uclass_query(ClassQuery::Binary("space"))) + ); + assert_eq!(t(r"\W"), hir_negate(hir_uclass_perl_word())); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\D"), + hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\S"), + hir_negate(hir_uclass_query(ClassQuery::Binary("space"))) + ); + #[cfg(feature = "unicode-case")] + assert_eq!(t(r"(?i)\W"), hir_negate(hir_uclass_perl_word())); + } + + #[test] + fn class_perl_ascii() { + // ASCII only + assert_eq!( + t(r"(?-u)\d"), + hir_ascii_bclass(&ast::ClassAsciiKind::Digit) + ); + assert_eq!( + t(r"(?-u)\s"), + hir_ascii_bclass(&ast::ClassAsciiKind::Space) + ); + assert_eq!( + t(r"(?-u)\w"), + hir_ascii_bclass(&ast::ClassAsciiKind::Word) + ); + assert_eq!( + t(r"(?i-u)\d"), + hir_ascii_bclass(&ast::ClassAsciiKind::Digit) + ); + assert_eq!( + t(r"(?i-u)\s"), + hir_ascii_bclass(&ast::ClassAsciiKind::Space) + ); + assert_eq!( + t(r"(?i-u)\w"), + hir_ascii_bclass(&ast::ClassAsciiKind::Word) + ); + + // ASCII only, negated + assert_eq!( + t_bytes(r"(?-u)\D"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) + ); + assert_eq!( + t_bytes(r"(?-u)\S"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Space)) + ); + assert_eq!( + t_bytes(r"(?-u)\W"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) + ); + assert_eq!( + t_bytes(r"(?i-u)\D"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) + ); + assert_eq!( + t_bytes(r"(?i-u)\S"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Space)) + ); + assert_eq!( + t_bytes(r"(?i-u)\W"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) + ); + + // ASCII only, negated, with 
UTF-8 mode enabled. + // In this case, negating any Perl class results in an error because + // all such classes can match invalid UTF-8. + assert_eq!( + t_err(r"(?-u)\D"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(5, 1, 6), + Position::new(7, 1, 8), + ), + }, + ); + assert_eq!( + t_err(r"(?-u)\S"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(5, 1, 6), + Position::new(7, 1, 8), + ), + }, + ); + assert_eq!( + t_err(r"(?-u)\W"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(5, 1, 6), + Position::new(7, 1, 8), + ), + }, + ); + assert_eq!( + t_err(r"(?i-u)\D"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(8, 1, 9), + ), + }, + ); + assert_eq!( + t_err(r"(?i-u)\S"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(8, 1, 9), + ), + }, + ); + assert_eq!( + t_err(r"(?i-u)\W"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(6, 1, 7), + Position::new(8, 1, 9), + ), + }, + ); + } + + #[test] + #[cfg(not(feature = "unicode-perl"))] + fn class_perl_word_disabled() { + assert_eq!( + t_err(r"\w"), + TestError { + kind: hir::ErrorKind::UnicodePerlClassNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(2, 1, 3) + ), + } + ); + } + + #[test] + #[cfg(all(not(feature = "unicode-perl"), not(feature = "unicode-bool")))] + fn class_perl_space_disabled() { + assert_eq!( + t_err(r"\s"), + TestError { + kind: hir::ErrorKind::UnicodePerlClassNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(2, 1, 3) + ), + } + ); + } + + #[test] + #[cfg(all( + not(feature = "unicode-perl"), + not(feature = "unicode-gencat") + ))] + fn class_perl_digit_disabled() { + assert_eq!( + t_err(r"\d"), + TestError { + kind: hir::ErrorKind::UnicodePerlClassNotFound, + span: Span::new( + 
Position::new(0, 1, 1), + Position::new(2, 1, 3) + ), + } + ); + } + + #[test] + #[cfg(feature = "unicode-gencat")] + fn class_unicode_gencat() { + assert_eq!(t(r"\pZ"), hir_uclass_query(ClassQuery::Binary("Z"))); + assert_eq!(t(r"\pz"), hir_uclass_query(ClassQuery::Binary("Z"))); + assert_eq!( + t(r"\p{Separator}"), + hir_uclass_query(ClassQuery::Binary("Z")) + ); + assert_eq!( + t(r"\p{se PaRa ToR}"), + hir_uclass_query(ClassQuery::Binary("Z")) + ); + assert_eq!( + t(r"\p{gc:Separator}"), + hir_uclass_query(ClassQuery::Binary("Z")) + ); + assert_eq!( + t(r"\p{gc=Separator}"), + hir_uclass_query(ClassQuery::Binary("Z")) + ); + assert_eq!( + t(r"\p{Other}"), + hir_uclass_query(ClassQuery::Binary("Other")) + ); + assert_eq!(t(r"\pC"), hir_uclass_query(ClassQuery::Binary("Other"))); + + assert_eq!( + t(r"\PZ"), + hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) + ); + assert_eq!( + t(r"\P{separator}"), + hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) + ); + assert_eq!( + t(r"\P{gc!=separator}"), + hir_negate(hir_uclass_query(ClassQuery::Binary("Z"))) + ); + + assert_eq!(t(r"\p{any}"), hir_uclass_query(ClassQuery::Binary("Any"))); + assert_eq!( + t(r"\p{assigned}"), + hir_uclass_query(ClassQuery::Binary("Assigned")) + ); + assert_eq!( + t(r"\p{ascii}"), + hir_uclass_query(ClassQuery::Binary("ASCII")) + ); + assert_eq!( + t(r"\p{gc:any}"), + hir_uclass_query(ClassQuery::Binary("Any")) + ); + assert_eq!( + t(r"\p{gc:assigned}"), + hir_uclass_query(ClassQuery::Binary("Assigned")) + ); + assert_eq!( + t(r"\p{gc:ascii}"), + hir_uclass_query(ClassQuery::Binary("ASCII")) + ); + + assert_eq!( + t_err(r"(?-u)\pZ"), + TestError { + kind: hir::ErrorKind::UnicodeNotAllowed, + span: Span::new( + Position::new(5, 1, 6), + Position::new(8, 1, 9) + ), + } + ); + assert_eq!( + t_err(r"(?-u)\p{Separator}"), + TestError { + kind: hir::ErrorKind::UnicodeNotAllowed, + span: Span::new( + Position::new(5, 1, 6), + Position::new(18, 1, 19) + ), + } + ); + assert_eq!( + 
t_err(r"\pE"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(3, 1, 4) + ), + } + ); + assert_eq!( + t_err(r"\p{Foo}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(7, 1, 8) + ), + } + ); + assert_eq!( + t_err(r"\p{gc:Foo}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyValueNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(10, 1, 11) + ), + } + ); + } + + #[test] + #[cfg(not(feature = "unicode-gencat"))] + fn class_unicode_gencat_disabled() { + assert_eq!( + t_err(r"\p{Separator}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(13, 1, 14) + ), + } + ); + + assert_eq!( + t_err(r"\p{Any}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(7, 1, 8) + ), + } + ); + } + + #[test] + #[cfg(feature = "unicode-script")] + fn class_unicode_script() { + assert_eq!( + t(r"\p{Greek}"), + hir_uclass_query(ClassQuery::Binary("Greek")) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\p{Greek}"), + hir_case_fold(hir_uclass_query(ClassQuery::Binary("Greek"))) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)\P{Greek}"), + hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( + "Greek" + )))) + ); + + assert_eq!( + t_err(r"\p{sc:Foo}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyValueNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(10, 1, 11) + ), + } + ); + assert_eq!( + t_err(r"\p{scx:Foo}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyValueNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(11, 1, 12) + ), + } + ); + } + + #[test] + #[cfg(not(feature = "unicode-script"))] + fn class_unicode_script_disabled() { + assert_eq!( + t_err(r"\p{Greek}"), + 
TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(9, 1, 10) + ), + } + ); + + assert_eq!( + t_err(r"\p{scx:Greek}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(13, 1, 14) + ), + } + ); + } + + #[test] + #[cfg(feature = "unicode-age")] + fn class_unicode_age() { + assert_eq!( + t_err(r"\p{age:Foo}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyValueNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(11, 1, 12) + ), + } + ); + } + + #[test] + #[cfg(feature = "unicode-gencat")] + fn class_unicode_any_empty() { + assert_eq!(t(r"\P{any}"), hir_uclass(&[]),); + } + + #[test] + #[cfg(not(feature = "unicode-age"))] + fn class_unicode_age_disabled() { + assert_eq!( + t_err(r"\p{age:3.0}"), + TestError { + kind: hir::ErrorKind::UnicodePropertyNotFound, + span: Span::new( + Position::new(0, 1, 1), + Position::new(11, 1, 12) + ), + } + ); + } + + #[test] + fn class_bracketed() { + assert_eq!(t("[a]"), hir_lit("a")); + assert_eq!(t("[ab]"), hir_uclass(&[('a', 'b')])); + assert_eq!(t("[^[a]]"), class_negate(uclass(&[('a', 'a')]))); + assert_eq!(t("[a-z]"), hir_uclass(&[('a', 'z')])); + assert_eq!(t("[a-fd-h]"), hir_uclass(&[('a', 'h')])); + assert_eq!(t("[a-fg-m]"), hir_uclass(&[('a', 'm')])); + assert_eq!(t(r"[\x00]"), hir_uclass(&[('\0', '\0')])); + assert_eq!(t(r"[\n]"), hir_uclass(&[('\n', '\n')])); + assert_eq!(t("[\n]"), hir_uclass(&[('\n', '\n')])); + #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))] + assert_eq!(t(r"[\d]"), hir_uclass_query(ClassQuery::Binary("digit"))); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[\pZ]"), + hir_uclass_query(ClassQuery::Binary("separator")) + ); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[\p{separator}]"), + hir_uclass_query(ClassQuery::Binary("separator")) + ); + #[cfg(any(feature = "unicode-perl", feature = 
"unicode-gencat"))] + assert_eq!(t(r"[^\D]"), hir_uclass_query(ClassQuery::Binary("digit"))); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[^\PZ]"), + hir_uclass_query(ClassQuery::Binary("separator")) + ); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[^\P{separator}]"), + hir_uclass_query(ClassQuery::Binary("separator")) + ); + #[cfg(all( + feature = "unicode-case", + any(feature = "unicode-perl", feature = "unicode-gencat") + ))] + assert_eq!( + t(r"(?i)[^\D]"), + hir_uclass_query(ClassQuery::Binary("digit")) + ); + #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] + assert_eq!( + t(r"(?i)[^\P{greek}]"), + hir_case_fold(hir_uclass_query(ClassQuery::Binary("greek"))) + ); + + assert_eq!(t("(?-u)[a]"), hir_bclass(&[(b'a', b'a')])); + assert_eq!(t(r"(?-u)[\x00]"), hir_bclass(&[(b'\0', b'\0')])); + assert_eq!(t_bytes(r"(?-u)[\xFF]"), hir_bclass(&[(b'\xFF', b'\xFF')])); + + #[cfg(feature = "unicode-case")] + assert_eq!(t("(?i)[a]"), hir_uclass(&[('A', 'A'), ('a', 'a')])); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[k]"), + hir_uclass(&[('K', 'K'), ('k', 'k'), ('\u{212A}', '\u{212A}'),]) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[β]"), + hir_uclass(&[('Β', 'Β'), ('β', 'β'), ('ϐ', 'ϐ'),]) + ); + assert_eq!(t("(?i-u)[k]"), hir_bclass(&[(b'K', b'K'), (b'k', b'k'),])); + + assert_eq!(t("[^a]"), class_negate(uclass(&[('a', 'a')]))); + assert_eq!(t(r"[^\x00]"), class_negate(uclass(&[('\0', '\0')]))); + assert_eq!( + t_bytes("(?-u)[^a]"), + class_negate(bclass(&[(b'a', b'a')])) + ); + #[cfg(any(feature = "unicode-perl", feature = "unicode-gencat"))] + assert_eq!( + t(r"[^\d]"), + hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) + ); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[^\pZ]"), + hir_negate(hir_uclass_query(ClassQuery::Binary("separator"))) + ); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[^\p{separator}]"), + 
hir_negate(hir_uclass_query(ClassQuery::Binary("separator"))) + ); + #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] + assert_eq!( + t(r"(?i)[^\p{greek}]"), + hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( + "greek" + )))) + ); + #[cfg(all(feature = "unicode-case", feature = "unicode-script"))] + assert_eq!( + t(r"(?i)[\P{greek}]"), + hir_negate(hir_case_fold(hir_uclass_query(ClassQuery::Binary( + "greek" + )))) + ); + + // Test some weird cases. + assert_eq!(t(r"[\[]"), hir_uclass(&[('[', '[')])); + + assert_eq!(t(r"[&]"), hir_uclass(&[('&', '&')])); + assert_eq!(t(r"[\&]"), hir_uclass(&[('&', '&')])); + assert_eq!(t(r"[\&\&]"), hir_uclass(&[('&', '&')])); + assert_eq!(t(r"[\x00-&]"), hir_uclass(&[('\0', '&')])); + assert_eq!(t(r"[&-\xFF]"), hir_uclass(&[('&', '\u{FF}')])); + + assert_eq!(t(r"[~]"), hir_uclass(&[('~', '~')])); + assert_eq!(t(r"[\~]"), hir_uclass(&[('~', '~')])); + assert_eq!(t(r"[\~\~]"), hir_uclass(&[('~', '~')])); + assert_eq!(t(r"[\x00-~]"), hir_uclass(&[('\0', '~')])); + assert_eq!(t(r"[~-\xFF]"), hir_uclass(&[('~', '\u{FF}')])); + + assert_eq!(t(r"[-]"), hir_uclass(&[('-', '-')])); + assert_eq!(t(r"[\-]"), hir_uclass(&[('-', '-')])); + assert_eq!(t(r"[\-\-]"), hir_uclass(&[('-', '-')])); + assert_eq!(t(r"[\x00-\-]"), hir_uclass(&[('\0', '-')])); + assert_eq!(t(r"[\--\xFF]"), hir_uclass(&[('-', '\u{FF}')])); + + assert_eq!( + t_err("(?-u)[^a]"), + TestError { + kind: hir::ErrorKind::InvalidUtf8, + span: Span::new( + Position::new(5, 1, 6), + Position::new(9, 1, 10) + ), + } + ); + #[cfg(any(feature = "unicode-perl", feature = "unicode-bool"))] + assert_eq!(t(r"[^\s\S]"), hir_uclass(&[]),); + #[cfg(any(feature = "unicode-perl", feature = "unicode-bool"))] + assert_eq!(t_bytes(r"(?-u)[^\s\S]"), hir_bclass(&[]),); + } + + #[test] + fn class_bracketed_union() { + assert_eq!(t("[a-zA-Z]"), hir_uclass(&[('A', 'Z'), ('a', 'z')])); + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[a\pZb]"), + hir_union( + 
hir_uclass(&[('a', 'b')]), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + ); + #[cfg(all(feature = "unicode-gencat", feature = "unicode-script"))] + assert_eq!( + t(r"[\pZ\p{Greek}]"), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + ); + #[cfg(all( + feature = "unicode-age", + feature = "unicode-gencat", + feature = "unicode-script" + ))] + assert_eq!( + t(r"[\p{age:3.0}\pZ\p{Greek}]"), + hir_union( + hir_uclass_query(ClassQuery::ByValue { + property_name: "age", + property_value: "3.0", + }), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + ) + ); + #[cfg(all( + feature = "unicode-age", + feature = "unicode-gencat", + feature = "unicode-script" + ))] + assert_eq!( + t(r"[[[\p{age:3.0}\pZ]\p{Greek}][\p{Cyrillic}]]"), + hir_union( + hir_uclass_query(ClassQuery::ByValue { + property_name: "age", + property_value: "3.0", + }), + hir_union( + hir_uclass_query(ClassQuery::Binary("cyrillic")), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + ) + ) + ); + + #[cfg(all( + feature = "unicode-age", + feature = "unicode-case", + feature = "unicode-gencat", + feature = "unicode-script" + ))] + assert_eq!( + t(r"(?i)[\p{age:3.0}\pZ\p{Greek}]"), + hir_case_fold(hir_union( + hir_uclass_query(ClassQuery::ByValue { + property_name: "age", + property_value: "3.0", + }), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + )) + ); + #[cfg(all( + feature = "unicode-age", + feature = "unicode-gencat", + feature = "unicode-script" + ))] + assert_eq!( + t(r"[^\p{age:3.0}\pZ\p{Greek}]"), + hir_negate(hir_union( + hir_uclass_query(ClassQuery::ByValue { + property_name: "age", + property_value: "3.0", + }), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + 
hir_uclass_query(ClassQuery::Binary("separator")) + ) + )) + ); + #[cfg(all( + feature = "unicode-age", + feature = "unicode-case", + feature = "unicode-gencat", + feature = "unicode-script" + ))] + assert_eq!( + t(r"(?i)[^\p{age:3.0}\pZ\p{Greek}]"), + hir_negate(hir_case_fold(hir_union( + hir_uclass_query(ClassQuery::ByValue { + property_name: "age", + property_value: "3.0", + }), + hir_union( + hir_uclass_query(ClassQuery::Binary("greek")), + hir_uclass_query(ClassQuery::Binary("separator")) + ) + ))) + ); + } + + #[test] + fn class_bracketed_nested() { + assert_eq!(t(r"[a[^c]]"), class_negate(uclass(&[('c', 'c')]))); + assert_eq!(t(r"[a-b[^c]]"), class_negate(uclass(&[('c', 'c')]))); + assert_eq!(t(r"[a-c[^c]]"), class_negate(uclass(&[]))); + + assert_eq!(t(r"[^a[^c]]"), hir_uclass(&[('c', 'c')])); + assert_eq!(t(r"[^a-b[^c]]"), hir_uclass(&[('c', 'c')])); + + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)[a[^c]]"), + hir_negate(class_case_fold(uclass(&[('c', 'c')]))) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)[a-b[^c]]"), + hir_negate(class_case_fold(uclass(&[('c', 'c')]))) + ); + + #[cfg(feature = "unicode-case")] + assert_eq!(t(r"(?i)[^a[^c]]"), hir_uclass(&[('C', 'C'), ('c', 'c')])); + #[cfg(feature = "unicode-case")] + assert_eq!( + t(r"(?i)[^a-b[^c]]"), + hir_uclass(&[('C', 'C'), ('c', 'c')]) + ); + + assert_eq!(t(r"[^a-c[^c]]"), hir_uclass(&[]),); + #[cfg(feature = "unicode-case")] + assert_eq!(t(r"(?i)[^a-c[^c]]"), hir_uclass(&[]),); + } + + #[test] + fn class_bracketed_intersect() { + assert_eq!(t("[abc&&b-c]"), hir_uclass(&[('b', 'c')])); + assert_eq!(t("[abc&&[b-c]]"), hir_uclass(&[('b', 'c')])); + assert_eq!(t("[[abc]&&[b-c]]"), hir_uclass(&[('b', 'c')])); + assert_eq!(t("[a-z&&b-y&&c-x]"), hir_uclass(&[('c', 'x')])); + assert_eq!(t("[c-da-b&&a-d]"), hir_uclass(&[('a', 'd')])); + assert_eq!(t("[a-d&&c-da-b]"), hir_uclass(&[('a', 'd')])); + assert_eq!(t(r"[a-z&&a-c]"), hir_uclass(&[('a', 'c')])); + 
assert_eq!(t(r"[[a-z&&a-c]]"), hir_uclass(&[('a', 'c')])); + assert_eq!(t(r"[^[a-z&&a-c]]"), hir_negate(hir_uclass(&[('a', 'c')]))); + + assert_eq!(t("(?-u)[abc&&b-c]"), hir_bclass(&[(b'b', b'c')])); + assert_eq!(t("(?-u)[abc&&[b-c]]"), hir_bclass(&[(b'b', b'c')])); + assert_eq!(t("(?-u)[[abc]&&[b-c]]"), hir_bclass(&[(b'b', b'c')])); + assert_eq!(t("(?-u)[a-z&&b-y&&c-x]"), hir_bclass(&[(b'c', b'x')])); + assert_eq!(t("(?-u)[c-da-b&&a-d]"), hir_bclass(&[(b'a', b'd')])); + assert_eq!(t("(?-u)[a-d&&c-da-b]"), hir_bclass(&[(b'a', b'd')])); + + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[abc&&b-c]"), + hir_case_fold(hir_uclass(&[('b', 'c')])) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[abc&&[b-c]]"), + hir_case_fold(hir_uclass(&[('b', 'c')])) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[[abc]&&[b-c]]"), + hir_case_fold(hir_uclass(&[('b', 'c')])) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[a-z&&b-y&&c-x]"), + hir_case_fold(hir_uclass(&[('c', 'x')])) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[c-da-b&&a-d]"), + hir_case_fold(hir_uclass(&[('a', 'd')])) + ); + #[cfg(feature = "unicode-case")] + assert_eq!( + t("(?i)[a-d&&c-da-b]"), + hir_case_fold(hir_uclass(&[('a', 'd')])) + ); + + assert_eq!( + t("(?i-u)[abc&&b-c]"), + hir_case_fold(hir_bclass(&[(b'b', b'c')])) + ); + assert_eq!( + t("(?i-u)[abc&&[b-c]]"), + hir_case_fold(hir_bclass(&[(b'b', b'c')])) + ); + assert_eq!( + t("(?i-u)[[abc]&&[b-c]]"), + hir_case_fold(hir_bclass(&[(b'b', b'c')])) + ); + assert_eq!( + t("(?i-u)[a-z&&b-y&&c-x]"), + hir_case_fold(hir_bclass(&[(b'c', b'x')])) + ); + assert_eq!( + t("(?i-u)[c-da-b&&a-d]"), + hir_case_fold(hir_bclass(&[(b'a', b'd')])) + ); + assert_eq!( + t("(?i-u)[a-d&&c-da-b]"), + hir_case_fold(hir_bclass(&[(b'a', b'd')])) + ); + + // In `[a^]`, `^` does not need to be escaped, so it makes sense that + // `^` is also allowed to be unescaped after `&&`. 
+ assert_eq!(t(r"[\^&&^]"), hir_uclass(&[('^', '^')])); + // `]` needs to be escaped after `&&` since it's not at start of class. + assert_eq!(t(r"[]&&\]]"), hir_uclass(&[(']', ']')])); + assert_eq!(t(r"[-&&-]"), hir_uclass(&[('-', '-')])); + assert_eq!(t(r"[\&&&&]"), hir_uclass(&[('&', '&')])); + assert_eq!(t(r"[\&&&\&]"), hir_uclass(&[('&', '&')])); + // Test precedence. + assert_eq!( + t(r"[a-w&&[^c-g]z]"), + hir_uclass(&[('a', 'b'), ('h', 'w')]) + ); + } + + #[test] + fn class_bracketed_intersect_negate() { + #[cfg(feature = "unicode-perl")] + assert_eq!( + t(r"[^\w&&\d]"), + hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) + ); + assert_eq!(t(r"[^[a-z&&a-c]]"), hir_negate(hir_uclass(&[('a', 'c')]))); + #[cfg(feature = "unicode-perl")] + assert_eq!( + t(r"[^[\w&&\d]]"), + hir_negate(hir_uclass_query(ClassQuery::Binary("digit"))) + ); + #[cfg(feature = "unicode-perl")] + assert_eq!( + t(r"[^[^\w&&\d]]"), + hir_uclass_query(ClassQuery::Binary("digit")) + ); + #[cfg(feature = "unicode-perl")] + assert_eq!(t(r"[[[^\w]&&[^\d]]]"), hir_negate(hir_uclass_perl_word())); + + #[cfg(feature = "unicode-perl")] + assert_eq!( + t_bytes(r"(?-u)[^\w&&\d]"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) + ); + assert_eq!( + t_bytes(r"(?-u)[^[a-z&&a-c]]"), + hir_negate(hir_bclass(&[(b'a', b'c')])) + ); + assert_eq!( + t_bytes(r"(?-u)[^[\w&&\d]]"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Digit)) + ); + assert_eq!( + t_bytes(r"(?-u)[^[^\w&&\d]]"), + hir_ascii_bclass(&ast::ClassAsciiKind::Digit) + ); + assert_eq!( + t_bytes(r"(?-u)[[[^\w]&&[^\d]]]"), + hir_negate(hir_ascii_bclass(&ast::ClassAsciiKind::Word)) + ); + } + + #[test] + fn class_bracketed_difference() { + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"[\pL--[:ascii:]]"), + hir_difference( + hir_uclass_query(ClassQuery::Binary("letter")), + hir_uclass(&[('\0', '\x7F')]) + ) + ); + + assert_eq!( + t(r"(?-u)[[:alpha:]--[:lower:]]"), + hir_bclass(&[(b'A', b'Z')]) + ); + } + + 
#[test] + fn class_bracketed_symmetric_difference() { + #[cfg(feature = "unicode-script")] + assert_eq!( + t(r"[\p{sc:Greek}~~\p{scx:Greek}]"), + // Class({ + // '·'..='·', + // '\u{300}'..='\u{301}', + // '\u{304}'..='\u{304}', + // '\u{306}'..='\u{306}', + // '\u{308}'..='\u{308}', + // '\u{313}'..='\u{313}', + // '\u{342}'..='\u{342}', + // '\u{345}'..='\u{345}', + // 'ʹ'..='ʹ', + // '\u{1dc0}'..='\u{1dc1}', + // '⁝'..='⁝', + // }) + hir_uclass(&[ + ('·', '·'), + ('\u{0300}', '\u{0301}'), + ('\u{0304}', '\u{0304}'), + ('\u{0306}', '\u{0306}'), + ('\u{0308}', '\u{0308}'), + ('\u{0313}', '\u{0313}'), + ('\u{0342}', '\u{0342}'), + ('\u{0345}', '\u{0345}'), + ('ʹ', 'ʹ'), + ('\u{1DC0}', '\u{1DC1}'), + ('⁝', '⁝'), + ]) + ); + assert_eq!(t(r"[a-g~~c-j]"), hir_uclass(&[('a', 'b'), ('h', 'j')])); + + assert_eq!( + t(r"(?-u)[a-g~~c-j]"), + hir_bclass(&[(b'a', b'b'), (b'h', b'j')]) + ); + } + + #[test] + fn ignore_whitespace() { + assert_eq!(t(r"(?x)\12 3"), hir_lit("\n3")); + assert_eq!(t(r"(?x)\x { 53 }"), hir_lit("S")); + assert_eq!( + t(r"(?x)\x # comment +{ # comment + 53 # comment +} #comment"), + hir_lit("S") + ); + + assert_eq!(t(r"(?x)\x 53"), hir_lit("S")); + assert_eq!( + t(r"(?x)\x # comment + 53 # comment"), + hir_lit("S") + ); + assert_eq!(t(r"(?x)\x5 3"), hir_lit("S")); + + #[cfg(feature = "unicode-gencat")] + assert_eq!( + t(r"(?x)\p # comment +{ # comment + Separator # comment +} # comment"), + hir_uclass_query(ClassQuery::Binary("separator")) + ); + + assert_eq!( + t(r"(?x)a # comment +{ # comment + 5 # comment + , # comment + 10 # comment +} # comment"), + hir_range(true, 5, Some(10), hir_lit("a")) + ); + + assert_eq!(t(r"(?x)a\ # hi there"), hir_lit("a ")); + } + + #[test] + fn analysis_is_utf8() { + // Positive examples. 
+ assert!(props_bytes(r"a").is_utf8()); + assert!(props_bytes(r"ab").is_utf8()); + assert!(props_bytes(r"(?-u)a").is_utf8()); + assert!(props_bytes(r"(?-u)ab").is_utf8()); + assert!(props_bytes(r"\xFF").is_utf8()); + assert!(props_bytes(r"\xFF\xFF").is_utf8()); + assert!(props_bytes(r"[^a]").is_utf8()); + assert!(props_bytes(r"[^a][^a]").is_utf8()); + assert!(props_bytes(r"\b").is_utf8()); + assert!(props_bytes(r"\B").is_utf8()); + assert!(props_bytes(r"(?-u)\b").is_utf8()); + assert!(props_bytes(r"(?-u)\B").is_utf8()); + + // Negative examples. + assert!(!props_bytes(r"(?-u)\xFF").is_utf8()); + assert!(!props_bytes(r"(?-u)\xFF\xFF").is_utf8()); + assert!(!props_bytes(r"(?-u)[^a]").is_utf8()); + assert!(!props_bytes(r"(?-u)[^a][^a]").is_utf8()); + } + + #[test] + fn analysis_captures_len() { + assert_eq!(0, props(r"a").explicit_captures_len()); + assert_eq!(0, props(r"(?:a)").explicit_captures_len()); + assert_eq!(0, props(r"(?i-u:a)").explicit_captures_len()); + assert_eq!(0, props(r"(?i-u)a").explicit_captures_len()); + assert_eq!(1, props(r"(a)").explicit_captures_len()); + assert_eq!(1, props(r"(?Pa)").explicit_captures_len()); + assert_eq!(1, props(r"()").explicit_captures_len()); + assert_eq!(1, props(r"()a").explicit_captures_len()); + assert_eq!(1, props(r"(a)+").explicit_captures_len()); + assert_eq!(2, props(r"(a)(b)").explicit_captures_len()); + assert_eq!(2, props(r"(a)|(b)").explicit_captures_len()); + assert_eq!(2, props(r"((a))").explicit_captures_len()); + assert_eq!(1, props(r"([a&&b])").explicit_captures_len()); + } + + #[test] + fn analysis_static_captures_len() { + let len = |pattern| props(pattern).static_explicit_captures_len(); + assert_eq!(Some(0), len(r"")); + assert_eq!(Some(0), len(r"foo|bar")); + assert_eq!(None, len(r"(foo)|bar")); + assert_eq!(None, len(r"foo|(bar)")); + assert_eq!(Some(1), len(r"(foo|bar)")); + assert_eq!(Some(1), len(r"(a|b|c|d|e|f)")); + assert_eq!(Some(1), len(r"(a)|(b)|(c)|(d)|(e)|(f)")); + assert_eq!(Some(2), 
len(r"(a)(b)|(c)(d)|(e)(f)")); + assert_eq!(Some(6), len(r"(a)(b)(c)(d)(e)(f)")); + assert_eq!(Some(3), len(r"(a)(b)(extra)|(a)(b)()")); + assert_eq!(Some(3), len(r"(a)(b)((?:extra)?)")); + assert_eq!(None, len(r"(a)(b)(extra)?")); + assert_eq!(Some(1), len(r"(foo)|(bar)")); + assert_eq!(Some(2), len(r"(foo)(bar)")); + assert_eq!(Some(2), len(r"(foo)+(bar)")); + assert_eq!(None, len(r"(foo)*(bar)")); + assert_eq!(Some(0), len(r"(foo)?{0}")); + assert_eq!(None, len(r"(foo)?{1}")); + assert_eq!(Some(1), len(r"(foo){1}")); + assert_eq!(Some(1), len(r"(foo){1,}")); + assert_eq!(Some(1), len(r"(foo){1,}?")); + assert_eq!(None, len(r"(foo){1,}??")); + assert_eq!(None, len(r"(foo){0,}")); + assert_eq!(Some(1), len(r"(foo)(?:bar)")); + assert_eq!(Some(2), len(r"(foo(?:bar)+)(?:baz(boo))")); + assert_eq!(Some(2), len(r"(?Pfoo)(?:bar)(bal|loon)")); + assert_eq!( + Some(2), + len(r#"<(a)[^>]+href="([^"]+)"|<(img)[^>]+src="([^"]+)""#) + ); + } + + #[test] + fn analysis_is_all_assertions() { + // Positive examples. 
+ let p = props(r"\b"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"\B"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"^"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"$"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"\A"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"\z"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"$^\z\A\b\B"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"$|^|\z|\A|\b|\B"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"^$|$^"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + let p = props(r"((\b)+())*^"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(0)); + + // Negative examples. + let p = props(r"^a"); + assert!(!p.look_set().is_empty()); + assert_eq!(p.minimum_len(), Some(1)); + } + + #[test] + fn analysis_look_set_prefix_any() { + let p = props(r"(?-u)(?i:(?:\b|_)win(?:32|64|dows)?(?:\b|_))"); + assert!(p.look_set_prefix_any().contains(Look::WordAscii)); + } + + #[test] + fn analysis_is_anchored() { + let is_start = |p| props(p).look_set_prefix().contains(Look::Start); + let is_end = |p| props(p).look_set_suffix().contains(Look::End); + + // Positive examples. 
+ assert!(is_start(r"^")); + assert!(is_end(r"$")); + + assert!(is_start(r"^^")); + assert!(props(r"$$").look_set_suffix().contains(Look::End)); + + assert!(is_start(r"^$")); + assert!(is_end(r"^$")); + + assert!(is_start(r"^foo")); + assert!(is_end(r"foo$")); + + assert!(is_start(r"^foo|^bar")); + assert!(is_end(r"foo$|bar$")); + + assert!(is_start(r"^(foo|bar)")); + assert!(is_end(r"(foo|bar)$")); + + assert!(is_start(r"^+")); + assert!(is_end(r"$+")); + assert!(is_start(r"^++")); + assert!(is_end(r"$++")); + assert!(is_start(r"(^)+")); + assert!(is_end(r"($)+")); + + assert!(is_start(r"$^")); + assert!(is_start(r"$^")); + assert!(is_start(r"$^|^$")); + assert!(is_end(r"$^|^$")); + + assert!(is_start(r"\b^")); + assert!(is_end(r"$\b")); + assert!(is_start(r"^(?m:^)")); + assert!(is_end(r"(?m:$)$")); + assert!(is_start(r"(?m:^)^")); + assert!(is_end(r"$(?m:$)")); + + // Negative examples. + assert!(!is_start(r"(?m)^")); + assert!(!is_end(r"(?m)$")); + assert!(!is_start(r"(?m:^$)|$^")); + assert!(!is_end(r"(?m:^$)|$^")); + assert!(!is_start(r"$^|(?m:^$)")); + assert!(!is_end(r"$^|(?m:^$)")); + + assert!(!is_start(r"a^")); + assert!(!is_start(r"$a")); + + assert!(!is_end(r"a^")); + assert!(!is_end(r"$a")); + + assert!(!is_start(r"^foo|bar")); + assert!(!is_end(r"foo|bar$")); + + assert!(!is_start(r"^*")); + assert!(!is_end(r"$*")); + assert!(!is_start(r"^*+")); + assert!(!is_end(r"$*+")); + assert!(!is_start(r"^+*")); + assert!(!is_end(r"$+*")); + assert!(!is_start(r"(^)*")); + assert!(!is_end(r"($)*")); + } + + #[test] + fn analysis_is_any_anchored() { + let is_start = |p| props(p).look_set().contains(Look::Start); + let is_end = |p| props(p).look_set().contains(Look::End); + + // Positive examples. + assert!(is_start(r"^")); + assert!(is_end(r"$")); + assert!(is_start(r"\A")); + assert!(is_end(r"\z")); + + // Negative examples. 
+ assert!(!is_start(r"(?m)^")); + assert!(!is_end(r"(?m)$")); + assert!(!is_start(r"$")); + assert!(!is_end(r"^")); + } + + #[test] + fn analysis_can_empty() { + // Positive examples. + let assert_empty = + |p| assert_eq!(Some(0), props_bytes(p).minimum_len()); + assert_empty(r""); + assert_empty(r"()"); + assert_empty(r"()*"); + assert_empty(r"()+"); + assert_empty(r"()?"); + assert_empty(r"a*"); + assert_empty(r"a?"); + assert_empty(r"a{0}"); + assert_empty(r"a{0,}"); + assert_empty(r"a{0,1}"); + assert_empty(r"a{0,10}"); + #[cfg(feature = "unicode-gencat")] + assert_empty(r"\pL*"); + assert_empty(r"a*|b"); + assert_empty(r"b|a*"); + assert_empty(r"a|"); + assert_empty(r"|a"); + assert_empty(r"a||b"); + assert_empty(r"a*a?(abcd)*"); + assert_empty(r"^"); + assert_empty(r"$"); + assert_empty(r"(?m)^"); + assert_empty(r"(?m)$"); + assert_empty(r"\A"); + assert_empty(r"\z"); + assert_empty(r"\B"); + assert_empty(r"(?-u)\B"); + assert_empty(r"\b"); + assert_empty(r"(?-u)\b"); + + // Negative examples. + let assert_non_empty = + |p| assert_ne!(Some(0), props_bytes(p).minimum_len()); + assert_non_empty(r"a+"); + assert_non_empty(r"a{1}"); + assert_non_empty(r"a{1,}"); + assert_non_empty(r"a{1,2}"); + assert_non_empty(r"a{1,10}"); + assert_non_empty(r"b|a"); + assert_non_empty(r"a*a+(abcd)*"); + #[cfg(feature = "unicode-gencat")] + assert_non_empty(r"\P{any}"); + assert_non_empty(r"[a--a]"); + assert_non_empty(r"[a&&b]"); + } + + #[test] + fn analysis_is_literal() { + // Positive examples. + assert!(props(r"a").is_literal()); + assert!(props(r"ab").is_literal()); + assert!(props(r"abc").is_literal()); + assert!(props(r"(?m)abc").is_literal()); + assert!(props(r"(?:a)").is_literal()); + assert!(props(r"foo(?:a)").is_literal()); + assert!(props(r"(?:a)foo").is_literal()); + assert!(props(r"[a]").is_literal()); + + // Negative examples. 
+ assert!(!props(r"").is_literal()); + assert!(!props(r"^").is_literal()); + assert!(!props(r"a|b").is_literal()); + assert!(!props(r"(a)").is_literal()); + assert!(!props(r"a+").is_literal()); + assert!(!props(r"foo(a)").is_literal()); + assert!(!props(r"(a)foo").is_literal()); + assert!(!props(r"[ab]").is_literal()); + } + + #[test] + fn analysis_is_alternation_literal() { + // Positive examples. + assert!(props(r"a").is_alternation_literal()); + assert!(props(r"ab").is_alternation_literal()); + assert!(props(r"abc").is_alternation_literal()); + assert!(props(r"(?m)abc").is_alternation_literal()); + assert!(props(r"foo|bar").is_alternation_literal()); + assert!(props(r"foo|bar|baz").is_alternation_literal()); + assert!(props(r"[a]").is_alternation_literal()); + assert!(props(r"(?:ab)|cd").is_alternation_literal()); + assert!(props(r"ab|(?:cd)").is_alternation_literal()); + + // Negative examples. + assert!(!props(r"").is_alternation_literal()); + assert!(!props(r"^").is_alternation_literal()); + assert!(!props(r"(a)").is_alternation_literal()); + assert!(!props(r"a+").is_alternation_literal()); + assert!(!props(r"foo(a)").is_alternation_literal()); + assert!(!props(r"(a)foo").is_alternation_literal()); + assert!(!props(r"[ab]").is_alternation_literal()); + assert!(!props(r"[ab]|b").is_alternation_literal()); + assert!(!props(r"a|[ab]").is_alternation_literal()); + assert!(!props(r"(a)|b").is_alternation_literal()); + assert!(!props(r"a|(b)").is_alternation_literal()); + assert!(!props(r"a|b").is_alternation_literal()); + assert!(!props(r"a|b|c").is_alternation_literal()); + assert!(!props(r"[a]|b").is_alternation_literal()); + assert!(!props(r"a|[b]").is_alternation_literal()); + assert!(!props(r"(?:a)|b").is_alternation_literal()); + assert!(!props(r"a|(?:b)").is_alternation_literal()); + assert!(!props(r"(?:z|xx)@|xx").is_alternation_literal()); + } + + // This tests that the smart Hir::repetition constructors does some basic + // simplifications. 
+ #[test] + fn smart_repetition() { + assert_eq!(t(r"a{0}"), Hir::empty()); + assert_eq!(t(r"a{1}"), hir_lit("a")); + assert_eq!(t(r"\B{32111}"), hir_look(hir::Look::WordUnicodeNegate)); + } + + // This tests that the smart Hir::concat constructor simplifies the given + // exprs in a way we expect. + #[test] + fn smart_concat() { + assert_eq!(t(""), Hir::empty()); + assert_eq!(t("(?:)"), Hir::empty()); + assert_eq!(t("abc"), hir_lit("abc")); + assert_eq!(t("(?:foo)(?:bar)"), hir_lit("foobar")); + assert_eq!(t("quux(?:foo)(?:bar)baz"), hir_lit("quuxfoobarbaz")); + assert_eq!( + t("foo(?:bar^baz)quux"), + hir_cat(vec![ + hir_lit("foobar"), + hir_look(hir::Look::Start), + hir_lit("bazquux"), + ]) + ); + assert_eq!( + t("foo(?:ba(?:r^b)az)quux"), + hir_cat(vec![ + hir_lit("foobar"), + hir_look(hir::Look::Start), + hir_lit("bazquux"), + ]) + ); + } + + // This tests that the smart Hir::alternation constructor simplifies the + // given exprs in a way we expect. + #[test] + fn smart_alternation() { + assert_eq!( + t("(?:foo)|(?:bar)"), + hir_alt(vec![hir_lit("foo"), hir_lit("bar")]) + ); + assert_eq!( + t("quux|(?:abc|def|xyz)|baz"), + hir_alt(vec![ + hir_lit("quux"), + hir_lit("abc"), + hir_lit("def"), + hir_lit("xyz"), + hir_lit("baz"), + ]) + ); + assert_eq!( + t("quux|(?:abc|(?:def|mno)|xyz)|baz"), + hir_alt(vec![ + hir_lit("quux"), + hir_lit("abc"), + hir_lit("def"), + hir_lit("mno"), + hir_lit("xyz"), + hir_lit("baz"), + ]) + ); + assert_eq!( + t("a|b|c|d|e|f|x|y|z"), + hir_uclass(&[('a', 'f'), ('x', 'z')]), + ); + // Tests that we lift common prefixes out of an alternation. 
+ assert_eq!( + t("[A-Z]foo|[A-Z]quux"), + hir_cat(vec![ + hir_uclass(&[('A', 'Z')]), + hir_alt(vec![hir_lit("foo"), hir_lit("quux")]), + ]), + ); + assert_eq!( + t("[A-Z][A-Z]|[A-Z]quux"), + hir_cat(vec![ + hir_uclass(&[('A', 'Z')]), + hir_alt(vec![hir_uclass(&[('A', 'Z')]), hir_lit("quux")]), + ]), + ); + assert_eq!( + t("[A-Z][A-Z]|[A-Z][A-Z]quux"), + hir_cat(vec![ + hir_uclass(&[('A', 'Z')]), + hir_uclass(&[('A', 'Z')]), + hir_alt(vec![Hir::empty(), hir_lit("quux")]), + ]), + ); + assert_eq!( + t("[A-Z]foo|[A-Z]foobar"), + hir_cat(vec![ + hir_uclass(&[('A', 'Z')]), + hir_alt(vec![hir_lit("foo"), hir_lit("foobar")]), + ]), + ); + } + + #[test] + fn regression_alt_empty_concat() { + use crate::ast::{self, Ast}; + + let span = Span::splat(Position::new(0, 0, 0)); + let ast = Ast::alternation(ast::Alternation { + span, + asts: vec![Ast::concat(ast::Concat { span, asts: vec![] })], + }); + + let mut t = Translator::new(); + assert_eq!(Ok(Hir::empty()), t.translate("", &ast)); + } + + #[test] + fn regression_empty_alt() { + use crate::ast::{self, Ast}; + + let span = Span::splat(Position::new(0, 0, 0)); + let ast = Ast::concat(ast::Concat { + span, + asts: vec![Ast::alternation(ast::Alternation { + span, + asts: vec![], + })], + }); + + let mut t = Translator::new(); + assert_eq!(Ok(Hir::fail()), t.translate("", &ast)); + } + + #[test] + fn regression_singleton_alt() { + use crate::{ + ast::{self, Ast}, + hir::Dot, + }; + + let span = Span::splat(Position::new(0, 0, 0)); + let ast = Ast::concat(ast::Concat { + span, + asts: vec![Ast::alternation(ast::Alternation { + span, + asts: vec![Ast::dot(span)], + })], + }); + + let mut t = Translator::new(); + assert_eq!(Ok(Hir::dot(Dot::AnyCharExceptLF)), t.translate("", &ast)); + } + + // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63168 + #[test] + fn regression_fuzz_match() { + let pat = "[(\u{6} \0-\u{afdf5}] \0 "; + let ast = ParserBuilder::new() + .octal(false) + .ignore_whitespace(true) + .build() + 
.parse(pat) + .unwrap(); + let hir = TranslatorBuilder::new() + .utf8(true) + .case_insensitive(false) + .multi_line(false) + .dot_matches_new_line(false) + .swap_greed(true) + .unicode(true) + .build() + .translate(pat, &ast) + .unwrap(); + assert_eq!( + hir, + Hir::concat(vec![ + hir_uclass(&[('\0', '\u{afdf5}')]), + hir_lit("\0"), + ]) + ); + } + + // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63155 + #[cfg(feature = "unicode")] + #[test] + fn regression_fuzz_difference1() { + let pat = r"\W\W|\W[^\v--\W\W\P{Script_Extensions:Pau_Cin_Hau}\u10A1A1-\U{3E3E3}--~~~~--~~~~~~~~------~~~~~~--~~~~~~]*"; + let _ = t(pat); // shouldn't panic + } + + // See: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=63153 + #[test] + fn regression_fuzz_char_decrement1() { + let pat = "w[w[^w?\rw\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\r\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0w?\rw[^w?\rw[^w?\rw[^w\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0\0\0\0\0\0\0\0*\0\0\u{1}\0]\0\0-*\0][^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w?\rw[^w\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0\0\0\0\0\0\0\0x\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\0\0\0*??\0\u{7f}{2}\u{10}??\0\0\0\0\0\0\0\0\0\u{3}\0\0\0}\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\u{1}\0]\0\0-*\0]\0\0\0\0\0\0\0\u{1}\0]\0\u{1}\u{1}H-i]-]\0\0\0\0\u{1}\0]\0\0\0\u{1}\0]\0\0-*\0\0\0\0\u{1}9-\u{7f}]\0'|-\u{7f}]\0'|(?i-ux)[-\u{7f}]\0'\u{3}\0\0\0}\0-*\0] Result; + + /// This method is called before beginning traversal of the HIR. + fn start(&mut self) {} + + /// This method is called on an `Hir` before descending into child `Hir` + /// nodes. + fn visit_pre(&mut self, _hir: &Hir) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called on an `Hir` after descending all of its child + /// `Hir` nodes. 
+ fn visit_post(&mut self, _hir: &Hir) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called between child nodes of an alternation. + fn visit_alternation_in(&mut self) -> Result<(), Self::Err> { + Ok(()) + } + + /// This method is called between child nodes of a concatenation. + fn visit_concat_in(&mut self) -> Result<(), Self::Err> { + Ok(()) + } +} + +/// Executes an implementation of `Visitor` in constant stack space. +/// +/// This function will visit every node in the given `Hir` while calling +/// appropriate methods provided by the [`Visitor`] trait. +/// +/// The primary use case for this method is when one wants to perform case +/// analysis over an `Hir` without using a stack size proportional to the depth +/// of the `Hir`. Namely, this method will instead use constant stack space, +/// but will use heap space proportional to the size of the `Hir`. This may be +/// desirable in cases where the size of `Hir` is proportional to end user +/// input. +/// +/// If the visitor returns an error at any point, then visiting is stopped and +/// the error is returned. +pub fn visit(hir: &Hir, visitor: V) -> Result { + HeapVisitor::new().visit(hir, visitor) +} + +/// HeapVisitor visits every item in an `Hir` recursively using constant stack +/// size and a heap size proportional to the size of the `Hir`. +struct HeapVisitor<'a> { + /// A stack of `Hir` nodes. This is roughly analogous to the call stack + /// used in a typical recursive visitor. + stack: Vec<(&'a Hir, Frame<'a>)>, +} + +/// Represents a single stack frame while performing structural induction over +/// an `Hir`. +enum Frame<'a> { + /// A stack frame allocated just before descending into a repetition + /// operator's child node. + Repetition(&'a hir::Repetition), + /// A stack frame allocated just before descending into a capture's child + /// node. + Capture(&'a hir::Capture), + /// The stack frame used while visiting every child node of a concatenation + /// of expressions. 
+ Concat { + /// The child node we are currently visiting. + head: &'a Hir, + /// The remaining child nodes to visit (which may be empty). + tail: &'a [Hir], + }, + /// The stack frame used while visiting every child node of an alternation + /// of expressions. + Alternation { + /// The child node we are currently visiting. + head: &'a Hir, + /// The remaining child nodes to visit (which may be empty). + tail: &'a [Hir], + }, +} + +impl<'a> HeapVisitor<'a> { + fn new() -> HeapVisitor<'a> { + HeapVisitor { stack: vec![] } + } + + fn visit( + &mut self, + mut hir: &'a Hir, + mut visitor: V, + ) -> Result { + self.stack.clear(); + + visitor.start(); + loop { + visitor.visit_pre(hir)?; + if let Some(x) = self.induct(hir) { + let child = x.child(); + self.stack.push((hir, x)); + hir = child; + continue; + } + // No induction means we have a base case, so we can post visit + // it now. + visitor.visit_post(hir)?; + + // At this point, we now try to pop our call stack until it is + // either empty or we hit another inductive case. + loop { + let (post_hir, frame) = match self.stack.pop() { + None => return visitor.finish(), + Some((post_hir, frame)) => (post_hir, frame), + }; + // If this is a concat/alternate, then we might have additional + // inductive steps to process. + if let Some(x) = self.pop(frame) { + match x { + Frame::Alternation { .. } => { + visitor.visit_alternation_in()?; + } + Frame::Concat { .. } => { + visitor.visit_concat_in()?; + } + _ => {} + } + hir = x.child(); + self.stack.push((post_hir, x)); + break; + } + // Otherwise, we've finished visiting all the child nodes for + // this HIR, so we can post visit it now. + visitor.visit_post(post_hir)?; + } + } + } + + /// Build a stack frame for the given HIR if one is needed (which occurs if + /// and only if there are child nodes in the HIR). Otherwise, return None. 
+ fn induct(&mut self, hir: &'a Hir) -> Option> { + match *hir.kind() { + HirKind::Repetition(ref x) => Some(Frame::Repetition(x)), + HirKind::Capture(ref x) => Some(Frame::Capture(x)), + HirKind::Concat(ref x) if x.is_empty() => None, + HirKind::Concat(ref x) => { + Some(Frame::Concat { head: &x[0], tail: &x[1..] }) + } + HirKind::Alternation(ref x) if x.is_empty() => None, + HirKind::Alternation(ref x) => { + Some(Frame::Alternation { head: &x[0], tail: &x[1..] }) + } + _ => None, + } + } + + /// Pops the given frame. If the frame has an additional inductive step, + /// then return it, otherwise return `None`. + fn pop(&self, induct: Frame<'a>) -> Option> { + match induct { + Frame::Repetition(_) => None, + Frame::Capture(_) => None, + Frame::Concat { tail, .. } => { + if tail.is_empty() { + None + } else { + Some(Frame::Concat { head: &tail[0], tail: &tail[1..] }) + } + } + Frame::Alternation { tail, .. } => { + if tail.is_empty() { + None + } else { + Some(Frame::Alternation { + head: &tail[0], + tail: &tail[1..], + }) + } + } + } + } +} + +impl<'a> Frame<'a> { + /// Perform the next inductive step on this frame and return the next + /// child HIR node to visit. + fn child(&self) -> &'a Hir { + match *self { + Frame::Repetition(rep) => &rep.sub, + Frame::Capture(capture) => &capture.sub, + Frame::Concat { head, .. } => head, + Frame::Alternation { head, .. } => head, + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..a4512e23de360d92b282c2633a329c8cee32ecf3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/lib.rs @@ -0,0 +1,433 @@ +/*! +This crate provides a robust regular expression parser. + +This crate defines two primary types: + +* [`Ast`](ast::Ast) is the abstract syntax of a regular expression. 
+ An abstract syntax corresponds to a *structured representation* of the + concrete syntax of a regular expression, where the concrete syntax is the + pattern string itself (e.g., `foo(bar)+`). Given some abstract syntax, it + can be converted back to the original concrete syntax (modulo some details, + like whitespace). To a first approximation, the abstract syntax is complex + and difficult to analyze. +* [`Hir`](hir::Hir) is the high-level intermediate representation + ("HIR" or "high-level IR" for short) of regular expression. It corresponds to + an intermediate state of a regular expression that sits between the abstract + syntax and the low level compiled opcodes that are eventually responsible for + executing a regular expression search. Given some high-level IR, it is not + possible to produce the original concrete syntax (although it is possible to + produce an equivalent concrete syntax, but it will likely scarcely resemble + the original pattern). To a first approximation, the high-level IR is simple + and easy to analyze. + +These two types come with conversion routines: + +* An [`ast::parse::Parser`] converts concrete syntax (a `&str`) to an +[`Ast`](ast::Ast). +* A [`hir::translate::Translator`] converts an [`Ast`](ast::Ast) to a +[`Hir`](hir::Hir). + +As a convenience, the above two conversion routines are combined into one via +the top-level [`Parser`] type. This `Parser` will first convert your pattern to +an `Ast` and then convert the `Ast` to an `Hir`. It's also exposed as top-level +[`parse`] free function. 
+ + +# Example + +This example shows how to parse a pattern string into its HIR: + +``` +use regex_syntax::{hir::Hir, parse}; + +let hir = parse("a|b")?; +assert_eq!(hir, Hir::alternation(vec![ + Hir::literal("a".as_bytes()), + Hir::literal("b".as_bytes()), +])); +# Ok::<(), Box>(()) +``` + + +# Concrete syntax supported + +The concrete syntax is documented as part of the public API of the +[`regex` crate](https://docs.rs/regex/%2A/regex/#syntax). + + +# Input safety + +A key feature of this library is that it is safe to use with end user facing +input. This plays a significant role in the internal implementation. In +particular: + +1. Parsers provide a `nest_limit` option that permits callers to control how + deeply nested a regular expression is allowed to be. This makes it possible + to do case analysis over an `Ast` or an `Hir` using recursion without + worrying about stack overflow. +2. Since relying on a particular stack size is brittle, this crate goes to + great lengths to ensure that all interactions with both the `Ast` and the + `Hir` do not use recursion. Namely, they use constant stack space and heap + space proportional to the size of the original pattern string (in bytes). + This includes the type's corresponding destructors. (One exception to this + is literal extraction, but this will eventually get fixed.) + + +# Error reporting + +The `Display` implementations on all `Error` types exposed in this library +provide nice human readable errors that are suitable for showing to end users +in a monospace font. + + +# Literal extraction + +This crate provides limited support for [literal extraction from `Hir` +values](hir::literal). Be warned that literal extraction uses recursion, and +therefore, stack size proportional to the size of the `Hir`. + +The purpose of literal extraction is to speed up searches. 
That is, if you +know a regular expression must match a prefix or suffix literal, then it is +often quicker to search for instances of that literal, and then confirm or deny +the match using the full regular expression engine. These optimizations are +done automatically in the `regex` crate. + + +# Crate features + +An important feature provided by this crate is its Unicode support. This +includes things like case folding, boolean properties, general categories, +scripts and Unicode-aware support for the Perl classes `\w`, `\s` and `\d`. +However, a downside of this support is that it requires bundling several +Unicode data tables that are substantial in size. + +A fair number of use cases do not require full Unicode support. For this +reason, this crate exposes a number of features to control which Unicode +data is available. + +If a regular expression attempts to use a Unicode feature that is not available +because the corresponding crate feature was disabled, then translating that +regular expression to an `Hir` will return an error. (It is still possible +construct an `Ast` for such a regular expression, since Unicode data is not +used until translation to an `Hir`.) Stated differently, enabling or disabling +any of the features below can only add or subtract from the total set of valid +regular expressions. Enabling or disabling a feature will never modify the +match semantics of a regular expression. + +The following features are available: + +* **std** - + Enables support for the standard library. This feature is enabled by default. + When disabled, only `core` and `alloc` are used. Otherwise, enabling `std` + generally just enables `std::error::Error` trait impls for the various error + types. +* **unicode** - + Enables all Unicode features. This feature is enabled by default, and will + always cover all Unicode features, even if more are added in the future. 
+* **unicode-age** - + Provide the data for the + [Unicode `Age` property](https://www.unicode.org/reports/tr44/tr44-24.html#Character_Age). + This makes it possible to use classes like `\p{Age:6.0}` to refer to all + codepoints first introduced in Unicode 6.0 +* **unicode-bool** - + Provide the data for numerous Unicode boolean properties. The full list + is not included here, but contains properties like `Alphabetic`, `Emoji`, + `Lowercase`, `Math`, `Uppercase` and `White_Space`. +* **unicode-case** - + Provide the data for case insensitive matching using + [Unicode's "simple loose matches" specification](https://www.unicode.org/reports/tr18/#Simple_Loose_Matches). +* **unicode-gencat** - + Provide the data for + [Unicode general categories](https://www.unicode.org/reports/tr44/tr44-24.html#General_Category_Values). + This includes, but is not limited to, `Decimal_Number`, `Letter`, + `Math_Symbol`, `Number` and `Punctuation`. +* **unicode-perl** - + Provide the data for supporting the Unicode-aware Perl character classes, + corresponding to `\w`, `\s` and `\d`. This is also necessary for using + Unicode-aware word boundary assertions. Note that if this feature is + disabled, the `\s` and `\d` character classes are still available if the + `unicode-bool` and `unicode-gencat` features are enabled, respectively. +* **unicode-script** - + Provide the data for + [Unicode scripts and script extensions](https://www.unicode.org/reports/tr24/). + This includes, but is not limited to, `Arabic`, `Cyrillic`, `Hebrew`, + `Latin` and `Thai`. +* **unicode-segment** - + Provide the data necessary to provide the properties used to implement the + [Unicode text segmentation algorithms](https://www.unicode.org/reports/tr29/). + This enables using classes like `\p{gcb=Extend}`, `\p{wb=Katakana}` and + `\p{sb=ATerm}`. +* **arbitrary** - + Enabling this feature introduces a public dependency on the + [`arbitrary`](https://crates.io/crates/arbitrary) + crate. 
Namely, it implements the `Arbitrary` trait from that crate for the + [`Ast`](crate::ast::Ast) type. This feature is disabled by default. +*/ + +#![no_std] +#![forbid(unsafe_code)] +#![deny(missing_docs, rustdoc::broken_intra_doc_links)] +#![warn(missing_debug_implementations)] +// This adds Cargo feature annotations to items in the rustdoc output. Which is +// sadly hugely beneficial for this crate due to the number of features. +#![cfg_attr(docsrs_regex, feature(doc_cfg))] + +#[cfg(any(test, feature = "std"))] +extern crate std; + +extern crate alloc; + +pub use crate::{ + error::Error, + parser::{parse, Parser, ParserBuilder}, + unicode::UnicodeWordError, +}; + +use alloc::string::String; + +pub mod ast; +mod debug; +mod either; +mod error; +pub mod hir; +mod parser; +mod rank; +mod unicode; +mod unicode_tables; +pub mod utf8; + +/// Escapes all regular expression meta characters in `text`. +/// +/// The string returned may be safely used as a literal in a regular +/// expression. +pub fn escape(text: &str) -> String { + let mut quoted = String::new(); + escape_into(text, &mut quoted); + quoted +} + +/// Escapes all meta characters in `text` and writes the result into `buf`. +/// +/// This will append escape characters into the given buffer. The characters +/// that are appended are safe to use as a literal in a regular expression. +pub fn escape_into(text: &str, buf: &mut String) { + buf.reserve(text.len()); + for c in text.chars() { + if is_meta_character(c) { + buf.push('\\'); + } + buf.push(c); + } +} + +/// Returns true if the given character has significance in a regex. +/// +/// Generally speaking, these are the only characters which _must_ be escaped +/// in order to match their literal meaning. For example, to match a literal +/// `|`, one could write `\|`. Sometimes escaping isn't always necessary. 
For +/// example, `-` is treated as a meta character because of its significance +/// for writing ranges inside of character classes, but the regex `-` will +/// match a literal `-` because `-` has no special meaning outside of character +/// classes. +/// +/// In order to determine whether a character may be escaped at all, the +/// [`is_escapeable_character`] routine should be used. The difference between +/// `is_meta_character` and `is_escapeable_character` is that the latter will +/// return true for some characters that are _not_ meta characters. For +/// example, `%` and `\%` both match a literal `%` in all contexts. In other +/// words, `is_escapeable_character` includes "superfluous" escapes. +/// +/// Note that the set of characters for which this function returns `true` or +/// `false` is fixed and won't change in a semver compatible release. (In this +/// case, "semver compatible release" actually refers to the `regex` crate +/// itself, since reducing or expanding the set of meta characters would be a +/// breaking change for not just `regex-syntax` but also `regex` itself.) +/// +/// # Example +/// +/// ``` +/// use regex_syntax::is_meta_character; +/// +/// assert!(is_meta_character('?')); +/// assert!(is_meta_character('-')); +/// assert!(is_meta_character('&')); +/// assert!(is_meta_character('#')); +/// +/// assert!(!is_meta_character('%')); +/// assert!(!is_meta_character('/')); +/// assert!(!is_meta_character('!')); +/// assert!(!is_meta_character('"')); +/// assert!(!is_meta_character('e')); +/// ``` +pub fn is_meta_character(c: char) -> bool { + match c { + '\\' | '.' | '+' | '*' | '?' | '(' | ')' | '|' | '[' | ']' | '{' + | '}' | '^' | '$' | '#' | '&' | '-' | '~' => true, + _ => false, + } +} + +/// Returns true if the given character can be escaped in a regex. +/// +/// This returns true in all cases that `is_meta_character` returns true, but +/// also returns true in some cases where `is_meta_character` returns false. 
+/// For example, `%` is not a meta character, but it is escapable. That is, +/// `%` and `\%` both match a literal `%` in all contexts. +/// +/// The purpose of this routine is to provide knowledge about what characters +/// may be escaped. Namely, most regex engines permit "superfluous" escapes +/// where characters without any special significance may be escaped even +/// though there is no actual _need_ to do so. +/// +/// This will return false for some characters. For example, `e` is not +/// escapable. Therefore, `\e` will either result in a parse error (which is +/// true today), or it could backwards compatibly evolve into a new construct +/// with its own meaning. Indeed, that is the purpose of banning _some_ +/// superfluous escapes: it provides a way to evolve the syntax in a compatible +/// manner. +/// +/// # Example +/// +/// ``` +/// use regex_syntax::is_escapeable_character; +/// +/// assert!(is_escapeable_character('?')); +/// assert!(is_escapeable_character('-')); +/// assert!(is_escapeable_character('&')); +/// assert!(is_escapeable_character('#')); +/// assert!(is_escapeable_character('%')); +/// assert!(is_escapeable_character('/')); +/// assert!(is_escapeable_character('!')); +/// assert!(is_escapeable_character('"')); +/// +/// assert!(!is_escapeable_character('e')); +/// ``` +pub fn is_escapeable_character(c: char) -> bool { + // Certainly escapable if it's a meta character. + if is_meta_character(c) { + return true; + } + // Any character that isn't ASCII is definitely not escapable. There's + // no real need to allow things like \☃ right? + if !c.is_ascii() { + return false; + } + // Otherwise, we basically say that everything is escapable unless it's a + // letter or digit. Things like \3 are either octal (when enabled) or an + // error, and we should keep it that way. Otherwise, letters are reserved + // for adding new syntax in a backwards compatible way. 
+ match c { + '0'..='9' | 'A'..='Z' | 'a'..='z' => false, + // While not currently supported, we keep these as not escapable to + // give us some flexibility with respect to supporting the \< and + // \> word boundary assertions in the future. By rejecting them as + // escapable, \< and \> will result in a parse error. Thus, we can + // turn them into something else in the future without it being a + // backwards incompatible change. + // + // OK, now we support \< and \>, and we need to retain them as *not* + // escapable here since the escape sequence is significant. + '<' | '>' => false, + _ => true, + } +} + +/// Returns true if and only if the given character is a Unicode word +/// character. +/// +/// A Unicode word character is defined by +/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties). +/// In particular, a character +/// is considered a word character if it is in either of the `Alphabetic` or +/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark` +/// or `Connector_Punctuation` general categories. +/// +/// # Panics +/// +/// If the `unicode-perl` feature is not enabled, then this function +/// panics. For this reason, it is recommended that callers use +/// [`try_is_word_character`] instead. +pub fn is_word_character(c: char) -> bool { + try_is_word_character(c).expect("unicode-perl feature must be enabled") +} + +/// Returns true if and only if the given character is a Unicode word +/// character. +/// +/// A Unicode word character is defined by +/// [UTS#18 Annex C](https://unicode.org/reports/tr18/#Compatibility_Properties). +/// In particular, a character +/// is considered a word character if it is in either of the `Alphabetic` or +/// `Join_Control` properties, or is in one of the `Decimal_Number`, `Mark` +/// or `Connector_Punctuation` general categories. +/// +/// # Errors +/// +/// If the `unicode-perl` feature is not enabled, then this function always +/// returns an error. 
+pub fn try_is_word_character( + c: char, +) -> core::result::Result { + unicode::is_word_character(c) +} + +/// Returns true if and only if the given character is an ASCII word character. +/// +/// An ASCII word character is defined by the following character class: +/// `[_0-9a-zA-Z]`. +pub fn is_word_byte(c: u8) -> bool { + match c { + b'_' | b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' => true, + _ => false, + } +} + +#[cfg(test)] +mod tests { + use alloc::string::ToString; + + use super::*; + + #[test] + fn escape_meta() { + assert_eq!( + escape(r"\.+*?()|[]{}^$#&-~"), + r"\\\.\+\*\?\(\)\|\[\]\{\}\^\$\#\&\-\~".to_string() + ); + } + + #[test] + fn word_byte() { + assert!(is_word_byte(b'a')); + assert!(!is_word_byte(b'-')); + } + + #[test] + #[cfg(feature = "unicode-perl")] + fn word_char() { + assert!(is_word_character('a'), "ASCII"); + assert!(is_word_character('à'), "Latin-1"); + assert!(is_word_character('β'), "Greek"); + assert!(is_word_character('\u{11011}'), "Brahmi (Unicode 6.0)"); + assert!(is_word_character('\u{11611}'), "Modi (Unicode 7.0)"); + assert!(is_word_character('\u{11711}'), "Ahom (Unicode 8.0)"); + assert!(is_word_character('\u{17828}'), "Tangut (Unicode 9.0)"); + assert!(is_word_character('\u{1B1B1}'), "Nushu (Unicode 10.0)"); + assert!(is_word_character('\u{16E40}'), "Medefaidrin (Unicode 11.0)"); + assert!(!is_word_character('-')); + assert!(!is_word_character('☃')); + } + + #[test] + #[should_panic] + #[cfg(not(feature = "unicode-perl"))] + fn word_char_disabled_panic() { + assert!(is_word_character('a')); + } + + #[test] + #[cfg(not(feature = "unicode-perl"))] + fn word_char_disabled_error() { + assert!(try_is_word_character('a').is_err()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/parser.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/parser.rs new file mode 100644 index 0000000000000000000000000000000000000000..f482b84667a7aa6cf3a4c26a2a2af0204a315884 
--- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/parser.rs @@ -0,0 +1,254 @@ +use crate::{ast, hir, Error}; + +/// A convenience routine for parsing a regex using default options. +/// +/// This is equivalent to `Parser::new().parse(pattern)`. +/// +/// If you need to set non-default options, then use a [`ParserBuilder`]. +/// +/// This routine returns an [`Hir`](hir::Hir) value. Namely, it automatically +/// parses the pattern as an [`Ast`](ast::Ast) and then invokes the translator +/// to convert the `Ast` into an `Hir`. If you need access to the `Ast`, then +/// you should use a [`ast::parse::Parser`]. +pub fn parse(pattern: &str) -> Result { + Parser::new().parse(pattern) +} + +/// A builder for a regular expression parser. +/// +/// This builder permits modifying configuration options for the parser. +/// +/// This type combines the builder options for both the [AST +/// `ParserBuilder`](ast::parse::ParserBuilder) and the [HIR +/// `TranslatorBuilder`](hir::translate::TranslatorBuilder). +#[derive(Clone, Debug, Default)] +pub struct ParserBuilder { + ast: ast::parse::ParserBuilder, + hir: hir::translate::TranslatorBuilder, +} + +impl ParserBuilder { + /// Create a new parser builder with a default configuration. + pub fn new() -> ParserBuilder { + ParserBuilder::default() + } + + /// Build a parser from this configuration with the given pattern. + pub fn build(&self) -> Parser { + Parser { ast: self.ast.build(), hir: self.hir.build() } + } + + /// Set the nesting limit for this parser. + /// + /// The nesting limit controls how deep the abstract syntax tree is allowed + /// to be. If the AST exceeds the given limit (e.g., with too many nested + /// groups), then an error is returned by the parser. + /// + /// The purpose of this limit is to act as a heuristic to prevent stack + /// overflow for consumers that do structural induction on an `Ast` using + /// explicit recursion. 
While this crate never does this (instead using + /// constant stack space and moving the call stack to the heap), other + /// crates may. + /// + /// This limit is not checked until the entire Ast is parsed. Therefore, + /// if callers want to put a limit on the amount of heap space used, then + /// they should impose a limit on the length, in bytes, of the concrete + /// pattern string. In particular, this is viable since this parser + /// implementation will limit itself to heap space proportional to the + /// length of the pattern string. + /// + /// Note that a nest limit of `0` will return a nest limit error for most + /// patterns but not all. For example, a nest limit of `0` permits `a` but + /// not `ab`, since `ab` requires a concatenation, which results in a nest + /// depth of `1`. In general, a nest limit is not something that manifests + /// in an obvious way in the concrete syntax, therefore, it should not be + /// used in a granular way. + pub fn nest_limit(&mut self, limit: u32) -> &mut ParserBuilder { + self.ast.nest_limit(limit); + self + } + + /// Whether to support octal syntax or not. + /// + /// Octal syntax is a little-known way of uttering Unicode codepoints in + /// a regular expression. For example, `a`, `\x61`, `\u0061` and + /// `\141` are all equivalent regular expressions, where the last example + /// shows octal syntax. + /// + /// While supporting octal syntax isn't in and of itself a problem, it does + /// make good error messages harder. That is, in PCRE based regex engines, + /// syntax like `\0` invokes a backreference, which is explicitly + /// unsupported in Rust's regex engine. However, many users expect it to + /// be supported. Therefore, when octal support is disabled, the error + /// message will explicitly mention that backreferences aren't supported. + /// + /// Octal syntax is disabled by default. 
+ pub fn octal(&mut self, yes: bool) -> &mut ParserBuilder { + self.ast.octal(yes); + self + } + + /// When disabled, translation will permit the construction of a regular + /// expression that may match invalid UTF-8. + /// + /// When enabled (the default), the translator is guaranteed to produce an + /// expression that, for non-empty matches, will only ever produce spans + /// that are entirely valid UTF-8 (otherwise, the translator will return an + /// error). + /// + /// Perhaps surprisingly, when UTF-8 is enabled, an empty regex or even + /// a negated ASCII word boundary (uttered as `(?-u:\B)` in the concrete + /// syntax) will be allowed even though they can produce matches that split + /// a UTF-8 encoded codepoint. This only applies to zero-width or "empty" + /// matches, and it is expected that the regex engine itself must handle + /// these cases if necessary (perhaps by suppressing any zero-width matches + /// that split a codepoint). + pub fn utf8(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.utf8(yes); + self + } + + /// Enable verbose mode in the regular expression. + /// + /// When enabled, verbose mode permits insignificant whitespace in many + /// places in the regular expression, as well as comments. Comments are + /// started using `#` and continue until the end of the line. + /// + /// By default, this is disabled. It may be selectively enabled in the + /// regular expression by using the `x` flag regardless of this setting. + pub fn ignore_whitespace(&mut self, yes: bool) -> &mut ParserBuilder { + self.ast.ignore_whitespace(yes); + self + } + + /// Enable or disable the case insensitive flag by default. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `i` flag. + pub fn case_insensitive(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.case_insensitive(yes); + self + } + + /// Enable or disable the multi-line matching flag by default. 
+ /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `m` flag. + pub fn multi_line(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.multi_line(yes); + self + } + + /// Enable or disable the "dot matches any character" flag by default. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `s` flag. + pub fn dot_matches_new_line(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.dot_matches_new_line(yes); + self + } + + /// Enable or disable the CRLF mode flag by default. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `R` flag. + /// + /// When CRLF mode is enabled, the following happens: + /// + /// * Unless `dot_matches_new_line` is enabled, `.` will match any character + /// except for `\r` and `\n`. + /// * When `multi_line` mode is enabled, `^` and `$` will treat `\r\n`, + /// `\r` and `\n` as line terminators. And in particular, neither will + /// match between a `\r` and a `\n`. + pub fn crlf(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.crlf(yes); + self + } + + /// Sets the line terminator for use with `(?u-s:.)` and `(?-us:.)`. + /// + /// Namely, instead of `.` (by default) matching everything except for `\n`, + /// this will cause `.` to match everything except for the byte given. + /// + /// If `.` is used in a context where Unicode mode is enabled and this byte + /// isn't ASCII, then an error will be returned. When Unicode mode is + /// disabled, then any byte is permitted, but will return an error if UTF-8 + /// mode is enabled and it is a non-ASCII byte. + /// + /// In short, any ASCII value for a line terminator is always okay. But a + /// non-ASCII byte might result in an error depending on whether Unicode + /// mode or UTF-8 mode are enabled. 
+ /// + /// Note that if `R` mode is enabled then it always takes precedence and + /// the line terminator will be treated as `\r` and `\n` simultaneously. + /// + /// Note also that this *doesn't* impact the look-around assertions + /// `(?m:^)` and `(?m:$)`. That's usually controlled by additional + /// configuration in the regex engine itself. + pub fn line_terminator(&mut self, byte: u8) -> &mut ParserBuilder { + self.hir.line_terminator(byte); + self + } + + /// Enable or disable the "swap greed" flag by default. + /// + /// By default this is disabled. It may alternatively be selectively + /// enabled in the regular expression itself via the `U` flag. + pub fn swap_greed(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.swap_greed(yes); + self + } + + /// Enable or disable the Unicode flag (`u`) by default. + /// + /// By default this is **enabled**. It may alternatively be selectively + /// disabled in the regular expression itself via the `u` flag. + /// + /// Note that unless `utf8` is disabled (it's enabled by default), a + /// regular expression will fail to parse if Unicode mode is disabled and a + /// sub-expression could possibly match invalid UTF-8. + pub fn unicode(&mut self, yes: bool) -> &mut ParserBuilder { + self.hir.unicode(yes); + self + } +} + +/// A convenience parser for regular expressions. +/// +/// This parser takes as input a regular expression pattern string (the +/// "concrete syntax") and returns a high-level intermediate representation +/// (the HIR) suitable for most types of analysis. In particular, this parser +/// hides the intermediate state of producing an AST (the "abstract syntax"). +/// The AST is itself far more complex than the HIR, so this parser serves as a +/// convenience for never having to deal with it at all. +/// +/// If callers have more fine grained use cases that need an AST, then please +/// see the [`ast::parse`] module. +/// +/// A `Parser` can be configured in more detail via a [`ParserBuilder`]. 
+#[derive(Clone, Debug)] +pub struct Parser { + ast: ast::parse::Parser, + hir: hir::translate::Translator, +} + +impl Parser { + /// Create a new parser with a default configuration. + /// + /// The parser can be run with `parse` method. The parse method returns + /// a high level intermediate representation of the given regular + /// expression. + /// + /// To set configuration options on the parser, use [`ParserBuilder`]. + pub fn new() -> Parser { + ParserBuilder::new().build() + } + + /// Parse the regular expression into a high level intermediate + /// representation. + pub fn parse(&mut self, pattern: &str) -> Result { + let ast = self.ast.parse(pattern)?; + let hir = self.hir.translate(pattern, &ast)?; + Ok(hir) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/rank.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/rank.rs new file mode 100644 index 0000000000000000000000000000000000000000..ccb25a20aedcdf182445f6a6e1ff4f3affdc549a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/rank.rs @@ -0,0 +1,258 @@ +pub(crate) const BYTE_FREQUENCIES: [u8; 256] = [ + 55, // '\x00' + 52, // '\x01' + 51, // '\x02' + 50, // '\x03' + 49, // '\x04' + 48, // '\x05' + 47, // '\x06' + 46, // '\x07' + 45, // '\x08' + 103, // '\t' + 242, // '\n' + 66, // '\x0b' + 67, // '\x0c' + 229, // '\r' + 44, // '\x0e' + 43, // '\x0f' + 42, // '\x10' + 41, // '\x11' + 40, // '\x12' + 39, // '\x13' + 38, // '\x14' + 37, // '\x15' + 36, // '\x16' + 35, // '\x17' + 34, // '\x18' + 33, // '\x19' + 56, // '\x1a' + 32, // '\x1b' + 31, // '\x1c' + 30, // '\x1d' + 29, // '\x1e' + 28, // '\x1f' + 255, // ' ' + 148, // '!' + 164, // '"' + 149, // '#' + 136, // '$' + 160, // '%' + 155, // '&' + 173, // "'" + 221, // '(' + 222, // ')' + 134, // '*' + 122, // '+' + 232, // ',' + 202, // '-' + 215, // '.' 
+ 224, // '/' + 208, // '0' + 220, // '1' + 204, // '2' + 187, // '3' + 183, // '4' + 179, // '5' + 177, // '6' + 168, // '7' + 178, // '8' + 200, // '9' + 226, // ':' + 195, // ';' + 154, // '<' + 184, // '=' + 174, // '>' + 126, // '?' + 120, // '@' + 191, // 'A' + 157, // 'B' + 194, // 'C' + 170, // 'D' + 189, // 'E' + 162, // 'F' + 161, // 'G' + 150, // 'H' + 193, // 'I' + 142, // 'J' + 137, // 'K' + 171, // 'L' + 176, // 'M' + 185, // 'N' + 167, // 'O' + 186, // 'P' + 112, // 'Q' + 175, // 'R' + 192, // 'S' + 188, // 'T' + 156, // 'U' + 140, // 'V' + 143, // 'W' + 123, // 'X' + 133, // 'Y' + 128, // 'Z' + 147, // '[' + 138, // '\\' + 146, // ']' + 114, // '^' + 223, // '_' + 151, // '`' + 249, // 'a' + 216, // 'b' + 238, // 'c' + 236, // 'd' + 253, // 'e' + 227, // 'f' + 218, // 'g' + 230, // 'h' + 247, // 'i' + 135, // 'j' + 180, // 'k' + 241, // 'l' + 233, // 'm' + 246, // 'n' + 244, // 'o' + 231, // 'p' + 139, // 'q' + 245, // 'r' + 243, // 's' + 251, // 't' + 235, // 'u' + 201, // 'v' + 196, // 'w' + 240, // 'x' + 214, // 'y' + 152, // 'z' + 182, // '{' + 205, // '|' + 181, // '}' + 127, // '~' + 27, // '\x7f' + 212, // '\x80' + 211, // '\x81' + 210, // '\x82' + 213, // '\x83' + 228, // '\x84' + 197, // '\x85' + 169, // '\x86' + 159, // '\x87' + 131, // '\x88' + 172, // '\x89' + 105, // '\x8a' + 80, // '\x8b' + 98, // '\x8c' + 96, // '\x8d' + 97, // '\x8e' + 81, // '\x8f' + 207, // '\x90' + 145, // '\x91' + 116, // '\x92' + 115, // '\x93' + 144, // '\x94' + 130, // '\x95' + 153, // '\x96' + 121, // '\x97' + 107, // '\x98' + 132, // '\x99' + 109, // '\x9a' + 110, // '\x9b' + 124, // '\x9c' + 111, // '\x9d' + 82, // '\x9e' + 108, // '\x9f' + 118, // '\xa0' + 141, // '¡' + 113, // '¢' + 129, // '£' + 119, // '¤' + 125, // '¥' + 165, // '¦' + 117, // '§' + 92, // '¨' + 106, // '©' + 83, // 'ª' + 72, // '«' + 99, // '¬' + 93, // '\xad' + 65, // '®' + 79, // '¯' + 166, // '°' + 237, // '±' + 163, // '²' + 199, // '³' + 190, // '´' + 225, // 'µ' + 209, // '¶' + 
203, // '·' + 198, // '¸' + 217, // '¹' + 219, // 'º' + 206, // '»' + 234, // '¼' + 248, // '½' + 158, // '¾' + 239, // '¿' + 255, // 'À' + 255, // 'Á' + 255, // 'Â' + 255, // 'Ã' + 255, // 'Ä' + 255, // 'Å' + 255, // 'Æ' + 255, // 'Ç' + 255, // 'È' + 255, // 'É' + 255, // 'Ê' + 255, // 'Ë' + 255, // 'Ì' + 255, // 'Í' + 255, // 'Î' + 255, // 'Ï' + 255, // 'Ð' + 255, // 'Ñ' + 255, // 'Ò' + 255, // 'Ó' + 255, // 'Ô' + 255, // 'Õ' + 255, // 'Ö' + 255, // '×' + 255, // 'Ø' + 255, // 'Ù' + 255, // 'Ú' + 255, // 'Û' + 255, // 'Ü' + 255, // 'Ý' + 255, // 'Þ' + 255, // 'ß' + 255, // 'à' + 255, // 'á' + 255, // 'â' + 255, // 'ã' + 255, // 'ä' + 255, // 'å' + 255, // 'æ' + 255, // 'ç' + 255, // 'è' + 255, // 'é' + 255, // 'ê' + 255, // 'ë' + 255, // 'ì' + 255, // 'í' + 255, // 'î' + 255, // 'ï' + 255, // 'ð' + 255, // 'ñ' + 255, // 'ò' + 255, // 'ó' + 255, // 'ô' + 255, // 'õ' + 255, // 'ö' + 255, // '÷' + 255, // 'ø' + 255, // 'ù' + 255, // 'ú' + 255, // 'û' + 255, // 'ü' + 255, // 'ý' + 255, // 'þ' + 255, // 'ÿ' +]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode.rs new file mode 100644 index 0000000000000000000000000000000000000000..07f78194b21eaf0b9508d45bc7ac74c037f21f7a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode.rs @@ -0,0 +1,1041 @@ +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; + +use crate::hir; + +/// An inclusive range of codepoints from a generated file (hence the static +/// lifetime). +type Range = &'static [(char, char)]; + +/// An error that occurs when dealing with Unicode. +/// +/// We don't impl the Error trait here because these always get converted +/// into other public errors. (This error type isn't exported.) +#[derive(Debug)] +pub enum Error { + PropertyNotFound, + PropertyValueNotFound, + // Not used when unicode-perl is enabled. 
+ #[allow(dead_code)] + PerlClassNotFound, +} + +/// An error that occurs when Unicode-aware simple case folding fails. +/// +/// This error can occur when the case mapping tables necessary for Unicode +/// aware case folding are unavailable. This only occurs when the +/// `unicode-case` feature is disabled. (The feature is enabled by default.) +#[derive(Debug)] +pub struct CaseFoldError(()); + +#[cfg(feature = "std")] +impl std::error::Error for CaseFoldError {} + +impl core::fmt::Display for CaseFoldError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "Unicode-aware case folding is not available \ + (probably because the unicode-case feature is not enabled)" + ) + } +} + +/// An error that occurs when the Unicode-aware `\w` class is unavailable. +/// +/// This error can occur when the data tables necessary for the Unicode aware +/// Perl character class `\w` are unavailable. This only occurs when the +/// `unicode-perl` feature is disabled. (The feature is enabled by default.) +#[derive(Debug)] +pub struct UnicodeWordError(()); + +#[cfg(feature = "std")] +impl std::error::Error for UnicodeWordError {} + +impl core::fmt::Display for UnicodeWordError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "Unicode-aware \\w class is not available \ + (probably because the unicode-perl feature is not enabled)" + ) + } +} + +/// A state oriented traverser of the simple case folding table. +/// +/// A case folder can be constructed via `SimpleCaseFolder::new()`, which will +/// return an error if the underlying case folding table is unavailable. +/// +/// After construction, it is expected that callers will use +/// `SimpleCaseFolder::mapping` by calling it with codepoints in strictly +/// increasing order. For example, calling it on `b` and then on `a` is illegal +/// and will result in a panic. 
+/// +/// The main idea of this type is that it tries hard to make mapping lookups +/// fast by exploiting the structure of the underlying table, and the ordering +/// assumption enables this. +#[derive(Debug)] +pub struct SimpleCaseFolder { + /// The simple case fold table. It's a sorted association list, where the + /// keys are Unicode scalar values and the values are the corresponding + /// equivalence class (not including the key) of the "simple" case folded + /// Unicode scalar values. + table: &'static [(char, &'static [char])], + /// The last codepoint that was used for a lookup. + last: Option, + /// The index to the entry in `table` corresponding to the smallest key `k` + /// such that `k > k0`, where `k0` is the most recent key lookup. Note that + /// in particular, `k0` may not be in the table! + next: usize, +} + +impl SimpleCaseFolder { + /// Create a new simple case folder, returning an error if the underlying + /// case folding table is unavailable. + pub fn new() -> Result { + #[cfg(not(feature = "unicode-case"))] + { + Err(CaseFoldError(())) + } + #[cfg(feature = "unicode-case")] + { + Ok(SimpleCaseFolder { + table: crate::unicode_tables::case_folding_simple::CASE_FOLDING_SIMPLE, + last: None, + next: 0, + }) + } + } + + /// Return the equivalence class of case folded codepoints for the given + /// codepoint. The equivalence class returned never includes the codepoint + /// given. If the given codepoint has no case folded codepoints (i.e., + /// no entry in the underlying case folding table), then this returns an + /// empty slice. + /// + /// # Panics + /// + /// This panics when called with a `c` that is less than or equal to the + /// previous call. In other words, callers need to use this method with + /// strictly increasing values of `c`. 
+ pub fn mapping(&mut self, c: char) -> &'static [char] { + if let Some(last) = self.last { + assert!( + last < c, + "got codepoint U+{:X} which occurs before \ + last codepoint U+{:X}", + u32::from(c), + u32::from(last), + ); + } + self.last = Some(c); + if self.next >= self.table.len() { + return &[]; + } + let (k, v) = self.table[self.next]; + if k == c { + self.next += 1; + return v; + } + match self.get(c) { + Err(i) => { + self.next = i; + &[] + } + Ok(i) => { + // Since we require lookups to proceed + // in order, anything we find should be + // after whatever we thought might be + // next. Otherwise, the caller is either + // going out of order or we would have + // found our next key at 'self.next'. + assert!(i > self.next); + self.next = i + 1; + self.table[i].1 + } + } + } + + /// Returns true if and only if the given range overlaps with any region + /// of the underlying case folding table. That is, when true, there exists + /// at least one codepoint in the inclusive range `[start, end]` that has + /// a non-trivial equivalence class of case folded codepoints. Conversely, + /// when this returns false, all codepoints in the range `[start, end]` + /// correspond to the trivial equivalence class of case folded codepoints, + /// i.e., itself. + /// + /// This is useful to call before iterating over the codepoints in the + /// range and looking up the mapping for each. If you know none of the + /// mappings will return anything, then you might be able to skip doing it + /// altogether. + /// + /// # Panics + /// + /// This panics when `end < start`. + pub fn overlaps(&self, start: char, end: char) -> bool { + use core::cmp::Ordering; + + assert!(start <= end); + self.table + .binary_search_by(|&(c, _)| { + if start <= c && c <= end { + Ordering::Equal + } else if c > end { + Ordering::Greater + } else { + Ordering::Less + } + }) + .is_ok() + } + + /// Returns the index at which `c` occurs in the simple case fold table. 
If + /// `c` does not occur, then this returns an `i` such that `table[i-1].0 < + /// c` and `table[i].0 > c`. + fn get(&self, c: char) -> Result { + self.table.binary_search_by_key(&c, |&(c1, _)| c1) + } +} + +/// A query for finding a character class defined by Unicode. This supports +/// either use of a property name directly, or lookup by property value. The +/// former generally refers to Binary properties (see UTS#44, Table 8), but +/// as a special exception (see UTS#18, Section 1.2) both general categories +/// (an enumeration) and scripts (a catalog) are supported as if each of their +/// possible values were a binary property. +/// +/// In all circumstances, property names and values are normalized and +/// canonicalized. That is, `GC == gc == GeneralCategory == general_category`. +/// +/// The lifetime `'a` refers to the shorter of the lifetimes of property name +/// and property value. +#[derive(Debug)] +pub enum ClassQuery<'a> { + /// Return a class corresponding to a Unicode binary property, named by + /// a single letter. + OneLetter(char), + /// Return a class corresponding to a Unicode binary property. + /// + /// Note that, by special exception (see UTS#18, Section 1.2), both + /// general category values and script values are permitted here as if + /// they were a binary property. + Binary(&'a str), + /// Return a class corresponding to all codepoints whose property + /// (identified by `property_name`) corresponds to the given value + /// (identified by `property_value`). + ByValue { + /// A property name. + property_name: &'a str, + /// A property value. 
+ property_value: &'a str, + }, +} + +impl<'a> ClassQuery<'a> { + fn canonicalize(&self) -> Result { + match *self { + ClassQuery::OneLetter(c) => self.canonical_binary(&c.to_string()), + ClassQuery::Binary(name) => self.canonical_binary(name), + ClassQuery::ByValue { property_name, property_value } => { + let property_name = symbolic_name_normalize(property_name); + let property_value = symbolic_name_normalize(property_value); + + let canon_name = match canonical_prop(&property_name)? { + None => return Err(Error::PropertyNotFound), + Some(canon_name) => canon_name, + }; + Ok(match canon_name { + "General_Category" => { + let canon = match canonical_gencat(&property_value)? { + None => return Err(Error::PropertyValueNotFound), + Some(canon) => canon, + }; + CanonicalClassQuery::GeneralCategory(canon) + } + "Script" => { + let canon = match canonical_script(&property_value)? { + None => return Err(Error::PropertyValueNotFound), + Some(canon) => canon, + }; + CanonicalClassQuery::Script(canon) + } + _ => { + let vals = match property_values(canon_name)? { + None => return Err(Error::PropertyValueNotFound), + Some(vals) => vals, + }; + let canon_val = + match canonical_value(vals, &property_value) { + None => { + return Err(Error::PropertyValueNotFound) + } + Some(canon_val) => canon_val, + }; + CanonicalClassQuery::ByValue { + property_name: canon_name, + property_value: canon_val, + } + } + }) + } + } + } + + fn canonical_binary( + &self, + name: &str, + ) -> Result { + let norm = symbolic_name_normalize(name); + + // This is a special case where 'cf' refers to the 'Format' general + // category, but where the 'cf' abbreviation is also an abbreviation + // for the 'Case_Folding' property. But we want to treat it as + // a general category. (Currently, we don't even support the + // 'Case_Folding' property. But if we do in the future, users will be + // required to spell it out.) 
+ // + // Also 'sc' refers to the 'Currency_Symbol' general category, but is + // also the abbreviation for the 'Script' property. So we avoid calling + // 'canonical_prop' for it too, which would erroneously normalize it + // to 'Script'. + // + // Another case: 'lc' is an abbreviation for the 'Cased_Letter' + // general category, but is also an abbreviation for the 'Lowercase_Mapping' + // property. We don't currently support the latter, so as with 'cf' + // above, we treat 'lc' as 'Cased_Letter'. + if norm != "cf" && norm != "sc" && norm != "lc" { + if let Some(canon) = canonical_prop(&norm)? { + return Ok(CanonicalClassQuery::Binary(canon)); + } + } + if let Some(canon) = canonical_gencat(&norm)? { + return Ok(CanonicalClassQuery::GeneralCategory(canon)); + } + if let Some(canon) = canonical_script(&norm)? { + return Ok(CanonicalClassQuery::Script(canon)); + } + Err(Error::PropertyNotFound) + } +} + +/// Like ClassQuery, but its parameters have been canonicalized. This also +/// differentiates binary properties from flattened general categories and +/// scripts. +#[derive(Debug, Eq, PartialEq)] +enum CanonicalClassQuery { + /// The canonical binary property name. + Binary(&'static str), + /// The canonical general category name. + GeneralCategory(&'static str), + /// The canonical script name. + Script(&'static str), + /// An arbitrary association between property and value, both of which + /// have been canonicalized. + /// + /// Note that by construction, the property name of ByValue will never + /// be General_Category or Script. Those two cases are subsumed by the + /// eponymous variants. + ByValue { + /// The canonical property name. + property_name: &'static str, + /// The canonical property value. + property_value: &'static str, + }, +} + +/// Looks up a Unicode class given a query. If one doesn't exist, then +/// `None` is returned. +pub fn class(query: ClassQuery<'_>) -> Result { + use self::CanonicalClassQuery::*; + + match query.canonicalize()? 
{ + Binary(name) => bool_property(name), + GeneralCategory(name) => gencat(name), + Script(name) => script(name), + ByValue { property_name: "Age", property_value } => { + let mut class = hir::ClassUnicode::empty(); + for set in ages(property_value)? { + class.union(&hir_class(set)); + } + Ok(class) + } + ByValue { property_name: "Script_Extensions", property_value } => { + script_extension(property_value) + } + ByValue { + property_name: "Grapheme_Cluster_Break", + property_value, + } => gcb(property_value), + ByValue { property_name: "Sentence_Break", property_value } => { + sb(property_value) + } + ByValue { property_name: "Word_Break", property_value } => { + wb(property_value) + } + _ => { + // What else should we support? + Err(Error::PropertyNotFound) + } + } +} + +/// Returns a Unicode aware class for \w. +/// +/// This returns an error if the data is not available for \w. +pub fn perl_word() -> Result { + #[cfg(not(feature = "unicode-perl"))] + fn imp() -> Result { + Err(Error::PerlClassNotFound) + } + + #[cfg(feature = "unicode-perl")] + fn imp() -> Result { + use crate::unicode_tables::perl_word::PERL_WORD; + Ok(hir_class(PERL_WORD)) + } + + imp() +} + +/// Returns a Unicode aware class for \s. +/// +/// This returns an error if the data is not available for \s. +pub fn perl_space() -> Result { + #[cfg(not(any(feature = "unicode-perl", feature = "unicode-bool")))] + fn imp() -> Result { + Err(Error::PerlClassNotFound) + } + + #[cfg(all(feature = "unicode-perl", not(feature = "unicode-bool")))] + fn imp() -> Result { + use crate::unicode_tables::perl_space::WHITE_SPACE; + Ok(hir_class(WHITE_SPACE)) + } + + #[cfg(feature = "unicode-bool")] + fn imp() -> Result { + use crate::unicode_tables::property_bool::WHITE_SPACE; + Ok(hir_class(WHITE_SPACE)) + } + + imp() +} + +/// Returns a Unicode aware class for \d. +/// +/// This returns an error if the data is not available for \d. 
+pub fn perl_digit() -> Result { + #[cfg(not(any(feature = "unicode-perl", feature = "unicode-gencat")))] + fn imp() -> Result { + Err(Error::PerlClassNotFound) + } + + #[cfg(all(feature = "unicode-perl", not(feature = "unicode-gencat")))] + fn imp() -> Result { + use crate::unicode_tables::perl_decimal::DECIMAL_NUMBER; + Ok(hir_class(DECIMAL_NUMBER)) + } + + #[cfg(feature = "unicode-gencat")] + fn imp() -> Result { + use crate::unicode_tables::general_category::DECIMAL_NUMBER; + Ok(hir_class(DECIMAL_NUMBER)) + } + + imp() +} + +/// Build a Unicode HIR class from a sequence of Unicode scalar value ranges. +pub fn hir_class(ranges: &[(char, char)]) -> hir::ClassUnicode { + let hir_ranges: Vec = ranges + .iter() + .map(|&(s, e)| hir::ClassUnicodeRange::new(s, e)) + .collect(); + hir::ClassUnicode::new(hir_ranges) +} + +/// Returns true only if the given codepoint is in the `\w` character class. +/// +/// If the `unicode-perl` feature is not enabled, then this returns an error. +pub fn is_word_character(c: char) -> Result { + #[cfg(not(feature = "unicode-perl"))] + fn imp(_: char) -> Result { + Err(UnicodeWordError(())) + } + + #[cfg(feature = "unicode-perl")] + fn imp(c: char) -> Result { + use crate::{is_word_byte, unicode_tables::perl_word::PERL_WORD}; + + if u8::try_from(c).map_or(false, is_word_byte) { + return Ok(true); + } + Ok(PERL_WORD + .binary_search_by(|&(start, end)| { + use core::cmp::Ordering; + + if start <= c && c <= end { + Ordering::Equal + } else if start > c { + Ordering::Greater + } else { + Ordering::Less + } + }) + .is_ok()) + } + + imp(c) +} + +/// A mapping of property values for a specific property. +/// +/// The first element of each tuple is a normalized property value while the +/// second element of each tuple is the corresponding canonical property +/// value. 
+type PropertyValues = &'static [(&'static str, &'static str)]; + +fn canonical_gencat( + normalized_value: &str, +) -> Result, Error> { + Ok(match normalized_value { + "any" => Some("Any"), + "assigned" => Some("Assigned"), + "ascii" => Some("ASCII"), + _ => { + let gencats = property_values("General_Category")?.unwrap(); + canonical_value(gencats, normalized_value) + } + }) +} + +fn canonical_script( + normalized_value: &str, +) -> Result, Error> { + let scripts = property_values("Script")?.unwrap(); + Ok(canonical_value(scripts, normalized_value)) +} + +/// Find the canonical property name for the given normalized property name. +/// +/// If no such property exists, then `None` is returned. +/// +/// The normalized property name must have been normalized according to +/// UAX44 LM3, which can be done using `symbolic_name_normalize`. +/// +/// If the property names data is not available, then an error is returned. +fn canonical_prop( + normalized_name: &str, +) -> Result, Error> { + #[cfg(not(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + )))] + fn imp(_: &str) -> Result, Error> { + Err(Error::PropertyNotFound) + } + + #[cfg(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + ))] + fn imp(name: &str) -> Result, Error> { + use crate::unicode_tables::property_names::PROPERTY_NAMES; + + Ok(PROPERTY_NAMES + .binary_search_by_key(&name, |&(n, _)| n) + .ok() + .map(|i| PROPERTY_NAMES[i].1)) + } + + imp(normalized_name) +} + +/// Find the canonical property value for the given normalized property +/// value. +/// +/// The given property values should correspond to the values for the property +/// under question, which can be found using `property_values`. 
+/// +/// If no such property value exists, then `None` is returned. +/// +/// The normalized property value must have been normalized according to +/// UAX44 LM3, which can be done using `symbolic_name_normalize`. +fn canonical_value( + vals: PropertyValues, + normalized_value: &str, +) -> Option<&'static str> { + vals.binary_search_by_key(&normalized_value, |&(n, _)| n) + .ok() + .map(|i| vals[i].1) +} + +/// Return the table of property values for the given property name. +/// +/// If the property values data is not available, then an error is returned. +fn property_values( + canonical_property_name: &'static str, +) -> Result, Error> { + #[cfg(not(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + )))] + fn imp(_: &'static str) -> Result, Error> { + Err(Error::PropertyValueNotFound) + } + + #[cfg(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", + ))] + fn imp(name: &'static str) -> Result, Error> { + use crate::unicode_tables::property_values::PROPERTY_VALUES; + + Ok(PROPERTY_VALUES + .binary_search_by_key(&name, |&(n, _)| n) + .ok() + .map(|i| PROPERTY_VALUES[i].1)) + } + + imp(canonical_property_name) +} + +// This is only used in some cases, but small enough to just let it be dead +// instead of figuring out (and maintaining) the right set of features. +#[allow(dead_code)] +fn property_set( + name_map: &'static [(&'static str, Range)], + canonical: &'static str, +) -> Option { + name_map + .binary_search_by_key(&canonical, |x| x.0) + .ok() + .map(|i| name_map[i].1) +} + +/// Returns an iterator over Unicode Age sets. Each item corresponds to a set +/// of codepoints that were added in a particular revision of Unicode. The +/// iterator yields items in chronological order. 
+/// +/// If the given age value isn't valid or if the data isn't available, then an +/// error is returned instead. +fn ages(canonical_age: &str) -> Result, Error> { + #[cfg(not(feature = "unicode-age"))] + fn imp(_: &str) -> Result, Error> { + use core::option::IntoIter; + Err::, _>(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-age")] + fn imp(canonical_age: &str) -> Result, Error> { + use crate::unicode_tables::age; + + const AGES: &[(&str, Range)] = &[ + ("V1_1", age::V1_1), + ("V2_0", age::V2_0), + ("V2_1", age::V2_1), + ("V3_0", age::V3_0), + ("V3_1", age::V3_1), + ("V3_2", age::V3_2), + ("V4_0", age::V4_0), + ("V4_1", age::V4_1), + ("V5_0", age::V5_0), + ("V5_1", age::V5_1), + ("V5_2", age::V5_2), + ("V6_0", age::V6_0), + ("V6_1", age::V6_1), + ("V6_2", age::V6_2), + ("V6_3", age::V6_3), + ("V7_0", age::V7_0), + ("V8_0", age::V8_0), + ("V9_0", age::V9_0), + ("V10_0", age::V10_0), + ("V11_0", age::V11_0), + ("V12_0", age::V12_0), + ("V12_1", age::V12_1), + ("V13_0", age::V13_0), + ("V14_0", age::V14_0), + ("V15_0", age::V15_0), + ("V15_1", age::V15_1), + ("V16_0", age::V16_0), + ]; + assert_eq!(AGES.len(), age::BY_NAME.len(), "ages are out of sync"); + + let pos = AGES.iter().position(|&(age, _)| canonical_age == age); + match pos { + None => Err(Error::PropertyValueNotFound), + Some(i) => Ok(AGES[..=i].iter().map(|&(_, classes)| classes)), + } + } + + imp(canonical_age) +} + +/// Returns the Unicode HIR class corresponding to the given general category. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given general category could not be found, or if the general +/// category data is not available, then an error is returned. 
+fn gencat(canonical_name: &'static str) -> Result { + #[cfg(not(feature = "unicode-gencat"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-gencat")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::general_category::BY_NAME; + match name { + "ASCII" => Ok(hir_class(&[('\0', '\x7F')])), + "Any" => Ok(hir_class(&[('\0', '\u{10FFFF}')])), + "Assigned" => { + let mut cls = gencat("Unassigned")?; + cls.negate(); + Ok(cls) + } + name => property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound), + } + } + + match canonical_name { + "Decimal_Number" => perl_digit(), + name => imp(name), + } +} + +/// Returns the Unicode HIR class corresponding to the given script. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given script could not be found, or if the script data is not +/// available, then an error is returned. +fn script(canonical_name: &'static str) -> Result { + #[cfg(not(feature = "unicode-script"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-script")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::script::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound) + } + + imp(canonical_name) +} + +/// Returns the Unicode HIR class corresponding to the given script extension. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given script extension could not be found, or if the script data is +/// not available, then an error is returned. 
+fn script_extension( + canonical_name: &'static str, +) -> Result { + #[cfg(not(feature = "unicode-script"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-script")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::script_extension::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound) + } + + imp(canonical_name) +} + +/// Returns the Unicode HIR class corresponding to the given Unicode boolean +/// property. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given boolean property could not be found, or if the boolean +/// property data is not available, then an error is returned. +fn bool_property( + canonical_name: &'static str, +) -> Result { + #[cfg(not(feature = "unicode-bool"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-bool")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::property_bool::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyNotFound) + } + + match canonical_name { + "Decimal_Number" => perl_digit(), + "White_Space" => perl_space(), + name => imp(name), + } +} + +/// Returns the Unicode HIR class corresponding to the given grapheme cluster +/// break property. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given property could not be found, or if the corresponding data is +/// not available, then an error is returned. 
+fn gcb(canonical_name: &'static str) -> Result { + #[cfg(not(feature = "unicode-segment"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-segment")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::grapheme_cluster_break::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound) + } + + imp(canonical_name) +} + +/// Returns the Unicode HIR class corresponding to the given word break +/// property. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given property could not be found, or if the corresponding data is +/// not available, then an error is returned. +fn wb(canonical_name: &'static str) -> Result { + #[cfg(not(feature = "unicode-segment"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-segment")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::word_break::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound) + } + + imp(canonical_name) +} + +/// Returns the Unicode HIR class corresponding to the given sentence +/// break property. +/// +/// Name canonicalization is assumed to be performed by the caller. +/// +/// If the given property could not be found, or if the corresponding data is +/// not available, then an error is returned. +fn sb(canonical_name: &'static str) -> Result { + #[cfg(not(feature = "unicode-segment"))] + fn imp(_: &'static str) -> Result { + Err(Error::PropertyNotFound) + } + + #[cfg(feature = "unicode-segment")] + fn imp(name: &'static str) -> Result { + use crate::unicode_tables::sentence_break::BY_NAME; + property_set(BY_NAME, name) + .map(hir_class) + .ok_or(Error::PropertyValueNotFound) + } + + imp(canonical_name) +} + +/// Like symbolic_name_normalize_bytes, but operates on a string. 
+fn symbolic_name_normalize(x: &str) -> String { + let mut tmp = x.as_bytes().to_vec(); + let len = symbolic_name_normalize_bytes(&mut tmp).len(); + tmp.truncate(len); + // This should always succeed because `symbolic_name_normalize_bytes` + // guarantees that `&tmp[..len]` is always valid UTF-8. + // + // N.B. We could avoid the additional UTF-8 check here, but it's unlikely + // to be worth skipping the additional safety check. A benchmark must + // justify it first. + String::from_utf8(tmp).unwrap() +} + +/// Normalize the given symbolic name in place according to UAX44-LM3. +/// +/// A "symbolic name" typically corresponds to property names and property +/// value aliases. Note, though, that it should not be applied to property +/// string values. +/// +/// The slice returned is guaranteed to be valid UTF-8 for all possible values +/// of `slice`. +/// +/// See: https://unicode.org/reports/tr44/#UAX44-LM3 +fn symbolic_name_normalize_bytes(slice: &mut [u8]) -> &mut [u8] { + // I couldn't find a place in the standard that specified that property + // names/aliases had a particular structure (unlike character names), but + // we assume that it's ASCII only and drop anything that isn't ASCII. + let mut start = 0; + let mut starts_with_is = false; + if slice.len() >= 2 { + // Ignore any "is" prefix. + starts_with_is = slice[0..2] == b"is"[..] + || slice[0..2] == b"IS"[..] + || slice[0..2] == b"iS"[..] + || slice[0..2] == b"Is"[..]; + if starts_with_is { + start = 2; + } + } + let mut next_write = 0; + for i in start..slice.len() { + // VALIDITY ARGUMENT: To guarantee that the resulting slice is valid + // UTF-8, we ensure that the slice contains only ASCII bytes. In + // particular, we drop every non-ASCII byte from the normalized string. 
+ let b = slice[i]; + if b == b' ' || b == b'_' || b == b'-' { + continue; + } else if b'A' <= b && b <= b'Z' { + slice[next_write] = b + (b'a' - b'A'); + next_write += 1; + } else if b <= 0x7F { + slice[next_write] = b; + next_write += 1; + } + } + // Special case: ISO_Comment has a 'isc' abbreviation. Since we generally + // ignore 'is' prefixes, the 'isc' abbreviation gets caught in the cross + // fire and ends up creating an alias for 'c' to 'ISO_Comment', but it + // is actually an alias for the 'Other' general category. + if starts_with_is && next_write == 1 && slice[0] == b'c' { + slice[0] = b'i'; + slice[1] = b's'; + slice[2] = b'c'; + next_write = 3; + } + &mut slice[..next_write] +} + +#[cfg(test)] +mod tests { + use super::*; + + #[cfg(feature = "unicode-case")] + fn simple_fold_ok(c: char) -> impl Iterator { + SimpleCaseFolder::new().unwrap().mapping(c).iter().copied() + } + + #[cfg(feature = "unicode-case")] + fn contains_case_map(start: char, end: char) -> bool { + SimpleCaseFolder::new().unwrap().overlaps(start, end) + } + + #[test] + #[cfg(feature = "unicode-case")] + fn simple_fold_k() { + let xs: Vec = simple_fold_ok('k').collect(); + assert_eq!(xs, alloc::vec!['K', 'K']); + + let xs: Vec = simple_fold_ok('K').collect(); + assert_eq!(xs, alloc::vec!['k', 'K']); + + let xs: Vec = simple_fold_ok('K').collect(); + assert_eq!(xs, alloc::vec!['K', 'k']); + } + + #[test] + #[cfg(feature = "unicode-case")] + fn simple_fold_a() { + let xs: Vec = simple_fold_ok('a').collect(); + assert_eq!(xs, alloc::vec!['A']); + + let xs: Vec = simple_fold_ok('A').collect(); + assert_eq!(xs, alloc::vec!['a']); + } + + #[test] + #[cfg(not(feature = "unicode-case"))] + fn simple_fold_disabled() { + assert!(SimpleCaseFolder::new().is_err()); + } + + #[test] + #[cfg(feature = "unicode-case")] + fn range_contains() { + assert!(contains_case_map('A', 'A')); + assert!(contains_case_map('Z', 'Z')); + assert!(contains_case_map('A', 'Z')); + assert!(contains_case_map('@', 'A')); + 
assert!(contains_case_map('Z', '[')); + assert!(contains_case_map('☃', 'Ⰰ')); + + assert!(!contains_case_map('[', '[')); + assert!(!contains_case_map('[', '`')); + + assert!(!contains_case_map('☃', '☃')); + } + + #[test] + #[cfg(feature = "unicode-gencat")] + fn regression_466() { + use super::{CanonicalClassQuery, ClassQuery}; + + let q = ClassQuery::OneLetter('C'); + assert_eq!( + q.canonicalize().unwrap(), + CanonicalClassQuery::GeneralCategory("Other") + ); + } + + #[test] + fn sym_normalize() { + let sym_norm = symbolic_name_normalize; + + assert_eq!(sym_norm("Line_Break"), "linebreak"); + assert_eq!(sym_norm("Line-break"), "linebreak"); + assert_eq!(sym_norm("linebreak"), "linebreak"); + assert_eq!(sym_norm("BA"), "ba"); + assert_eq!(sym_norm("ba"), "ba"); + assert_eq!(sym_norm("Greek"), "greek"); + assert_eq!(sym_norm("isGreek"), "greek"); + assert_eq!(sym_norm("IS_Greek"), "greek"); + assert_eq!(sym_norm("isc"), "isc"); + assert_eq!(sym_norm("is c"), "isc"); + assert_eq!(sym_norm("is_c"), "isc"); + } + + #[test] + fn valid_utf8_symbolic() { + let mut x = b"abc\xFFxyz".to_vec(); + let y = symbolic_name_normalize_bytes(&mut x); + assert_eq!(y, b"abcxyz"); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/LICENSE-UNICODE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/LICENSE-UNICODE new file mode 100644 index 0000000000000000000000000000000000000000..b82826bdbdd2c34622ca53747e9011919d8b78bf --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/LICENSE-UNICODE @@ -0,0 +1,57 @@ +UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE + +Unicode Data Files include all data files under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. 
+ +Unicode Data Files do not include PDF online code charts under the +directory http://www.unicode.org/Public/. + +Software includes any source code published in the Unicode Standard +or under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +NOTICE TO USER: Carefully read the following legal agreement. +BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S +DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), +YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE +TERMS AND CONDITIONS OF THIS AGREEMENT. +IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE +THE DATA FILES OR SOFTWARE. + +COPYRIGHT AND PERMISSION NOTICE + +Copyright © 1991-2018 Unicode, Inc. All rights reserved. +Distributed under the Terms of Use in http://www.unicode.org/copyright.html. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Unicode data files and any associated documentation +(the "Data Files") or Unicode software and any associated documentation +(the "Software") to deal in the Data Files or Software +without restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, and/or sell copies of +the Data Files or Software, and to permit persons to whom the Data Files +or Software are furnished to do so, provided that either +(a) this copyright and permission notice appear with all copies +of the Data Files or Software, or +(b) this copyright and permission notice appear in associated +Documentation. + +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS +NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL +DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THE DATA FILES OR SOFTWARE. + +Except as contained in this notice, the name of a copyright holder +shall not be used in advertising or otherwise to promote the sale, +use or other dealings in these Data Files or Software without prior +written authorization of the copyright holder. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/age.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/age.rs new file mode 100644 index 0000000000000000000000000000000000000000..466510c9e6131eedccf9c29fff4d633b0ef4786e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/age.rs @@ -0,0 +1,1846 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate age ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
+ +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("V10_0", V10_0), + ("V11_0", V11_0), + ("V12_0", V12_0), + ("V12_1", V12_1), + ("V13_0", V13_0), + ("V14_0", V14_0), + ("V15_0", V15_0), + ("V15_1", V15_1), + ("V16_0", V16_0), + ("V1_1", V1_1), + ("V2_0", V2_0), + ("V2_1", V2_1), + ("V3_0", V3_0), + ("V3_1", V3_1), + ("V3_2", V3_2), + ("V4_0", V4_0), + ("V4_1", V4_1), + ("V5_0", V5_0), + ("V5_1", V5_1), + ("V5_2", V5_2), + ("V6_0", V6_0), + ("V6_1", V6_1), + ("V6_2", V6_2), + ("V6_3", V6_3), + ("V7_0", V7_0), + ("V8_0", V8_0), + ("V9_0", V9_0), +]; + +pub const V10_0: &'static [(char, char)] = &[ + ('ࡠ', 'ࡪ'), + ('ৼ', '৽'), + ('\u{afa}', '\u{aff}'), + ('\u{d00}', '\u{d00}'), + ('\u{d3b}', '\u{d3c}'), + ('᳷', '᳷'), + ('\u{1df6}', '\u{1df9}'), + ('₿', '₿'), + ('⏿', '⏿'), + ('⯒', '⯒'), + ('⹅', '⹉'), + ('ㄮ', 'ㄮ'), + ('鿖', '鿪'), + ('𐌭', '𐌯'), + ('𑨀', '\u{11a47}'), + ('𑩐', '𑪃'), + ('𑪆', '𑪜'), + ('𑪞', '𑪢'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), + ('𖿡', '𖿡'), + ('𛀂', '𛄞'), + ('𛅰', '𛋻'), + ('🉠', '🉥'), + ('🛓', '🛔'), + ('🛷', '🛸'), + ('🤀', '🤋'), + ('🤟', '🤟'), + ('🤨', '🤯'), + ('🤱', '🤲'), + ('🥌', '🥌'), + ('🥟', '🥫'), + ('🦒', '🦗'), + ('🧐', '🧦'), + ('𬺰', '𮯠'), +]; + +pub const V11_0: &'static [(char, char)] = &[ + ('ՠ', 'ՠ'), + ('ֈ', 'ֈ'), + ('ׯ', 'ׯ'), + ('\u{7fd}', '߿'), + ('\u{8d3}', '\u{8d3}'), + ('\u{9fe}', '\u{9fe}'), + ('੶', '੶'), + ('\u{c04}', '\u{c04}'), + ('಄', '಄'), + ('ᡸ', 'ᡸ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('⮺', '⮼'), + ('⯓', '⯫'), + ('⯰', '⯾'), + ('⹊', '⹎'), + ('ㄯ', 'ㄯ'), + ('鿫', '鿯'), + ('ꞯ', 'ꞯ'), + ('Ꞹ', 'ꞹ'), + ('ꣾ', '\u{a8ff}'), + ('𐨴', '𐨵'), + ('𐩈', '𐩈'), + ('𐴀', '\u{10d27}'), + ('𐴰', '𐴹'), + ('𐼀', '𐼧'), + ('𐼰', '𐽙'), + ('\u{110cd}', '\u{110cd}'), + ('𑅄', '𑅆'), + ('\u{1133b}', '\u{1133b}'), + ('\u{1145e}', '\u{1145e}'), + ('𑜚', '𑜚'), + ('𑠀', '𑠻'), + ('𑪝', '𑪝'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + 
('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), + ('𑻠', '𑻸'), + ('𖹀', '𖺚'), + ('𘟭', '𘟱'), + ('𝋠', '𝋳'), + ('𝍲', '𝍸'), + ('𞱱', '𞲴'), + ('🄯', '🄯'), + ('🛹', '🛹'), + ('🟕', '🟘'), + ('🥍', '🥏'), + ('🥬', '🥰'), + ('🥳', '🥶'), + ('🥺', '🥺'), + ('🥼', '🥿'), + ('🦘', '🦢'), + ('🦰', '🦹'), + ('🧁', '🧂'), + ('🧧', '🧿'), + ('🩠', '🩭'), +]; + +pub const V12_0: &'static [(char, char)] = &[ + ('౷', '౷'), + ('ຆ', 'ຆ'), + ('ຉ', 'ຉ'), + ('ຌ', 'ຌ'), + ('ຎ', 'ຓ'), + ('ຘ', 'ຘ'), + ('ຠ', 'ຠ'), + ('ຨ', 'ຩ'), + ('ຬ', 'ຬ'), + ('\u{eba}', '\u{eba}'), + ('ᳺ', 'ᳺ'), + ('⯉', '⯉'), + ('⯿', '⯿'), + ('⹏', '⹏'), + ('Ꞻ', 'ꞿ'), + ('Ꟃ', 'Ᶎ'), + ('ꭦ', 'ꭧ'), + ('𐿠', '𐿶'), + ('𑑟', '𑑟'), + ('𑚸', '𑚸'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧤'), + ('𑪄', '𑪅'), + ('𑿀', '𑿱'), + ('𑿿', '𑿿'), + ('\u{13430}', '\u{13438}'), + ('𖽅', '𖽊'), + ('\u{16f4f}', '\u{16f4f}'), + ('𖽿', '𖾇'), + ('𖿢', '𖿣'), + ('𘟲', '𘟷'), + ('𛅐', '𛅒'), + ('𛅤', '𛅧'), + ('𞄀', '𞄬'), + ('\u{1e130}', '𞄽'), + ('𞅀', '𞅉'), + ('𞅎', '𞅏'), + ('𞋀', '𞋹'), + ('𞋿', '𞋿'), + ('𞥋', '𞥋'), + ('𞴁', '𞴽'), + ('🅬', '🅬'), + ('🛕', '🛕'), + ('🛺', '🛺'), + ('🟠', '🟫'), + ('🤍', '🤏'), + ('🤿', '🤿'), + ('🥱', '🥱'), + ('🥻', '🥻'), + ('🦥', '🦪'), + ('🦮', '🦯'), + ('🦺', '🦿'), + ('🧃', '🧊'), + ('🧍', '🧏'), + ('🨀', '🩓'), + ('🩰', '🩳'), + ('🩸', '🩺'), + ('🪀', '🪂'), + ('🪐', '🪕'), +]; + +pub const V12_1: &'static [(char, char)] = &[('㋿', '㋿')]; + +pub const V13_0: &'static [(char, char)] = &[ + ('ࢾ', 'ࣇ'), + ('\u{b55}', '\u{b55}'), + ('ഄ', 'ഄ'), + ('\u{d81}', '\u{d81}'), + ('\u{1abf}', '\u{1ac0}'), + ('⮗', '⮗'), + ('⹐', '⹒'), + ('ㆻ', 'ㆿ'), + ('䶶', '䶿'), + ('鿰', '鿼'), + ('Ꟈ', 'ꟊ'), + ('Ꟶ', 'ꟶ'), + ('\u{a82c}', '\u{a82c}'), + ('ꭨ', '꭫'), + ('𐆜', '𐆜'), + ('𐺀', '𐺩'), + ('\u{10eab}', '𐺭'), + ('𐺰', '𐺱'), + ('𐾰', '𐿋'), + ('𑅇', '𑅇'), + ('𑇎', '\u{111cf}'), + ('𑑚', '𑑚'), + ('𑑠', '𑑡'), + ('𑤀', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '𑥆'), + ('𑥐', '𑥙'), + ('𑾰', '𑾰'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('𘫳', '𘳕'), + 
('𘴀', '𘴈'), + ('🄍', '🄏'), + ('🅭', '🅯'), + ('🆭', '🆭'), + ('🛖', '🛗'), + ('🛻', '🛼'), + ('🢰', '🢱'), + ('🤌', '🤌'), + ('🥲', '🥲'), + ('🥷', '🥸'), + ('🦣', '🦤'), + ('🦫', '🦭'), + ('🧋', '🧋'), + ('🩴', '🩴'), + ('🪃', '🪆'), + ('🪖', '🪨'), + ('🪰', '🪶'), + ('🫀', '🫂'), + ('🫐', '🫖'), + ('🬀', '🮒'), + ('🮔', '🯊'), + ('🯰', '🯹'), + ('𪛗', '𪛝'), + ('𰀀', '𱍊'), +]; + +pub const V14_0: &'static [(char, char)] = &[ + ('؝', '؝'), + ('ࡰ', 'ࢎ'), + ('\u{890}', '\u{891}'), + ('\u{898}', '\u{89f}'), + ('ࢵ', 'ࢵ'), + ('ࣈ', '\u{8d2}'), + ('\u{c3c}', '\u{c3c}'), + ('ౝ', 'ౝ'), + ('ೝ', 'ೝ'), + ('ᜍ', 'ᜍ'), + ('\u{1715}', '\u{1715}'), + ('ᜟ', 'ᜟ'), + ('\u{180f}', '\u{180f}'), + ('\u{1ac1}', '\u{1ace}'), + ('ᭌ', 'ᭌ'), + ('᭽', '᭾'), + ('\u{1dfa}', '\u{1dfa}'), + ('⃀', '⃀'), + ('Ⱟ', 'Ⱟ'), + ('ⱟ', 'ⱟ'), + ('⹓', '⹝'), + ('鿽', '鿿'), + ('Ꟁ', 'ꟁ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'ꟙ'), + ('ꟲ', 'ꟴ'), + ('﯂', '﯂'), + ('﵀', '﵏'), + ('﷏', '﷏'), + ('﷾', '﷿'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐽰', '𐾉'), + ('\u{11070}', '𑁵'), + ('\u{110c2}', '\u{110c2}'), + ('𑚹', '𑚹'), + ('𑝀', '𑝆'), + ('𑪰', '𑪿'), + ('𒾐', '𒿲'), + ('𖩰', '𖪾'), + ('𖫀', '𖫉'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛄟', '𛄢'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('𜽐', '𜿃'), + ('𝇩', '𝇪'), + ('𝼀', '𝼞'), + ('𞊐', '\u{1e2ae}'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('🛝', '🛟'), + ('🟰', '🟰'), + ('🥹', '🥹'), + ('🧌', '🧌'), + ('🩻', '🩼'), + ('🪩', '🪬'), + ('🪷', '🪺'), + ('🫃', '🫅'), + ('🫗', '🫙'), + ('🫠', '🫧'), + ('🫰', '🫶'), + ('𪛞', '𪛟'), + ('𫜵', '𫜸'), +]; + +pub const V15_0: &'static [(char, char)] = &[ + ('ೳ', 'ೳ'), + ('\u{ece}', '\u{ece}'), + ('\u{10efd}', '\u{10eff}'), + ('𑈿', '\u{11241}'), + ('𑬀', '𑬉'), + ('\u{11f00}', '𑼐'), + ('𑼒', '\u{11f3a}'), + ('𑼾', '𑽙'), + ('𓐯', '𓐯'), + ('\u{13439}', '\u{13455}'), + ('𛄲', '𛄲'), + ('𛅕', '𛅕'), + ('𝋀', '𝋓'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('\u{1e08f}', 
'\u{1e08f}'), + ('𞓐', '𞓹'), + ('🛜', '🛜'), + ('🝴', '🝶'), + ('🝻', '🝿'), + ('🟙', '🟙'), + ('🩵', '🩷'), + ('🪇', '🪈'), + ('🪭', '🪯'), + ('🪻', '🪽'), + ('🪿', '🪿'), + ('🫎', '🫏'), + ('🫚', '🫛'), + ('🫨', '🫨'), + ('🫷', '🫸'), + ('𫜹', '𫜹'), + ('𱍐', '𲎯'), +]; + +pub const V15_1: &'static [(char, char)] = + &[('⿼', '⿿'), ('㇯', '㇯'), ('𮯰', '𮹝')]; + +pub const V16_0: &'static [(char, char)] = &[ + ('\u{897}', '\u{897}'), + ('᭎', '᭏'), + ('᭿', '᭿'), + ('Ᲊ', 'ᲊ'), + ('␧', '␩'), + ('㇤', '㇥'), + ('Ɤ', 'ꟍ'), + ('Ꟛ', 'Ƛ'), + ('𐗀', '𐗳'), + ('𐵀', '𐵥'), + ('\u{10d69}', '𐶅'), + ('𐶎', '𐶏'), + ('𐻂', '𐻄'), + ('\u{10efc}', '\u{10efc}'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏕'), + ('𑏗', '𑏘'), + ('\u{113e1}', '\u{113e2}'), + ('𑛐', '𑛣'), + ('𑯀', '𑯡'), + ('𑯰', '𑯹'), + ('\u{11f5a}', '\u{11f5a}'), + ('𓑠', '𔏺'), + ('𖄀', '𖄹'), + ('𖵀', '𖵹'), + ('𘳿', '𘳿'), + ('𜰀', '𜳹'), + ('𜴀', '𜺳'), + ('𞗐', '𞗺'), + ('𞗿', '𞗿'), + ('🢲', '🢻'), + ('🣀', '🣁'), + ('🪉', '🪉'), + ('🪏', '🪏'), + ('🪾', '🪾'), + ('🫆', '🫆'), + ('🫜', '🫜'), + ('🫟', '🫟'), + ('🫩', '🫩'), + ('🯋', '🯯'), +]; + +pub const V1_1: &'static [(char, char)] = &[ + ('\0', 'ǵ'), + ('Ǻ', 'ȗ'), + ('ɐ', 'ʨ'), + ('ʰ', '˞'), + ('ˠ', '˩'), + ('\u{300}', '\u{345}'), + ('\u{360}', '\u{361}'), + ('ʹ', '͵'), + ('ͺ', 'ͺ'), + (';', ';'), + ('΄', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ώ'), + ('ϐ', 'ϖ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'ϳ'), + ('Ё', 'Ќ'), + ('Ў', 'я'), + ('ё', 'ќ'), + ('ў', '\u{486}'), + ('Ґ', 'ӄ'), + ('Ӈ', 'ӈ'), + ('Ӌ', 'ӌ'), + ('Ӑ', 'ӫ'), + ('Ӯ', 'ӵ'), + ('Ӹ', 'ӹ'), + ('Ա', 'Ֆ'), + ('ՙ', '՟'), + ('ա', 'և'), + ('։', '։'), + ('\u{5b0}', '\u{5b9}'), + ('\u{5bb}', '׃'), + ('א', 'ת'), + ('װ', '״'), + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('ء', 'غ'), + ('ـ', '\u{652}'), + ('٠', '٭'), + ('\u{670}', 'ڷ'), + ('ں', 'ھ'), + ('ۀ', 'ێ'), + ('ې', '\u{6ed}'), + ('۰', '۹'), + ('\u{901}', 'ः'), + ('अ', 'ह'), + 
('\u{93c}', '\u{94d}'), + ('ॐ', '\u{954}'), + ('क़', '॰'), + ('\u{981}', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', '৺'), + ('\u{a02}', '\u{a02}'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', 'ੴ'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઋ'), + ('ઍ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૠ'), + ('૦', '૯'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଶ', 'ହ'), + ('\u{b3c}', '\u{b43}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b56}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('୦', '୰'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'வ'), + ('ஷ', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('௧', '௲'), + ('ఁ', 'ః'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'ళ'), + ('వ', 'హ'), + ('\u{c3e}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౠ', 'ౡ'), + ('౦', '౯'), + ('ಂ', 'ಃ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಾ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೞ', 'ೞ'), + ('ೠ', 'ೡ'), + ('೦', '೯'), + ('ം', 'ഃ'), + ('അ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ന'), + ('പ', 'ഹ'), + ('\u{d3e}', '\u{d43}'), + ('െ', 'ൈ'), + ('ൊ', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('ൠ', 'ൡ'), + ('൦', '൯'), + ('ก', '\u{e3a}'), + ('฿', '๛'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ງ', 'ຈ'), + 
('ຊ', 'ຊ'), + ('ຍ', 'ຍ'), + ('ດ', 'ທ'), + ('ນ', 'ຟ'), + ('ມ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ວ'), + ('ສ', 'ຫ'), + ('ອ', '\u{eb9}'), + ('\u{ebb}', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ecd}'), + ('໐', '໙'), + ('ໜ', 'ໝ'), + ('Ⴀ', 'Ⴥ'), + ('ა', 'ჶ'), + ('჻', '჻'), + ('ᄀ', 'ᅙ'), + ('ᅟ', 'ᆢ'), + ('ᆨ', 'ᇹ'), + ('Ḁ', 'ẚ'), + ('Ạ', 'ỹ'), + ('ἀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ῄ'), + ('ῆ', 'ΐ'), + ('ῖ', 'Ί'), + ('῝', '`'), + ('ῲ', 'ῴ'), + ('ῶ', '῾'), + ('\u{2000}', '\u{202e}'), + ('‰', '⁆'), + ('\u{206a}', '⁰'), + ('⁴', '₎'), + ('₠', '₪'), + ('\u{20d0}', '\u{20e1}'), + ('℀', 'ℸ'), + ('⅓', 'ↂ'), + ('←', '⇪'), + ('∀', '⋱'), + ('⌀', '⌀'), + ('⌂', '⍺'), + ('␀', '␤'), + ('⑀', '⑊'), + ('①', '⓪'), + ('─', '▕'), + ('■', '◯'), + ('☀', '☓'), + ('☚', '♯'), + ('✁', '✄'), + ('✆', '✉'), + ('✌', '✧'), + ('✩', '❋'), + ('❍', '❍'), + ('❏', '❒'), + ('❖', '❖'), + ('❘', '❞'), + ('❡', '❧'), + ('❶', '➔'), + ('➘', '➯'), + ('➱', '➾'), + ('\u{3000}', '〷'), + ('〿', '〿'), + ('ぁ', 'ゔ'), + ('\u{3099}', 'ゞ'), + ('ァ', 'ヾ'), + ('ㄅ', 'ㄬ'), + ('ㄱ', 'ㆎ'), + ('㆐', '㆟'), + ('㈀', '㈜'), + ('㈠', '㉃'), + ('㉠', '㉻'), + ('㉿', '㊰'), + ('㋀', '㋋'), + ('㋐', '㋾'), + ('㌀', '㍶'), + ('㍻', '㏝'), + ('㏠', '㏾'), + ('一', '龥'), + ('\u{e000}', '鶴'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('\u{fb1e}', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', '﴿'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('\u{fe20}', '\u{fe23}'), + ('︰', '﹄'), + ('﹉', '﹒'), + ('﹔', '﹦'), + ('﹨', '﹫'), + ('ﹰ', 'ﹲ'), + ('ﹴ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('\u{feff}', '\u{feff}'), + ('!', '~'), + ('。', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('¢', '₩'), + ('│', '○'), + ('�', '\u{ffff}'), +]; + +pub const V2_0: &'static [(char, char)] = &[ + ('\u{591}', '\u{5a1}'), + ('\u{5a3}', '\u{5af}'), + ('\u{5c4}', '\u{5c4}'), + ('ༀ', 'ཇ'), + ('ཉ', 'ཀྵ'), + ('\u{f71}', 'ྋ'), + ('\u{f90}', 
'\u{f95}'), + ('\u{f97}', '\u{f97}'), + ('\u{f99}', '\u{fad}'), + ('\u{fb1}', '\u{fb7}'), + ('\u{fb9}', '\u{fb9}'), + ('ẛ', 'ẛ'), + ('₫', '₫'), + ('가', '힣'), + ('\u{1fffe}', '\u{1ffff}'), + ('\u{2fffe}', '\u{2ffff}'), + ('\u{3fffe}', '\u{3ffff}'), + ('\u{4fffe}', '\u{4ffff}'), + ('\u{5fffe}', '\u{5ffff}'), + ('\u{6fffe}', '\u{6ffff}'), + ('\u{7fffe}', '\u{7ffff}'), + ('\u{8fffe}', '\u{8ffff}'), + ('\u{9fffe}', '\u{9ffff}'), + ('\u{afffe}', '\u{affff}'), + ('\u{bfffe}', '\u{bffff}'), + ('\u{cfffe}', '\u{cffff}'), + ('\u{dfffe}', '\u{dffff}'), + ('\u{efffe}', '\u{10ffff}'), +]; + +pub const V2_1: &'static [(char, char)] = &[('€', '€'), ('', '')]; + +pub const V3_0: &'static [(char, char)] = &[ + ('Ƕ', 'ǹ'), + ('Ș', 'ȟ'), + ('Ȣ', 'ȳ'), + ('ʩ', 'ʭ'), + ('˟', '˟'), + ('˪', 'ˮ'), + ('\u{346}', '\u{34e}'), + ('\u{362}', '\u{362}'), + ('ϗ', 'ϗ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('Ѐ', 'Ѐ'), + ('Ѝ', 'Ѝ'), + ('ѐ', 'ѐ'), + ('ѝ', 'ѝ'), + ('\u{488}', '\u{489}'), + ('Ҍ', 'ҏ'), + ('Ӭ', 'ӭ'), + ('֊', '֊'), + ('\u{653}', '\u{655}'), + ('ڸ', 'ڹ'), + ('ڿ', 'ڿ'), + ('ۏ', 'ۏ'), + ('ۺ', '۾'), + ('܀', '܍'), + ('\u{70f}', 'ܬ'), + ('\u{730}', '\u{74a}'), + ('ހ', '\u{7b0}'), + ('ං', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', '෴'), + ('ཪ', 'ཪ'), + ('\u{f96}', '\u{f96}'), + ('\u{fae}', '\u{fb0}'), + ('\u{fb8}', '\u{fb8}'), + ('\u{fba}', '\u{fbc}'), + ('྾', '࿌'), + ('࿏', '࿏'), + ('က', 'အ'), + ('ဣ', 'ဧ'), + ('ဩ', 'ဪ'), + ('ာ', '\u{1032}'), + ('\u{1036}', '\u{1039}'), + ('၀', '\u{1059}'), + ('ሀ', 'ሆ'), + ('ለ', 'ቆ'), + ('ቈ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኆ'), + ('ኈ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኮ'), + ('ኰ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዎ'), + ('ዐ', 'ዖ'), + ('ዘ', 'ዮ'), + ('ደ', 'ጎ'), + ('ጐ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ጞ'), + ('ጠ', 'ፆ'), + ('ፈ', 'ፚ'), + 
('፡', '፼'), + ('Ꭰ', 'Ᏼ'), + ('ᐁ', 'ᙶ'), + ('\u{1680}', '᚜'), + ('ᚠ', 'ᛰ'), + ('ក', 'ៜ'), + ('០', '៩'), + ('᠀', '\u{180e}'), + ('᠐', '᠙'), + ('ᠠ', 'ᡷ'), + ('ᢀ', '\u{18a9}'), + ('\u{202f}', '\u{202f}'), + ('⁈', '⁍'), + ('₭', '₯'), + ('\u{20e2}', '\u{20e3}'), + ('ℹ', '℺'), + ('Ↄ', 'Ↄ'), + ('⇫', '⇳'), + ('⌁', '⌁'), + ('⍻', '⍻'), + ('⍽', '⎚'), + ('␥', '␦'), + ('◰', '◷'), + ('☙', '☙'), + ('♰', '♱'), + ('⠀', '⣿'), + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('⿰', '⿻'), + ('〸', '〺'), + ('〾', '〾'), + ('ㆠ', 'ㆷ'), + ('㐀', '䶵'), + ('ꀀ', 'ꒌ'), + ('꒐', '꒡'), + ('꒤', '꒳'), + ('꒵', '꓀'), + ('꓂', '꓄'), + ('꓆', '꓆'), + ('יִ', 'יִ'), + ('\u{fff9}', '\u{fffb}'), +]; + +pub const V3_1: &'static [(char, char)] = &[ + ('ϴ', 'ϵ'), + ('\u{fdd0}', '\u{fdef}'), + ('𐌀', '𐌞'), + ('𐌠', '𐌣'), + ('𐌰', '𐍊'), + ('𐐀', '𐐥'), + ('𐐨', '𐑍'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄪', '𝇝'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓀'), + ('𝓂', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚣'), + ('𝚨', '𝟉'), + ('𝟎', '𝟿'), + ('𠀀', '𪛖'), + ('丽', '𪘀'), + ('\u{e0001}', '\u{e0001}'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const V3_2: &'static [(char, char)] = &[ + ('Ƞ', 'Ƞ'), + ('\u{34f}', '\u{34f}'), + ('\u{363}', '\u{36f}'), + ('Ϙ', 'ϙ'), + ('϶', '϶'), + ('Ҋ', 'ҋ'), + ('Ӆ', 'ӆ'), + ('Ӊ', 'ӊ'), + ('Ӎ', 'ӎ'), + ('Ԁ', 'ԏ'), + ('ٮ', 'ٯ'), + ('ޱ', 'ޱ'), + ('ჷ', 'ჸ'), + ('ᜀ', 'ᜌ'), + ('ᜎ', '\u{1714}'), + ('ᜠ', '᜶'), + ('ᝀ', '\u{1753}'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('\u{1772}', '\u{1773}'), + ('⁇', '⁇'), + ('⁎', '⁒'), + ('⁗', '⁗'), + ('\u{205f}', '\u{2063}'), + ('ⁱ', 'ⁱ'), + ('₰', '₱'), + ('\u{20e4}', '\u{20ea}'), + ('ℽ', '⅋'), + ('⇴', '⇿'), + ('⋲', '⋿'), + ('⍼', '⍼'), + ('⎛', '⏎'), + ('⓫', '⓾'), + ('▖', '▟'), + ('◸', '◿'), + ('☖', '☗'), + ('♲', '♽'), + ('⚀', '⚉'), + ('❨', '❵'), + ('⟐', '⟫'), + ('⟰', '⟿'), + ('⤀', '⫿'), + ('〻', '〽'), + ('ゕ', 
'ゖ'), + ('ゟ', '゠'), + ('ヿ', 'ヿ'), + ('ㇰ', 'ㇿ'), + ('㉑', '㉟'), + ('㊱', '㊿'), + ('꒢', '꒣'), + ('꒴', '꒴'), + ('꓁', '꓁'), + ('꓅', '꓅'), + ('侮', '頻'), + ('﷼', '﷼'), + ('\u{fe00}', '\u{fe0f}'), + ('﹅', '﹆'), + ('ﹳ', 'ﹳ'), + ('⦅', '⦆'), +]; + +pub const V4_0: &'static [(char, char)] = &[ + ('ȡ', 'ȡ'), + ('ȴ', 'ȶ'), + ('ʮ', 'ʯ'), + ('˯', '˿'), + ('\u{350}', '\u{357}'), + ('\u{35d}', '\u{35f}'), + ('Ϸ', 'ϻ'), + ('\u{600}', '\u{603}'), + ('؍', '\u{615}'), + ('\u{656}', '\u{658}'), + ('ۮ', 'ۯ'), + ('ۿ', 'ۿ'), + ('ܭ', 'ܯ'), + ('ݍ', 'ݏ'), + ('ऄ', 'ऄ'), + ('ঽ', 'ঽ'), + ('\u{a01}', '\u{a01}'), + ('ਃ', 'ਃ'), + ('ઌ', 'ઌ'), + ('ૡ', '\u{ae3}'), + ('૱', '૱'), + ('ଵ', 'ଵ'), + ('ୱ', 'ୱ'), + ('௳', '௺'), + ('\u{cbc}', 'ಽ'), + ('\u{17dd}', '\u{17dd}'), + ('៰', '៹'), + ('ᤀ', 'ᤜ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥀', '᥀'), + ('᥄', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('᧠', '᧿'), + ('ᴀ', 'ᵫ'), + ('⁓', '⁔'), + ('℻', '℻'), + ('⏏', '⏐'), + ('⓿', '⓿'), + ('☔', '☕'), + ('⚊', '⚑'), + ('⚠', '⚡'), + ('⬀', '⬍'), + ('㈝', '㈞'), + ('㉐', '㉐'), + ('㉼', '㉽'), + ('㋌', '㋏'), + ('㍷', '㍺'), + ('㏞', '㏟'), + ('㏿', '㏿'), + ('䷀', '䷿'), + ('﷽', '﷽'), + ('﹇', '﹈'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐄀', '𐄂'), + ('𐄇', '𐄳'), + ('𐄷', '𐄿'), + ('𐎀', '𐎝'), + ('𐎟', '𐎟'), + ('𐐦', '𐐧'), + ('𐑎', '𐒝'), + ('𐒠', '𐒩'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐠿'), + ('𝌀', '𝍖'), + ('𝓁', '𝓁'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const V4_1: &'static [(char, char)] = &[ + ('ȷ', 'Ɂ'), + ('\u{358}', '\u{35c}'), + ('ϼ', 'Ͽ'), + ('Ӷ', 'ӷ'), + ('\u{5a2}', '\u{5a2}'), + ('\u{5c5}', '\u{5c7}'), + ('؋', '؋'), + ('؞', '؞'), + ('\u{659}', '\u{65e}'), + ('ݐ', 'ݭ'), + ('ॽ', 'ॽ'), + ('ৎ', 'ৎ'), + ('ஶ', 'ஶ'), + ('௦', '௦'), + ('࿐', '࿑'), + ('ჹ', 'ჺ'), + ('ჼ', 'ჼ'), + ('ሇ', 'ሇ'), + ('ቇ', 'ቇ'), + ('ኇ', 'ኇ'), + ('ኯ', 'ኯ'), + ('ዏ', 'ዏ'), + ('ዯ', 'ዯ'), + ('ጏ', 'ጏ'), + ('ጟ', 'ጟ'), + ('ፇ', 'ፇ'), + ('\u{135f}', '፠'), + ('ᎀ', '᎙'), + ('ᦀ', 'ᦩ'), + 
('ᦰ', 'ᧉ'), + ('᧐', '᧙'), + ('᧞', '᧟'), + ('ᨀ', '\u{1a1b}'), + ('᨞', '᨟'), + ('ᵬ', '\u{1dc3}'), + ('⁕', '⁖'), + ('⁘', '⁞'), + ('ₐ', 'ₔ'), + ('₲', '₵'), + ('\u{20eb}', '\u{20eb}'), + ('ℼ', 'ℼ'), + ('⅌', '⅌'), + ('⏑', '⏛'), + ('☘', '☘'), + ('♾', '♿'), + ('⚒', '⚜'), + ('⚢', '⚱'), + ('⟀', '⟆'), + ('⬎', '⬓'), + ('Ⰰ', 'Ⱞ'), + ('ⰰ', 'ⱞ'), + ('Ⲁ', '⳪'), + ('⳹', 'ⴥ'), + ('ⴰ', 'ⵥ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('⸀', '⸗'), + ('⸜', '⸝'), + ('㇀', '㇏'), + ('㉾', '㉾'), + ('龦', '龻'), + ('꜀', '꜖'), + ('ꠀ', '꠫'), + ('並', '龎'), + ('︐', '︙'), + ('𐅀', '𐆊'), + ('𐎠', '𐏃'), + ('𐏈', '𐏕'), + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨳'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '𐩇'), + ('𐩐', '𐩘'), + ('𝈀', '𝉅'), + ('𝚤', '𝚥'), +]; + +pub const V5_0: &'static [(char, char)] = &[ + ('ɂ', 'ɏ'), + ('ͻ', 'ͽ'), + ('ӏ', 'ӏ'), + ('Ӻ', 'ӿ'), + ('Ԑ', 'ԓ'), + ('\u{5ba}', '\u{5ba}'), + ('߀', 'ߺ'), + ('ॻ', 'ॼ'), + ('ॾ', 'ॿ'), + ('\u{ce2}', '\u{ce3}'), + ('ೱ', 'ೲ'), + ('\u{1b00}', 'ᭋ'), + ('᭐', '᭼'), + ('\u{1dc4}', '\u{1dca}'), + ('\u{1dfe}', '\u{1dff}'), + ('\u{20ec}', '\u{20ef}'), + ('⅍', 'ⅎ'), + ('ↄ', 'ↄ'), + ('⏜', '⏧'), + ('⚲', '⚲'), + ('⟇', '⟊'), + ('⬔', '⬚'), + ('⬠', '⬣'), + ('Ⱡ', 'ⱬ'), + ('ⱴ', 'ⱷ'), + ('ꜗ', 'ꜚ'), + ('꜠', '꜡'), + ('ꡀ', '꡷'), + ('𐤀', '𐤙'), + ('𐤟', '𐤟'), + ('𒀀', '𒍮'), + ('𒐀', '𒑢'), + ('𒑰', '𒑳'), + ('𝍠', '𝍱'), + ('𝟊', '𝟋'), +]; + +pub const V5_1: &'static [(char, char)] = &[ + ('Ͱ', 'ͳ'), + ('Ͷ', 'ͷ'), + ('Ϗ', 'Ϗ'), + ('\u{487}', '\u{487}'), + ('Ԕ', 'ԣ'), + ('؆', '؊'), + ('\u{616}', '\u{61a}'), + ('ػ', 'ؿ'), + ('ݮ', 'ݿ'), + ('ॱ', 'ॲ'), + ('\u{a51}', '\u{a51}'), + ('\u{a75}', '\u{a75}'), + ('\u{b44}', '\u{b44}'), + ('\u{b62}', '\u{b63}'), + ('ௐ', 'ௐ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౙ'), + ('\u{c62}', '\u{c63}'), + ('౸', '౿'), + ('ഽ', 'ഽ'), + ('\u{d44}', '\u{d44}'), + ('\u{d62}', '\u{d63}'), + ('൰', '൵'), + ('൹', 'ൿ'), + ('ཫ', 
'ཬ'), + ('࿎', '࿎'), + ('࿒', '࿔'), + ('ဢ', 'ဢ'), + ('ဨ', 'ဨ'), + ('ါ', 'ါ'), + ('\u{1033}', '\u{1035}'), + ('\u{103a}', 'ဿ'), + ('ၚ', '႙'), + ('႞', '႟'), + ('ᢪ', 'ᢪ'), + ('\u{1b80}', '\u{1baa}'), + ('ᮮ', '᮹'), + ('ᰀ', '\u{1c37}'), + ('᰻', '᱉'), + ('ᱍ', '᱿'), + ('\u{1dcb}', '\u{1de6}'), + ('ẜ', 'ẟ'), + ('Ỻ', 'ỿ'), + ('\u{2064}', '\u{2064}'), + ('\u{20f0}', '\u{20f0}'), + ('⅏', '⅏'), + ('ↅ', 'ↈ'), + ('⚝', '⚝'), + ('⚳', '⚼'), + ('⛀', '⛃'), + ('⟌', '⟌'), + ('⟬', '⟯'), + ('⬛', '⬟'), + ('⬤', '⭌'), + ('⭐', '⭔'), + ('Ɑ', 'Ɐ'), + ('ⱱ', 'ⱳ'), + ('ⱸ', 'ⱽ'), + ('\u{2de0}', '\u{2dff}'), + ('⸘', '⸛'), + ('⸞', '⸰'), + ('ㄭ', 'ㄭ'), + ('㇐', '㇣'), + ('龼', '鿃'), + ('ꔀ', 'ꘫ'), + ('Ꙁ', 'ꙟ'), + ('Ꙣ', '꙳'), + ('\u{a67c}', 'ꚗ'), + ('ꜛ', 'ꜟ'), + ('Ꜣ', 'ꞌ'), + ('ꟻ', 'ꟿ'), + ('ꢀ', '\u{a8c4}'), + ('꣎', '꣙'), + ('꤀', '\u{a953}'), + ('꥟', '꥟'), + ('ꨀ', '\u{aa36}'), + ('ꩀ', 'ꩍ'), + ('꩐', '꩙'), + ('꩜', '꩟'), + ('\u{fe24}', '\u{fe26}'), + ('𐆐', '𐆛'), + ('𐇐', '\u{101fd}'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐤠', '𐤹'), + ('𐤿', '𐤿'), + ('𝄩', '𝄩'), + ('🀀', '🀫'), + ('🀰', '🂓'), +]; + +pub const V5_2: &'static [(char, char)] = &[ + ('Ԥ', 'ԥ'), + ('ࠀ', '\u{82d}'), + ('࠰', '࠾'), + ('\u{900}', '\u{900}'), + ('ॎ', 'ॎ'), + ('\u{955}', '\u{955}'), + ('ॹ', 'ॺ'), + ('৻', '৻'), + ('࿕', '࿘'), + ('ႚ', '\u{109d}'), + ('ᅚ', 'ᅞ'), + ('ᆣ', 'ᆧ'), + ('ᇺ', 'ᇿ'), + ('᐀', '᐀'), + ('ᙷ', 'ᙿ'), + ('ᢰ', 'ᣵ'), + ('ᦪ', 'ᦫ'), + ('᧚', '᧚'), + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('᪠', '᪭'), + ('\u{1cd0}', 'ᳲ'), + ('\u{1dfd}', '\u{1dfd}'), + ('₶', '₸'), + ('⅐', '⅒'), + ('↉', '↉'), + ('⏨', '⏨'), + ('⚞', '⚟'), + ('⚽', '⚿'), + ('⛄', '⛍'), + ('⛏', '⛡'), + ('⛣', '⛣'), + ('⛨', '⛿'), + ('❗', '❗'), + ('⭕', '⭙'), + ('Ɒ', 'Ɒ'), + ('Ȿ', 'Ɀ'), + ('Ⳬ', '\u{2cf1}'), + ('⸱', '⸱'), + ('㉄', '㉏'), + ('鿄', '鿋'), + ('ꓐ', '꓿'), + ('ꚠ', '꛷'), + ('꠰', '꠹'), + ('\u{a8e0}', 'ꣻ'), + ('ꥠ', 'ꥼ'), + ('\u{a980}', '꧍'), + ('ꧏ', '꧙'), + ('꧞', '꧟'), + ('ꩠ', 'ꩻ'), + ('ꪀ', 'ꫂ'), + ('ꫛ', '꫟'), + ('ꯀ', '\u{abed}'), + ('꯰', '꯹'), 
+ ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('恵', '舘'), + ('𐡀', '𐡕'), + ('𐡗', '𐡟'), + ('𐤚', '𐤛'), + ('𐩠', '𐩿'), + ('𐬀', '𐬵'), + ('𐬹', '𐭕'), + ('𐭘', '𐭲'), + ('𐭸', '𐭿'), + ('𐰀', '𐱈'), + ('𐹠', '𐹾'), + ('\u{11080}', '𑃁'), + ('𓀀', '𓐮'), + ('🄀', '🄊'), + ('🄐', '🄮'), + ('🄱', '🄱'), + ('🄽', '🄽'), + ('🄿', '🄿'), + ('🅂', '🅂'), + ('🅆', '🅆'), + ('🅊', '🅎'), + ('🅗', '🅗'), + ('🅟', '🅟'), + ('🅹', '🅹'), + ('🅻', '🅼'), + ('🅿', '🅿'), + ('🆊', '🆍'), + ('🆐', '🆐'), + ('🈀', '🈀'), + ('🈐', '🈱'), + ('🉀', '🉈'), + ('𪜀', '𫜴'), +]; + +pub const V6_0: &'static [(char, char)] = &[ + ('Ԧ', 'ԧ'), + ('ؠ', 'ؠ'), + ('\u{65f}', '\u{65f}'), + ('ࡀ', '\u{85b}'), + ('࡞', '࡞'), + ('\u{93a}', 'ऻ'), + ('ॏ', 'ॏ'), + ('\u{956}', '\u{957}'), + ('ॳ', 'ॷ'), + ('୲', '୷'), + ('ഩ', 'ഩ'), + ('ഺ', 'ഺ'), + ('ൎ', 'ൎ'), + ('ྌ', '\u{f8f}'), + ('࿙', '࿚'), + ('\u{135d}', '\u{135e}'), + ('ᯀ', '\u{1bf3}'), + ('᯼', '᯿'), + ('\u{1dfc}', '\u{1dfc}'), + ('ₕ', 'ₜ'), + ('₹', '₹'), + ('⏩', '⏳'), + ('⛎', '⛎'), + ('⛢', '⛢'), + ('⛤', '⛧'), + ('✅', '✅'), + ('✊', '✋'), + ('✨', '✨'), + ('❌', '❌'), + ('❎', '❎'), + ('❓', '❕'), + ('❟', '❠'), + ('➕', '➗'), + ('➰', '➰'), + ('➿', '➿'), + ('⟎', '⟏'), + ('⵰', '⵰'), + ('\u{2d7f}', '\u{2d7f}'), + ('ㆸ', 'ㆺ'), + ('Ꙡ', 'ꙡ'), + ('Ɥ', 'ꞎ'), + ('Ꞑ', 'ꞑ'), + ('Ꞡ', 'ꞩ'), + ('ꟺ', 'ꟺ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('﮲', '﯁'), + ('𑀀', '𑁍'), + ('𑁒', '𑁯'), + ('𖠀', '𖨸'), + ('𛀀', '𛀁'), + ('🂠', '🂮'), + ('🂱', '🂾'), + ('🃁', '🃏'), + ('🃑', '🃟'), + ('🄰', '🄰'), + ('🄲', '🄼'), + ('🄾', '🄾'), + ('🅀', '🅁'), + ('🅃', '🅅'), + ('🅇', '🅉'), + ('🅏', '🅖'), + ('🅘', '🅞'), + ('🅠', '🅩'), + ('🅰', '🅸'), + ('🅺', '🅺'), + ('🅽', '🅾'), + ('🆀', '🆉'), + ('🆎', '🆏'), + ('🆑', '🆚'), + ('🇦', '🇿'), + ('🈁', '🈂'), + ('🈲', '🈺'), + ('🉐', '🉑'), + ('🌀', '🌠'), + ('🌰', '🌵'), + ('🌷', '🍼'), + ('🎀', '🎓'), + ('🎠', '🏄'), + ('🏆', '🏊'), + ('🏠', '🏰'), + ('🐀', '🐾'), + ('👀', '👀'), + ('👂', '📷'), + ('📹', '📼'), + ('🔀', '🔽'), + ('🕐', '🕧'), + ('🗻', '🗿'), + ('😁', '😐'), + ('😒', '😔'), + ('😖', '😖'), + ('😘', '😘'), + ('😚', '😚'), + ('😜', '😞'), + ('😠', '😥'), 
+ ('😨', '😫'), + ('😭', '😭'), + ('😰', '😳'), + ('😵', '🙀'), + ('🙅', '🙏'), + ('🚀', '🛅'), + ('🜀', '🝳'), + ('𫝀', '𫠝'), +]; + +pub const V6_1: &'static [(char, char)] = &[ + ('֏', '֏'), + ('\u{604}', '\u{604}'), + ('ࢠ', 'ࢠ'), + ('ࢢ', 'ࢬ'), + ('\u{8e4}', '\u{8fe}'), + ('૰', '૰'), + ('ໞ', 'ໟ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ჽ', 'ჿ'), + ('\u{1bab}', '\u{1bad}'), + ('ᮺ', 'ᮿ'), + ('᳀', '᳇'), + ('ᳳ', 'ᳶ'), + ('⟋', '⟋'), + ('⟍', '⟍'), + ('Ⳳ', 'ⳳ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⵦ', 'ⵧ'), + ('⸲', '⸻'), + ('鿌', '鿌'), + ('\u{a674}', '\u{a67b}'), + ('\u{a69f}', '\u{a69f}'), + ('Ꞓ', 'ꞓ'), + ('Ɦ', 'Ɦ'), + ('ꟸ', 'ꟹ'), + ('ꫠ', '\u{aaf6}'), + ('郞', '隷'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𑃐', '𑃨'), + ('𑃰', '𑃹'), + ('\u{11100}', '\u{11134}'), + ('𑄶', '𑅃'), + ('\u{11180}', '𑇈'), + ('𑇐', '𑇙'), + ('𑚀', '\u{116b7}'), + ('𑛀', '𑛉'), + ('𖼀', '𖽄'), + ('𖽐', '𖽾'), + ('\u{16f8f}', '𖾟'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𞻰', '𞻱'), + ('🅪', '🅫'), + ('🕀', '🕃'), + ('😀', '😀'), + ('😑', '😑'), + ('😕', '😕'), + ('😗', '😗'), + ('😙', '😙'), + ('😛', '😛'), + ('😟', '😟'), + ('😦', '😧'), + ('😬', '😬'), + ('😮', '😯'), + ('😴', '😴'), +]; + +pub const V6_2: &'static [(char, char)] = &[('₺', '₺')]; + +pub const V6_3: &'static [(char, char)] = + &[('\u{61c}', '\u{61c}'), ('\u{2066}', '\u{2069}')]; + +pub const V7_0: &'static [(char, char)] = &[ + ('Ϳ', 'Ϳ'), + ('Ԩ', 'ԯ'), + ('֍', '֎'), + ('\u{605}', '\u{605}'), + ('ࢡ', 'ࢡ'), + ('ࢭ', 'ࢲ'), + ('\u{8ff}', '\u{8ff}'), + ('ॸ', 'ॸ'), + ('ঀ', 'ঀ'), + ('\u{c00}', '\u{c00}'), + ('ఴ', 'ఴ'), + ('\u{c81}', '\u{c81}'), + ('\u{d01}', '\u{d01}'), + ('෦', '෯'), + ('ᛱ', 
'ᛸ'), + ('ᤝ', 'ᤞ'), + ('\u{1ab0}', '\u{1abe}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1de7}', '\u{1df5}'), + ('₻', '₽'), + ('⏴', '⏺'), + ('✀', '✀'), + ('⭍', '⭏'), + ('⭚', '⭳'), + ('⭶', '⮕'), + ('⮘', '⮹'), + ('⮽', '⯈'), + ('⯊', '⯑'), + ('⸼', '⹂'), + ('Ꚙ', 'ꚝ'), + ('ꞔ', 'ꞟ'), + ('Ɜ', 'Ɬ'), + ('Ʞ', 'Ʇ'), + ('ꟷ', 'ꟷ'), + ('ꧠ', 'ꧾ'), + ('\u{aa7c}', 'ꩿ'), + ('ꬰ', 'ꭟ'), + ('ꭤ', 'ꭥ'), + ('\u{fe27}', '\u{fe2d}'), + ('𐆋', '𐆌'), + ('𐆠', '𐆠'), + ('\u{102e0}', '𐋻'), + ('𐌟', '𐌟'), + ('𐍐', '\u{1037a}'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕯', '𐕯'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐡠', '𐢞'), + ('𐢧', '𐢯'), + ('𐪀', '𐪟'), + ('𐫀', '\u{10ae6}'), + ('𐫫', '𐫶'), + ('𐮀', '𐮑'), + ('𐮙', '𐮜'), + ('𐮩', '𐮯'), + ('\u{1107f}', '\u{1107f}'), + ('𑅐', '𑅶'), + ('𑇍', '𑇍'), + ('𑇚', '𑇚'), + ('𑇡', '𑇴'), + ('𑈀', '𑈑'), + ('𑈓', '𑈽'), + ('𑊰', '\u{112ea}'), + ('𑋰', '𑋹'), + ('\u{11301}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133c}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('𑒀', '𑓇'), + ('𑓐', '𑓙'), + ('𑖀', '\u{115b5}'), + ('𑖸', '𑗉'), + ('𑘀', '𑙄'), + ('𑙐', '𑙙'), + ('𑢠', '𑣲'), + ('𑣿', '𑣿'), + ('𑫀', '𑫸'), + ('𒍯', '𒎘'), + ('𒑣', '𒑮'), + ('𒑴', '𒑴'), + ('𖩀', '𖩞'), + ('𖩠', '𖩩'), + ('𖩮', '𖩯'), + ('𖫐', '𖫭'), + ('\u{16af0}', '𖫵'), + ('𖬀', '𖭅'), + ('𖭐', '𖭙'), + ('𖭛', '𖭡'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𛲜', '\u{1bca3}'), + ('𞠀', '𞣄'), + ('𞣇', '\u{1e8d6}'), + ('🂿', '🂿'), + ('🃠', '🃵'), + ('🄋', '🄌'), + ('🌡', '🌬'), + ('🌶', '🌶'), + ('🍽', '🍽'), + ('🎔', '🎟'), + ('🏅', '🏅'), + ('🏋', '🏎'), + ('🏔', '🏟'), + ('🏱', '🏷'), + ('🐿', '🐿'), + ('👁', '👁'), + ('📸', '📸'), + ('📽', '📾'), + ('🔾', '🔿'), + ('🕄', '🕊'), + ('🕨', '🕹'), + ('🕻', '🖣'), + ('🖥', '🗺'), + ('🙁', '🙂'), + ('🙐', '🙿'), + ('🛆', '🛏'), + ('🛠', '🛬'), + ('🛰', '🛳'), + ('🞀', '🟔'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), +]; + +pub 
const V8_0: &'static [(char, char)] = &[ + ('ࢳ', 'ࢴ'), + ('\u{8e3}', '\u{8e3}'), + ('ૹ', 'ૹ'), + ('ౚ', 'ౚ'), + ('ൟ', 'ൟ'), + ('Ᏽ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('₾', '₾'), + ('↊', '↋'), + ('⯬', '⯯'), + ('鿍', '鿕'), + ('\u{a69e}', '\u{a69e}'), + ('ꞏ', 'ꞏ'), + ('Ʝ', 'ꞷ'), + ('꣼', 'ꣽ'), + ('ꭠ', 'ꭣ'), + ('ꭰ', 'ꮿ'), + ('\u{fe2e}', '\u{fe2f}'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐣻', '𐣿'), + ('𐦼', '𐦽'), + ('𐧀', '𐧏'), + ('𐧒', '𐧿'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐳺', '𐳿'), + ('\u{111c9}', '\u{111cc}'), + ('𑇛', '𑇟'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊩'), + ('\u{11300}', '\u{11300}'), + ('𑍐', '𑍐'), + ('𑗊', '\u{115dd}'), + ('𑜀', '𑜙'), + ('\u{1171d}', '\u{1172b}'), + ('𑜰', '𑜿'), + ('𒎙', '𒎙'), + ('𒒀', '𒕃'), + ('𔐀', '𔙆'), + ('𝇞', '𝇨'), + ('𝠀', '𝪋'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('🌭', '🌯'), + ('🍾', '🍿'), + ('🏏', '🏓'), + ('🏸', '🏿'), + ('📿', '📿'), + ('🕋', '🕏'), + ('🙃', '🙄'), + ('🛐', '🛐'), + ('🤐', '🤘'), + ('🦀', '🦄'), + ('🧀', '🧀'), + ('𫠠', '𬺡'), +]; + +pub const V9_0: &'static [(char, char)] = &[ + ('ࢶ', 'ࢽ'), + ('\u{8d4}', '\u{8e2}'), + ('ಀ', 'ಀ'), + ('൏', '൏'), + ('ൔ', 'ൖ'), + ('൘', '൞'), + ('൶', '൸'), + ('ᲀ', 'ᲈ'), + ('\u{1dfb}', '\u{1dfb}'), + ('⏻', '⏾'), + ('⹃', '⹄'), + ('Ɪ', 'Ɪ'), + ('\u{a8c5}', '\u{a8c5}'), + ('𐆍', '𐆎'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('\u{1123e}', '\u{1123e}'), + ('𑐀', '𑑙'), + ('𑑛', '𑑛'), + ('𑑝', '𑑝'), + ('𑙠', '𑙬'), + ('𑰀', '𑰈'), + ('𑰊', '\u{11c36}'), + ('\u{11c38}', '𑱅'), + ('𑱐', '𑱬'), + ('𑱰', '𑲏'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('𖿠', '𖿠'), + ('𗀀', '𘟬'), + ('𘠀', '𘫲'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞤀', '\u{1e94a}'), + ('𞥐', '𞥙'), + ('𞥞', '𞥟'), + ('🆛', '🆬'), + ('🈻', '🈻'), + ('🕺', '🕺'), + ('🖤', '🖤'), + ('🛑', '🛒'), + ('🛴', '🛶'), + ('🤙', '🤞'), + ('🤠', '🤧'), + ('🤰', '🤰'), + ('🤳', '🤾'), + ('🥀', '🥋'), + ('🥐', '🥞'), + ('🦅', '🦑'), +]; diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/case_folding_simple.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/case_folding_simple.rs new file mode 100644 index 0000000000000000000000000000000000000000..07f6ff2f5af7f8b2f4ccd77c7994496534a34913 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/case_folding_simple.rs @@ -0,0 +1,2948 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate case-folding-simple ucd-16.0.0 --chars --all-pairs +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const CASE_FOLDING_SIMPLE: &'static [(char, &'static [char])] = &[ + ('A', &['a']), + ('B', &['b']), + ('C', &['c']), + ('D', &['d']), + ('E', &['e']), + ('F', &['f']), + ('G', &['g']), + ('H', &['h']), + ('I', &['i']), + ('J', &['j']), + ('K', &['k', 'K']), + ('L', &['l']), + ('M', &['m']), + ('N', &['n']), + ('O', &['o']), + ('P', &['p']), + ('Q', &['q']), + ('R', &['r']), + ('S', &['s', 'ſ']), + ('T', &['t']), + ('U', &['u']), + ('V', &['v']), + ('W', &['w']), + ('X', &['x']), + ('Y', &['y']), + ('Z', &['z']), + ('a', &['A']), + ('b', &['B']), + ('c', &['C']), + ('d', &['D']), + ('e', &['E']), + ('f', &['F']), + ('g', &['G']), + ('h', &['H']), + ('i', &['I']), + ('j', &['J']), + ('k', &['K', 'K']), + ('l', &['L']), + ('m', &['M']), + ('n', &['N']), + ('o', &['O']), + ('p', &['P']), + ('q', &['Q']), + ('r', &['R']), + ('s', &['S', 'ſ']), + ('t', &['T']), + ('u', &['U']), + ('v', &['V']), + ('w', &['W']), + ('x', &['X']), + ('y', &['Y']), + ('z', &['Z']), + ('µ', &['Μ', 'μ']), + ('À', &['à']), + ('Á', &['á']), + ('Â', &['â']), + ('Ã', &['ã']), + ('Ä', &['ä']), + ('Å', &['å', 'Å']), + ('Æ', &['æ']), + ('Ç', &['ç']), + ('È', &['è']), + ('É', &['é']), + ('Ê', &['ê']), + ('Ë', &['ë']), + ('Ì', &['ì']), + ('Í', &['í']), + ('Î', &['î']), + ('Ï', &['ï']), + 
('Ð', &['ð']), + ('Ñ', &['ñ']), + ('Ò', &['ò']), + ('Ó', &['ó']), + ('Ô', &['ô']), + ('Õ', &['õ']), + ('Ö', &['ö']), + ('Ø', &['ø']), + ('Ù', &['ù']), + ('Ú', &['ú']), + ('Û', &['û']), + ('Ü', &['ü']), + ('Ý', &['ý']), + ('Þ', &['þ']), + ('ß', &['ẞ']), + ('à', &['À']), + ('á', &['Á']), + ('â', &['Â']), + ('ã', &['Ã']), + ('ä', &['Ä']), + ('å', &['Å', 'Å']), + ('æ', &['Æ']), + ('ç', &['Ç']), + ('è', &['È']), + ('é', &['É']), + ('ê', &['Ê']), + ('ë', &['Ë']), + ('ì', &['Ì']), + ('í', &['Í']), + ('î', &['Î']), + ('ï', &['Ï']), + ('ð', &['Ð']), + ('ñ', &['Ñ']), + ('ò', &['Ò']), + ('ó', &['Ó']), + ('ô', &['Ô']), + ('õ', &['Õ']), + ('ö', &['Ö']), + ('ø', &['Ø']), + ('ù', &['Ù']), + ('ú', &['Ú']), + ('û', &['Û']), + ('ü', &['Ü']), + ('ý', &['Ý']), + ('þ', &['Þ']), + ('ÿ', &['Ÿ']), + ('Ā', &['ā']), + ('ā', &['Ā']), + ('Ă', &['ă']), + ('ă', &['Ă']), + ('Ą', &['ą']), + ('ą', &['Ą']), + ('Ć', &['ć']), + ('ć', &['Ć']), + ('Ĉ', &['ĉ']), + ('ĉ', &['Ĉ']), + ('Ċ', &['ċ']), + ('ċ', &['Ċ']), + ('Č', &['č']), + ('č', &['Č']), + ('Ď', &['ď']), + ('ď', &['Ď']), + ('Đ', &['đ']), + ('đ', &['Đ']), + ('Ē', &['ē']), + ('ē', &['Ē']), + ('Ĕ', &['ĕ']), + ('ĕ', &['Ĕ']), + ('Ė', &['ė']), + ('ė', &['Ė']), + ('Ę', &['ę']), + ('ę', &['Ę']), + ('Ě', &['ě']), + ('ě', &['Ě']), + ('Ĝ', &['ĝ']), + ('ĝ', &['Ĝ']), + ('Ğ', &['ğ']), + ('ğ', &['Ğ']), + ('Ġ', &['ġ']), + ('ġ', &['Ġ']), + ('Ģ', &['ģ']), + ('ģ', &['Ģ']), + ('Ĥ', &['ĥ']), + ('ĥ', &['Ĥ']), + ('Ħ', &['ħ']), + ('ħ', &['Ħ']), + ('Ĩ', &['ĩ']), + ('ĩ', &['Ĩ']), + ('Ī', &['ī']), + ('ī', &['Ī']), + ('Ĭ', &['ĭ']), + ('ĭ', &['Ĭ']), + ('Į', &['į']), + ('į', &['Į']), + ('IJ', &['ij']), + ('ij', &['IJ']), + ('Ĵ', &['ĵ']), + ('ĵ', &['Ĵ']), + ('Ķ', &['ķ']), + ('ķ', &['Ķ']), + ('Ĺ', &['ĺ']), + ('ĺ', &['Ĺ']), + ('Ļ', &['ļ']), + ('ļ', &['Ļ']), + ('Ľ', &['ľ']), + ('ľ', &['Ľ']), + ('Ŀ', &['ŀ']), + ('ŀ', &['Ŀ']), + ('Ł', &['ł']), + ('ł', &['Ł']), + ('Ń', &['ń']), + ('ń', &['Ń']), + ('Ņ', &['ņ']), + ('ņ', &['Ņ']), + ('Ň', &['ň']), + ('ň', &['Ň']), + ('Ŋ', &['ŋ']), + 
('ŋ', &['Ŋ']), + ('Ō', &['ō']), + ('ō', &['Ō']), + ('Ŏ', &['ŏ']), + ('ŏ', &['Ŏ']), + ('Ő', &['ő']), + ('ő', &['Ő']), + ('Œ', &['œ']), + ('œ', &['Œ']), + ('Ŕ', &['ŕ']), + ('ŕ', &['Ŕ']), + ('Ŗ', &['ŗ']), + ('ŗ', &['Ŗ']), + ('Ř', &['ř']), + ('ř', &['Ř']), + ('Ś', &['ś']), + ('ś', &['Ś']), + ('Ŝ', &['ŝ']), + ('ŝ', &['Ŝ']), + ('Ş', &['ş']), + ('ş', &['Ş']), + ('Š', &['š']), + ('š', &['Š']), + ('Ţ', &['ţ']), + ('ţ', &['Ţ']), + ('Ť', &['ť']), + ('ť', &['Ť']), + ('Ŧ', &['ŧ']), + ('ŧ', &['Ŧ']), + ('Ũ', &['ũ']), + ('ũ', &['Ũ']), + ('Ū', &['ū']), + ('ū', &['Ū']), + ('Ŭ', &['ŭ']), + ('ŭ', &['Ŭ']), + ('Ů', &['ů']), + ('ů', &['Ů']), + ('Ű', &['ű']), + ('ű', &['Ű']), + ('Ų', &['ų']), + ('ų', &['Ų']), + ('Ŵ', &['ŵ']), + ('ŵ', &['Ŵ']), + ('Ŷ', &['ŷ']), + ('ŷ', &['Ŷ']), + ('Ÿ', &['ÿ']), + ('Ź', &['ź']), + ('ź', &['Ź']), + ('Ż', &['ż']), + ('ż', &['Ż']), + ('Ž', &['ž']), + ('ž', &['Ž']), + ('ſ', &['S', 's']), + ('ƀ', &['Ƀ']), + ('Ɓ', &['ɓ']), + ('Ƃ', &['ƃ']), + ('ƃ', &['Ƃ']), + ('Ƅ', &['ƅ']), + ('ƅ', &['Ƅ']), + ('Ɔ', &['ɔ']), + ('Ƈ', &['ƈ']), + ('ƈ', &['Ƈ']), + ('Ɖ', &['ɖ']), + ('Ɗ', &['ɗ']), + ('Ƌ', &['ƌ']), + ('ƌ', &['Ƌ']), + ('Ǝ', &['ǝ']), + ('Ə', &['ə']), + ('Ɛ', &['ɛ']), + ('Ƒ', &['ƒ']), + ('ƒ', &['Ƒ']), + ('Ɠ', &['ɠ']), + ('Ɣ', &['ɣ']), + ('ƕ', &['Ƕ']), + ('Ɩ', &['ɩ']), + ('Ɨ', &['ɨ']), + ('Ƙ', &['ƙ']), + ('ƙ', &['Ƙ']), + ('ƚ', &['Ƚ']), + ('ƛ', &['Ƛ']), + ('Ɯ', &['ɯ']), + ('Ɲ', &['ɲ']), + ('ƞ', &['Ƞ']), + ('Ɵ', &['ɵ']), + ('Ơ', &['ơ']), + ('ơ', &['Ơ']), + ('Ƣ', &['ƣ']), + ('ƣ', &['Ƣ']), + ('Ƥ', &['ƥ']), + ('ƥ', &['Ƥ']), + ('Ʀ', &['ʀ']), + ('Ƨ', &['ƨ']), + ('ƨ', &['Ƨ']), + ('Ʃ', &['ʃ']), + ('Ƭ', &['ƭ']), + ('ƭ', &['Ƭ']), + ('Ʈ', &['ʈ']), + ('Ư', &['ư']), + ('ư', &['Ư']), + ('Ʊ', &['ʊ']), + ('Ʋ', &['ʋ']), + ('Ƴ', &['ƴ']), + ('ƴ', &['Ƴ']), + ('Ƶ', &['ƶ']), + ('ƶ', &['Ƶ']), + ('Ʒ', &['ʒ']), + ('Ƹ', &['ƹ']), + ('ƹ', &['Ƹ']), + ('Ƽ', &['ƽ']), + ('ƽ', &['Ƽ']), + ('ƿ', &['Ƿ']), + ('DŽ', &['Dž', 'dž']), + ('Dž', &['DŽ', 'dž']), + ('dž', &['DŽ', 'Dž']), + ('LJ', &['Lj', 'lj']), + ('Lj', 
&['LJ', 'lj']), + ('lj', &['LJ', 'Lj']), + ('NJ', &['Nj', 'nj']), + ('Nj', &['NJ', 'nj']), + ('nj', &['NJ', 'Nj']), + ('Ǎ', &['ǎ']), + ('ǎ', &['Ǎ']), + ('Ǐ', &['ǐ']), + ('ǐ', &['Ǐ']), + ('Ǒ', &['ǒ']), + ('ǒ', &['Ǒ']), + ('Ǔ', &['ǔ']), + ('ǔ', &['Ǔ']), + ('Ǖ', &['ǖ']), + ('ǖ', &['Ǖ']), + ('Ǘ', &['ǘ']), + ('ǘ', &['Ǘ']), + ('Ǚ', &['ǚ']), + ('ǚ', &['Ǚ']), + ('Ǜ', &['ǜ']), + ('ǜ', &['Ǜ']), + ('ǝ', &['Ǝ']), + ('Ǟ', &['ǟ']), + ('ǟ', &['Ǟ']), + ('Ǡ', &['ǡ']), + ('ǡ', &['Ǡ']), + ('Ǣ', &['ǣ']), + ('ǣ', &['Ǣ']), + ('Ǥ', &['ǥ']), + ('ǥ', &['Ǥ']), + ('Ǧ', &['ǧ']), + ('ǧ', &['Ǧ']), + ('Ǩ', &['ǩ']), + ('ǩ', &['Ǩ']), + ('Ǫ', &['ǫ']), + ('ǫ', &['Ǫ']), + ('Ǭ', &['ǭ']), + ('ǭ', &['Ǭ']), + ('Ǯ', &['ǯ']), + ('ǯ', &['Ǯ']), + ('DZ', &['Dz', 'dz']), + ('Dz', &['DZ', 'dz']), + ('dz', &['DZ', 'Dz']), + ('Ǵ', &['ǵ']), + ('ǵ', &['Ǵ']), + ('Ƕ', &['ƕ']), + ('Ƿ', &['ƿ']), + ('Ǹ', &['ǹ']), + ('ǹ', &['Ǹ']), + ('Ǻ', &['ǻ']), + ('ǻ', &['Ǻ']), + ('Ǽ', &['ǽ']), + ('ǽ', &['Ǽ']), + ('Ǿ', &['ǿ']), + ('ǿ', &['Ǿ']), + ('Ȁ', &['ȁ']), + ('ȁ', &['Ȁ']), + ('Ȃ', &['ȃ']), + ('ȃ', &['Ȃ']), + ('Ȅ', &['ȅ']), + ('ȅ', &['Ȅ']), + ('Ȇ', &['ȇ']), + ('ȇ', &['Ȇ']), + ('Ȉ', &['ȉ']), + ('ȉ', &['Ȉ']), + ('Ȋ', &['ȋ']), + ('ȋ', &['Ȋ']), + ('Ȍ', &['ȍ']), + ('ȍ', &['Ȍ']), + ('Ȏ', &['ȏ']), + ('ȏ', &['Ȏ']), + ('Ȑ', &['ȑ']), + ('ȑ', &['Ȑ']), + ('Ȓ', &['ȓ']), + ('ȓ', &['Ȓ']), + ('Ȕ', &['ȕ']), + ('ȕ', &['Ȕ']), + ('Ȗ', &['ȗ']), + ('ȗ', &['Ȗ']), + ('Ș', &['ș']), + ('ș', &['Ș']), + ('Ț', &['ț']), + ('ț', &['Ț']), + ('Ȝ', &['ȝ']), + ('ȝ', &['Ȝ']), + ('Ȟ', &['ȟ']), + ('ȟ', &['Ȟ']), + ('Ƞ', &['ƞ']), + ('Ȣ', &['ȣ']), + ('ȣ', &['Ȣ']), + ('Ȥ', &['ȥ']), + ('ȥ', &['Ȥ']), + ('Ȧ', &['ȧ']), + ('ȧ', &['Ȧ']), + ('Ȩ', &['ȩ']), + ('ȩ', &['Ȩ']), + ('Ȫ', &['ȫ']), + ('ȫ', &['Ȫ']), + ('Ȭ', &['ȭ']), + ('ȭ', &['Ȭ']), + ('Ȯ', &['ȯ']), + ('ȯ', &['Ȯ']), + ('Ȱ', &['ȱ']), + ('ȱ', &['Ȱ']), + ('Ȳ', &['ȳ']), + ('ȳ', &['Ȳ']), + ('Ⱥ', &['ⱥ']), + ('Ȼ', &['ȼ']), + ('ȼ', &['Ȼ']), + ('Ƚ', &['ƚ']), + ('Ⱦ', &['ⱦ']), + ('ȿ', &['Ȿ']), + ('ɀ', &['Ɀ']), + ('Ɂ', &['ɂ']), + 
('ɂ', &['Ɂ']), + ('Ƀ', &['ƀ']), + ('Ʉ', &['ʉ']), + ('Ʌ', &['ʌ']), + ('Ɇ', &['ɇ']), + ('ɇ', &['Ɇ']), + ('Ɉ', &['ɉ']), + ('ɉ', &['Ɉ']), + ('Ɋ', &['ɋ']), + ('ɋ', &['Ɋ']), + ('Ɍ', &['ɍ']), + ('ɍ', &['Ɍ']), + ('Ɏ', &['ɏ']), + ('ɏ', &['Ɏ']), + ('ɐ', &['Ɐ']), + ('ɑ', &['Ɑ']), + ('ɒ', &['Ɒ']), + ('ɓ', &['Ɓ']), + ('ɔ', &['Ɔ']), + ('ɖ', &['Ɖ']), + ('ɗ', &['Ɗ']), + ('ə', &['Ə']), + ('ɛ', &['Ɛ']), + ('ɜ', &['Ɜ']), + ('ɠ', &['Ɠ']), + ('ɡ', &['Ɡ']), + ('ɣ', &['Ɣ']), + ('ɤ', &['Ɤ']), + ('ɥ', &['Ɥ']), + ('ɦ', &['Ɦ']), + ('ɨ', &['Ɨ']), + ('ɩ', &['Ɩ']), + ('ɪ', &['Ɪ']), + ('ɫ', &['Ɫ']), + ('ɬ', &['Ɬ']), + ('ɯ', &['Ɯ']), + ('ɱ', &['Ɱ']), + ('ɲ', &['Ɲ']), + ('ɵ', &['Ɵ']), + ('ɽ', &['Ɽ']), + ('ʀ', &['Ʀ']), + ('ʂ', &['Ʂ']), + ('ʃ', &['Ʃ']), + ('ʇ', &['Ʇ']), + ('ʈ', &['Ʈ']), + ('ʉ', &['Ʉ']), + ('ʊ', &['Ʊ']), + ('ʋ', &['Ʋ']), + ('ʌ', &['Ʌ']), + ('ʒ', &['Ʒ']), + ('ʝ', &['Ʝ']), + ('ʞ', &['Ʞ']), + ('\u{345}', &['Ι', 'ι', 'ι']), + ('Ͱ', &['ͱ']), + ('ͱ', &['Ͱ']), + ('Ͳ', &['ͳ']), + ('ͳ', &['Ͳ']), + ('Ͷ', &['ͷ']), + ('ͷ', &['Ͷ']), + ('ͻ', &['Ͻ']), + ('ͼ', &['Ͼ']), + ('ͽ', &['Ͽ']), + ('Ϳ', &['ϳ']), + ('Ά', &['ά']), + ('Έ', &['έ']), + ('Ή', &['ή']), + ('Ί', &['ί']), + ('Ό', &['ό']), + ('Ύ', &['ύ']), + ('Ώ', &['ώ']), + ('ΐ', &['ΐ']), + ('Α', &['α']), + ('Β', &['β', 'ϐ']), + ('Γ', &['γ']), + ('Δ', &['δ']), + ('Ε', &['ε', 'ϵ']), + ('Ζ', &['ζ']), + ('Η', &['η']), + ('Θ', &['θ', 'ϑ', 'ϴ']), + ('Ι', &['\u{345}', 'ι', 'ι']), + ('Κ', &['κ', 'ϰ']), + ('Λ', &['λ']), + ('Μ', &['µ', 'μ']), + ('Ν', &['ν']), + ('Ξ', &['ξ']), + ('Ο', &['ο']), + ('Π', &['π', 'ϖ']), + ('Ρ', &['ρ', 'ϱ']), + ('Σ', &['ς', 'σ']), + ('Τ', &['τ']), + ('Υ', &['υ']), + ('Φ', &['φ', 'ϕ']), + ('Χ', &['χ']), + ('Ψ', &['ψ']), + ('Ω', &['ω', 'Ω']), + ('Ϊ', &['ϊ']), + ('Ϋ', &['ϋ']), + ('ά', &['Ά']), + ('έ', &['Έ']), + ('ή', &['Ή']), + ('ί', &['Ί']), + ('ΰ', &['ΰ']), + ('α', &['Α']), + ('β', &['Β', 'ϐ']), + ('γ', &['Γ']), + ('δ', &['Δ']), + ('ε', &['Ε', 'ϵ']), + ('ζ', &['Ζ']), + ('η', &['Η']), + ('θ', &['Θ', 'ϑ', 'ϴ']), + ('ι', &['\u{345}', 
'Ι', 'ι']), + ('κ', &['Κ', 'ϰ']), + ('λ', &['Λ']), + ('μ', &['µ', 'Μ']), + ('ν', &['Ν']), + ('ξ', &['Ξ']), + ('ο', &['Ο']), + ('π', &['Π', 'ϖ']), + ('ρ', &['Ρ', 'ϱ']), + ('ς', &['Σ', 'σ']), + ('σ', &['Σ', 'ς']), + ('τ', &['Τ']), + ('υ', &['Υ']), + ('φ', &['Φ', 'ϕ']), + ('χ', &['Χ']), + ('ψ', &['Ψ']), + ('ω', &['Ω', 'Ω']), + ('ϊ', &['Ϊ']), + ('ϋ', &['Ϋ']), + ('ό', &['Ό']), + ('ύ', &['Ύ']), + ('ώ', &['Ώ']), + ('Ϗ', &['ϗ']), + ('ϐ', &['Β', 'β']), + ('ϑ', &['Θ', 'θ', 'ϴ']), + ('ϕ', &['Φ', 'φ']), + ('ϖ', &['Π', 'π']), + ('ϗ', &['Ϗ']), + ('Ϙ', &['ϙ']), + ('ϙ', &['Ϙ']), + ('Ϛ', &['ϛ']), + ('ϛ', &['Ϛ']), + ('Ϝ', &['ϝ']), + ('ϝ', &['Ϝ']), + ('Ϟ', &['ϟ']), + ('ϟ', &['Ϟ']), + ('Ϡ', &['ϡ']), + ('ϡ', &['Ϡ']), + ('Ϣ', &['ϣ']), + ('ϣ', &['Ϣ']), + ('Ϥ', &['ϥ']), + ('ϥ', &['Ϥ']), + ('Ϧ', &['ϧ']), + ('ϧ', &['Ϧ']), + ('Ϩ', &['ϩ']), + ('ϩ', &['Ϩ']), + ('Ϫ', &['ϫ']), + ('ϫ', &['Ϫ']), + ('Ϭ', &['ϭ']), + ('ϭ', &['Ϭ']), + ('Ϯ', &['ϯ']), + ('ϯ', &['Ϯ']), + ('ϰ', &['Κ', 'κ']), + ('ϱ', &['Ρ', 'ρ']), + ('ϲ', &['Ϲ']), + ('ϳ', &['Ϳ']), + ('ϴ', &['Θ', 'θ', 'ϑ']), + ('ϵ', &['Ε', 'ε']), + ('Ϸ', &['ϸ']), + ('ϸ', &['Ϸ']), + ('Ϲ', &['ϲ']), + ('Ϻ', &['ϻ']), + ('ϻ', &['Ϻ']), + ('Ͻ', &['ͻ']), + ('Ͼ', &['ͼ']), + ('Ͽ', &['ͽ']), + ('Ѐ', &['ѐ']), + ('Ё', &['ё']), + ('Ђ', &['ђ']), + ('Ѓ', &['ѓ']), + ('Є', &['є']), + ('Ѕ', &['ѕ']), + ('І', &['і']), + ('Ї', &['ї']), + ('Ј', &['ј']), + ('Љ', &['љ']), + ('Њ', &['њ']), + ('Ћ', &['ћ']), + ('Ќ', &['ќ']), + ('Ѝ', &['ѝ']), + ('Ў', &['ў']), + ('Џ', &['џ']), + ('А', &['а']), + ('Б', &['б']), + ('В', &['в', 'ᲀ']), + ('Г', &['г']), + ('Д', &['д', 'ᲁ']), + ('Е', &['е']), + ('Ж', &['ж']), + ('З', &['з']), + ('И', &['и']), + ('Й', &['й']), + ('К', &['к']), + ('Л', &['л']), + ('М', &['м']), + ('Н', &['н']), + ('О', &['о', 'ᲂ']), + ('П', &['п']), + ('Р', &['р']), + ('С', &['с', 'ᲃ']), + ('Т', &['т', 'ᲄ', 'ᲅ']), + ('У', &['у']), + ('Ф', &['ф']), + ('Х', &['х']), + ('Ц', &['ц']), + ('Ч', &['ч']), + ('Ш', &['ш']), + ('Щ', &['щ']), + ('Ъ', &['ъ', 'ᲆ']), + ('Ы', &['ы']), + ('Ь', 
&['ь']), + ('Э', &['э']), + ('Ю', &['ю']), + ('Я', &['я']), + ('а', &['А']), + ('б', &['Б']), + ('в', &['В', 'ᲀ']), + ('г', &['Г']), + ('д', &['Д', 'ᲁ']), + ('е', &['Е']), + ('ж', &['Ж']), + ('з', &['З']), + ('и', &['И']), + ('й', &['Й']), + ('к', &['К']), + ('л', &['Л']), + ('м', &['М']), + ('н', &['Н']), + ('о', &['О', 'ᲂ']), + ('п', &['П']), + ('р', &['Р']), + ('с', &['С', 'ᲃ']), + ('т', &['Т', 'ᲄ', 'ᲅ']), + ('у', &['У']), + ('ф', &['Ф']), + ('х', &['Х']), + ('ц', &['Ц']), + ('ч', &['Ч']), + ('ш', &['Ш']), + ('щ', &['Щ']), + ('ъ', &['Ъ', 'ᲆ']), + ('ы', &['Ы']), + ('ь', &['Ь']), + ('э', &['Э']), + ('ю', &['Ю']), + ('я', &['Я']), + ('ѐ', &['Ѐ']), + ('ё', &['Ё']), + ('ђ', &['Ђ']), + ('ѓ', &['Ѓ']), + ('є', &['Є']), + ('ѕ', &['Ѕ']), + ('і', &['І']), + ('ї', &['Ї']), + ('ј', &['Ј']), + ('љ', &['Љ']), + ('њ', &['Њ']), + ('ћ', &['Ћ']), + ('ќ', &['Ќ']), + ('ѝ', &['Ѝ']), + ('ў', &['Ў']), + ('џ', &['Џ']), + ('Ѡ', &['ѡ']), + ('ѡ', &['Ѡ']), + ('Ѣ', &['ѣ', 'ᲇ']), + ('ѣ', &['Ѣ', 'ᲇ']), + ('Ѥ', &['ѥ']), + ('ѥ', &['Ѥ']), + ('Ѧ', &['ѧ']), + ('ѧ', &['Ѧ']), + ('Ѩ', &['ѩ']), + ('ѩ', &['Ѩ']), + ('Ѫ', &['ѫ']), + ('ѫ', &['Ѫ']), + ('Ѭ', &['ѭ']), + ('ѭ', &['Ѭ']), + ('Ѯ', &['ѯ']), + ('ѯ', &['Ѯ']), + ('Ѱ', &['ѱ']), + ('ѱ', &['Ѱ']), + ('Ѳ', &['ѳ']), + ('ѳ', &['Ѳ']), + ('Ѵ', &['ѵ']), + ('ѵ', &['Ѵ']), + ('Ѷ', &['ѷ']), + ('ѷ', &['Ѷ']), + ('Ѹ', &['ѹ']), + ('ѹ', &['Ѹ']), + ('Ѻ', &['ѻ']), + ('ѻ', &['Ѻ']), + ('Ѽ', &['ѽ']), + ('ѽ', &['Ѽ']), + ('Ѿ', &['ѿ']), + ('ѿ', &['Ѿ']), + ('Ҁ', &['ҁ']), + ('ҁ', &['Ҁ']), + ('Ҋ', &['ҋ']), + ('ҋ', &['Ҋ']), + ('Ҍ', &['ҍ']), + ('ҍ', &['Ҍ']), + ('Ҏ', &['ҏ']), + ('ҏ', &['Ҏ']), + ('Ґ', &['ґ']), + ('ґ', &['Ґ']), + ('Ғ', &['ғ']), + ('ғ', &['Ғ']), + ('Ҕ', &['ҕ']), + ('ҕ', &['Ҕ']), + ('Җ', &['җ']), + ('җ', &['Җ']), + ('Ҙ', &['ҙ']), + ('ҙ', &['Ҙ']), + ('Қ', &['қ']), + ('қ', &['Қ']), + ('Ҝ', &['ҝ']), + ('ҝ', &['Ҝ']), + ('Ҟ', &['ҟ']), + ('ҟ', &['Ҟ']), + ('Ҡ', &['ҡ']), + ('ҡ', &['Ҡ']), + ('Ң', &['ң']), + ('ң', &['Ң']), + ('Ҥ', &['ҥ']), + ('ҥ', &['Ҥ']), + ('Ҧ', &['ҧ']), + ('ҧ', 
&['Ҧ']), + ('Ҩ', &['ҩ']), + ('ҩ', &['Ҩ']), + ('Ҫ', &['ҫ']), + ('ҫ', &['Ҫ']), + ('Ҭ', &['ҭ']), + ('ҭ', &['Ҭ']), + ('Ү', &['ү']), + ('ү', &['Ү']), + ('Ұ', &['ұ']), + ('ұ', &['Ұ']), + ('Ҳ', &['ҳ']), + ('ҳ', &['Ҳ']), + ('Ҵ', &['ҵ']), + ('ҵ', &['Ҵ']), + ('Ҷ', &['ҷ']), + ('ҷ', &['Ҷ']), + ('Ҹ', &['ҹ']), + ('ҹ', &['Ҹ']), + ('Һ', &['һ']), + ('һ', &['Һ']), + ('Ҽ', &['ҽ']), + ('ҽ', &['Ҽ']), + ('Ҿ', &['ҿ']), + ('ҿ', &['Ҿ']), + ('Ӏ', &['ӏ']), + ('Ӂ', &['ӂ']), + ('ӂ', &['Ӂ']), + ('Ӄ', &['ӄ']), + ('ӄ', &['Ӄ']), + ('Ӆ', &['ӆ']), + ('ӆ', &['Ӆ']), + ('Ӈ', &['ӈ']), + ('ӈ', &['Ӈ']), + ('Ӊ', &['ӊ']), + ('ӊ', &['Ӊ']), + ('Ӌ', &['ӌ']), + ('ӌ', &['Ӌ']), + ('Ӎ', &['ӎ']), + ('ӎ', &['Ӎ']), + ('ӏ', &['Ӏ']), + ('Ӑ', &['ӑ']), + ('ӑ', &['Ӑ']), + ('Ӓ', &['ӓ']), + ('ӓ', &['Ӓ']), + ('Ӕ', &['ӕ']), + ('ӕ', &['Ӕ']), + ('Ӗ', &['ӗ']), + ('ӗ', &['Ӗ']), + ('Ә', &['ә']), + ('ә', &['Ә']), + ('Ӛ', &['ӛ']), + ('ӛ', &['Ӛ']), + ('Ӝ', &['ӝ']), + ('ӝ', &['Ӝ']), + ('Ӟ', &['ӟ']), + ('ӟ', &['Ӟ']), + ('Ӡ', &['ӡ']), + ('ӡ', &['Ӡ']), + ('Ӣ', &['ӣ']), + ('ӣ', &['Ӣ']), + ('Ӥ', &['ӥ']), + ('ӥ', &['Ӥ']), + ('Ӧ', &['ӧ']), + ('ӧ', &['Ӧ']), + ('Ө', &['ө']), + ('ө', &['Ө']), + ('Ӫ', &['ӫ']), + ('ӫ', &['Ӫ']), + ('Ӭ', &['ӭ']), + ('ӭ', &['Ӭ']), + ('Ӯ', &['ӯ']), + ('ӯ', &['Ӯ']), + ('Ӱ', &['ӱ']), + ('ӱ', &['Ӱ']), + ('Ӳ', &['ӳ']), + ('ӳ', &['Ӳ']), + ('Ӵ', &['ӵ']), + ('ӵ', &['Ӵ']), + ('Ӷ', &['ӷ']), + ('ӷ', &['Ӷ']), + ('Ӹ', &['ӹ']), + ('ӹ', &['Ӹ']), + ('Ӻ', &['ӻ']), + ('ӻ', &['Ӻ']), + ('Ӽ', &['ӽ']), + ('ӽ', &['Ӽ']), + ('Ӿ', &['ӿ']), + ('ӿ', &['Ӿ']), + ('Ԁ', &['ԁ']), + ('ԁ', &['Ԁ']), + ('Ԃ', &['ԃ']), + ('ԃ', &['Ԃ']), + ('Ԅ', &['ԅ']), + ('ԅ', &['Ԅ']), + ('Ԇ', &['ԇ']), + ('ԇ', &['Ԇ']), + ('Ԉ', &['ԉ']), + ('ԉ', &['Ԉ']), + ('Ԋ', &['ԋ']), + ('ԋ', &['Ԋ']), + ('Ԍ', &['ԍ']), + ('ԍ', &['Ԍ']), + ('Ԏ', &['ԏ']), + ('ԏ', &['Ԏ']), + ('Ԑ', &['ԑ']), + ('ԑ', &['Ԑ']), + ('Ԓ', &['ԓ']), + ('ԓ', &['Ԓ']), + ('Ԕ', &['ԕ']), + ('ԕ', &['Ԕ']), + ('Ԗ', &['ԗ']), + ('ԗ', &['Ԗ']), + ('Ԙ', &['ԙ']), + ('ԙ', &['Ԙ']), + ('Ԛ', &['ԛ']), + ('ԛ', &['Ԛ']), + ('Ԝ', &['ԝ']), + 
('ԝ', &['Ԝ']), + ('Ԟ', &['ԟ']), + ('ԟ', &['Ԟ']), + ('Ԡ', &['ԡ']), + ('ԡ', &['Ԡ']), + ('Ԣ', &['ԣ']), + ('ԣ', &['Ԣ']), + ('Ԥ', &['ԥ']), + ('ԥ', &['Ԥ']), + ('Ԧ', &['ԧ']), + ('ԧ', &['Ԧ']), + ('Ԩ', &['ԩ']), + ('ԩ', &['Ԩ']), + ('Ԫ', &['ԫ']), + ('ԫ', &['Ԫ']), + ('Ԭ', &['ԭ']), + ('ԭ', &['Ԭ']), + ('Ԯ', &['ԯ']), + ('ԯ', &['Ԯ']), + ('Ա', &['ա']), + ('Բ', &['բ']), + ('Գ', &['գ']), + ('Դ', &['դ']), + ('Ե', &['ե']), + ('Զ', &['զ']), + ('Է', &['է']), + ('Ը', &['ը']), + ('Թ', &['թ']), + ('Ժ', &['ժ']), + ('Ի', &['ի']), + ('Լ', &['լ']), + ('Խ', &['խ']), + ('Ծ', &['ծ']), + ('Կ', &['կ']), + ('Հ', &['հ']), + ('Ձ', &['ձ']), + ('Ղ', &['ղ']), + ('Ճ', &['ճ']), + ('Մ', &['մ']), + ('Յ', &['յ']), + ('Ն', &['ն']), + ('Շ', &['շ']), + ('Ո', &['ո']), + ('Չ', &['չ']), + ('Պ', &['պ']), + ('Ջ', &['ջ']), + ('Ռ', &['ռ']), + ('Ս', &['ս']), + ('Վ', &['վ']), + ('Տ', &['տ']), + ('Ր', &['ր']), + ('Ց', &['ց']), + ('Ւ', &['ւ']), + ('Փ', &['փ']), + ('Ք', &['ք']), + ('Օ', &['օ']), + ('Ֆ', &['ֆ']), + ('ա', &['Ա']), + ('բ', &['Բ']), + ('գ', &['Գ']), + ('դ', &['Դ']), + ('ե', &['Ե']), + ('զ', &['Զ']), + ('է', &['Է']), + ('ը', &['Ը']), + ('թ', &['Թ']), + ('ժ', &['Ժ']), + ('ի', &['Ի']), + ('լ', &['Լ']), + ('խ', &['Խ']), + ('ծ', &['Ծ']), + ('կ', &['Կ']), + ('հ', &['Հ']), + ('ձ', &['Ձ']), + ('ղ', &['Ղ']), + ('ճ', &['Ճ']), + ('մ', &['Մ']), + ('յ', &['Յ']), + ('ն', &['Ն']), + ('շ', &['Շ']), + ('ո', &['Ո']), + ('չ', &['Չ']), + ('պ', &['Պ']), + ('ջ', &['Ջ']), + ('ռ', &['Ռ']), + ('ս', &['Ս']), + ('վ', &['Վ']), + ('տ', &['Տ']), + ('ր', &['Ր']), + ('ց', &['Ց']), + ('ւ', &['Ւ']), + ('փ', &['Փ']), + ('ք', &['Ք']), + ('օ', &['Օ']), + ('ֆ', &['Ֆ']), + ('Ⴀ', &['ⴀ']), + ('Ⴁ', &['ⴁ']), + ('Ⴂ', &['ⴂ']), + ('Ⴃ', &['ⴃ']), + ('Ⴄ', &['ⴄ']), + ('Ⴅ', &['ⴅ']), + ('Ⴆ', &['ⴆ']), + ('Ⴇ', &['ⴇ']), + ('Ⴈ', &['ⴈ']), + ('Ⴉ', &['ⴉ']), + ('Ⴊ', &['ⴊ']), + ('Ⴋ', &['ⴋ']), + ('Ⴌ', &['ⴌ']), + ('Ⴍ', &['ⴍ']), + ('Ⴎ', &['ⴎ']), + ('Ⴏ', &['ⴏ']), + ('Ⴐ', &['ⴐ']), + ('Ⴑ', &['ⴑ']), + ('Ⴒ', &['ⴒ']), + ('Ⴓ', &['ⴓ']), + ('Ⴔ', &['ⴔ']), + ('Ⴕ', &['ⴕ']), + ('Ⴖ', 
&['ⴖ']), + ('Ⴗ', &['ⴗ']), + ('Ⴘ', &['ⴘ']), + ('Ⴙ', &['ⴙ']), + ('Ⴚ', &['ⴚ']), + ('Ⴛ', &['ⴛ']), + ('Ⴜ', &['ⴜ']), + ('Ⴝ', &['ⴝ']), + ('Ⴞ', &['ⴞ']), + ('Ⴟ', &['ⴟ']), + ('Ⴠ', &['ⴠ']), + ('Ⴡ', &['ⴡ']), + ('Ⴢ', &['ⴢ']), + ('Ⴣ', &['ⴣ']), + ('Ⴤ', &['ⴤ']), + ('Ⴥ', &['ⴥ']), + ('Ⴧ', &['ⴧ']), + ('Ⴭ', &['ⴭ']), + ('ა', &['Ა']), + ('ბ', &['Ბ']), + ('გ', &['Გ']), + ('დ', &['Დ']), + ('ე', &['Ე']), + ('ვ', &['Ვ']), + ('ზ', &['Ზ']), + ('თ', &['Თ']), + ('ი', &['Ი']), + ('კ', &['Კ']), + ('ლ', &['Ლ']), + ('მ', &['Მ']), + ('ნ', &['Ნ']), + ('ო', &['Ო']), + ('პ', &['Პ']), + ('ჟ', &['Ჟ']), + ('რ', &['Რ']), + ('ს', &['Ს']), + ('ტ', &['Ტ']), + ('უ', &['Უ']), + ('ფ', &['Ფ']), + ('ქ', &['Ქ']), + ('ღ', &['Ღ']), + ('ყ', &['Ყ']), + ('შ', &['Შ']), + ('ჩ', &['Ჩ']), + ('ც', &['Ც']), + ('ძ', &['Ძ']), + ('წ', &['Წ']), + ('ჭ', &['Ჭ']), + ('ხ', &['Ხ']), + ('ჯ', &['Ჯ']), + ('ჰ', &['Ჰ']), + ('ჱ', &['Ჱ']), + ('ჲ', &['Ჲ']), + ('ჳ', &['Ჳ']), + ('ჴ', &['Ჴ']), + ('ჵ', &['Ჵ']), + ('ჶ', &['Ჶ']), + ('ჷ', &['Ჷ']), + ('ჸ', &['Ჸ']), + ('ჹ', &['Ჹ']), + ('ჺ', &['Ჺ']), + ('ჽ', &['Ჽ']), + ('ჾ', &['Ჾ']), + ('ჿ', &['Ჿ']), + ('Ꭰ', &['ꭰ']), + ('Ꭱ', &['ꭱ']), + ('Ꭲ', &['ꭲ']), + ('Ꭳ', &['ꭳ']), + ('Ꭴ', &['ꭴ']), + ('Ꭵ', &['ꭵ']), + ('Ꭶ', &['ꭶ']), + ('Ꭷ', &['ꭷ']), + ('Ꭸ', &['ꭸ']), + ('Ꭹ', &['ꭹ']), + ('Ꭺ', &['ꭺ']), + ('Ꭻ', &['ꭻ']), + ('Ꭼ', &['ꭼ']), + ('Ꭽ', &['ꭽ']), + ('Ꭾ', &['ꭾ']), + ('Ꭿ', &['ꭿ']), + ('Ꮀ', &['ꮀ']), + ('Ꮁ', &['ꮁ']), + ('Ꮂ', &['ꮂ']), + ('Ꮃ', &['ꮃ']), + ('Ꮄ', &['ꮄ']), + ('Ꮅ', &['ꮅ']), + ('Ꮆ', &['ꮆ']), + ('Ꮇ', &['ꮇ']), + ('Ꮈ', &['ꮈ']), + ('Ꮉ', &['ꮉ']), + ('Ꮊ', &['ꮊ']), + ('Ꮋ', &['ꮋ']), + ('Ꮌ', &['ꮌ']), + ('Ꮍ', &['ꮍ']), + ('Ꮎ', &['ꮎ']), + ('Ꮏ', &['ꮏ']), + ('Ꮐ', &['ꮐ']), + ('Ꮑ', &['ꮑ']), + ('Ꮒ', &['ꮒ']), + ('Ꮓ', &['ꮓ']), + ('Ꮔ', &['ꮔ']), + ('Ꮕ', &['ꮕ']), + ('Ꮖ', &['ꮖ']), + ('Ꮗ', &['ꮗ']), + ('Ꮘ', &['ꮘ']), + ('Ꮙ', &['ꮙ']), + ('Ꮚ', &['ꮚ']), + ('Ꮛ', &['ꮛ']), + ('Ꮜ', &['ꮜ']), + ('Ꮝ', &['ꮝ']), + ('Ꮞ', &['ꮞ']), + ('Ꮟ', &['ꮟ']), + ('Ꮠ', &['ꮠ']), + ('Ꮡ', &['ꮡ']), + ('Ꮢ', &['ꮢ']), + ('Ꮣ', &['ꮣ']), + ('Ꮤ', &['ꮤ']), + ('Ꮥ', &['ꮥ']), + 
('Ꮦ', &['ꮦ']), + ('Ꮧ', &['ꮧ']), + ('Ꮨ', &['ꮨ']), + ('Ꮩ', &['ꮩ']), + ('Ꮪ', &['ꮪ']), + ('Ꮫ', &['ꮫ']), + ('Ꮬ', &['ꮬ']), + ('Ꮭ', &['ꮭ']), + ('Ꮮ', &['ꮮ']), + ('Ꮯ', &['ꮯ']), + ('Ꮰ', &['ꮰ']), + ('Ꮱ', &['ꮱ']), + ('Ꮲ', &['ꮲ']), + ('Ꮳ', &['ꮳ']), + ('Ꮴ', &['ꮴ']), + ('Ꮵ', &['ꮵ']), + ('Ꮶ', &['ꮶ']), + ('Ꮷ', &['ꮷ']), + ('Ꮸ', &['ꮸ']), + ('Ꮹ', &['ꮹ']), + ('Ꮺ', &['ꮺ']), + ('Ꮻ', &['ꮻ']), + ('Ꮼ', &['ꮼ']), + ('Ꮽ', &['ꮽ']), + ('Ꮾ', &['ꮾ']), + ('Ꮿ', &['ꮿ']), + ('Ᏸ', &['ᏸ']), + ('Ᏹ', &['ᏹ']), + ('Ᏺ', &['ᏺ']), + ('Ᏻ', &['ᏻ']), + ('Ᏼ', &['ᏼ']), + ('Ᏽ', &['ᏽ']), + ('ᏸ', &['Ᏸ']), + ('ᏹ', &['Ᏹ']), + ('ᏺ', &['Ᏺ']), + ('ᏻ', &['Ᏻ']), + ('ᏼ', &['Ᏼ']), + ('ᏽ', &['Ᏽ']), + ('ᲀ', &['В', 'в']), + ('ᲁ', &['Д', 'д']), + ('ᲂ', &['О', 'о']), + ('ᲃ', &['С', 'с']), + ('ᲄ', &['Т', 'т', 'ᲅ']), + ('ᲅ', &['Т', 'т', 'ᲄ']), + ('ᲆ', &['Ъ', 'ъ']), + ('ᲇ', &['Ѣ', 'ѣ']), + ('ᲈ', &['Ꙋ', 'ꙋ']), + ('Ᲊ', &['ᲊ']), + ('ᲊ', &['Ᲊ']), + ('Ა', &['ა']), + ('Ბ', &['ბ']), + ('Გ', &['გ']), + ('Დ', &['დ']), + ('Ე', &['ე']), + ('Ვ', &['ვ']), + ('Ზ', &['ზ']), + ('Თ', &['თ']), + ('Ი', &['ი']), + ('Კ', &['კ']), + ('Ლ', &['ლ']), + ('Მ', &['მ']), + ('Ნ', &['ნ']), + ('Ო', &['ო']), + ('Პ', &['პ']), + ('Ჟ', &['ჟ']), + ('Რ', &['რ']), + ('Ს', &['ს']), + ('Ტ', &['ტ']), + ('Უ', &['უ']), + ('Ფ', &['ფ']), + ('Ქ', &['ქ']), + ('Ღ', &['ღ']), + ('Ყ', &['ყ']), + ('Შ', &['შ']), + ('Ჩ', &['ჩ']), + ('Ც', &['ც']), + ('Ძ', &['ძ']), + ('Წ', &['წ']), + ('Ჭ', &['ჭ']), + ('Ხ', &['ხ']), + ('Ჯ', &['ჯ']), + ('Ჰ', &['ჰ']), + ('Ჱ', &['ჱ']), + ('Ჲ', &['ჲ']), + ('Ჳ', &['ჳ']), + ('Ჴ', &['ჴ']), + ('Ჵ', &['ჵ']), + ('Ჶ', &['ჶ']), + ('Ჷ', &['ჷ']), + ('Ჸ', &['ჸ']), + ('Ჹ', &['ჹ']), + ('Ჺ', &['ჺ']), + ('Ჽ', &['ჽ']), + ('Ჾ', &['ჾ']), + ('Ჿ', &['ჿ']), + ('ᵹ', &['Ᵹ']), + ('ᵽ', &['Ᵽ']), + ('ᶎ', &['Ᶎ']), + ('Ḁ', &['ḁ']), + ('ḁ', &['Ḁ']), + ('Ḃ', &['ḃ']), + ('ḃ', &['Ḃ']), + ('Ḅ', &['ḅ']), + ('ḅ', &['Ḅ']), + ('Ḇ', &['ḇ']), + ('ḇ', &['Ḇ']), + ('Ḉ', &['ḉ']), + ('ḉ', &['Ḉ']), + ('Ḋ', &['ḋ']), + ('ḋ', &['Ḋ']), + ('Ḍ', &['ḍ']), + ('ḍ', &['Ḍ']), + ('Ḏ', &['ḏ']), + ('ḏ', &['Ḏ']), + ('Ḑ', 
&['ḑ']), + ('ḑ', &['Ḑ']), + ('Ḓ', &['ḓ']), + ('ḓ', &['Ḓ']), + ('Ḕ', &['ḕ']), + ('ḕ', &['Ḕ']), + ('Ḗ', &['ḗ']), + ('ḗ', &['Ḗ']), + ('Ḙ', &['ḙ']), + ('ḙ', &['Ḙ']), + ('Ḛ', &['ḛ']), + ('ḛ', &['Ḛ']), + ('Ḝ', &['ḝ']), + ('ḝ', &['Ḝ']), + ('Ḟ', &['ḟ']), + ('ḟ', &['Ḟ']), + ('Ḡ', &['ḡ']), + ('ḡ', &['Ḡ']), + ('Ḣ', &['ḣ']), + ('ḣ', &['Ḣ']), + ('Ḥ', &['ḥ']), + ('ḥ', &['Ḥ']), + ('Ḧ', &['ḧ']), + ('ḧ', &['Ḧ']), + ('Ḩ', &['ḩ']), + ('ḩ', &['Ḩ']), + ('Ḫ', &['ḫ']), + ('ḫ', &['Ḫ']), + ('Ḭ', &['ḭ']), + ('ḭ', &['Ḭ']), + ('Ḯ', &['ḯ']), + ('ḯ', &['Ḯ']), + ('Ḱ', &['ḱ']), + ('ḱ', &['Ḱ']), + ('Ḳ', &['ḳ']), + ('ḳ', &['Ḳ']), + ('Ḵ', &['ḵ']), + ('ḵ', &['Ḵ']), + ('Ḷ', &['ḷ']), + ('ḷ', &['Ḷ']), + ('Ḹ', &['ḹ']), + ('ḹ', &['Ḹ']), + ('Ḻ', &['ḻ']), + ('ḻ', &['Ḻ']), + ('Ḽ', &['ḽ']), + ('ḽ', &['Ḽ']), + ('Ḿ', &['ḿ']), + ('ḿ', &['Ḿ']), + ('Ṁ', &['ṁ']), + ('ṁ', &['Ṁ']), + ('Ṃ', &['ṃ']), + ('ṃ', &['Ṃ']), + ('Ṅ', &['ṅ']), + ('ṅ', &['Ṅ']), + ('Ṇ', &['ṇ']), + ('ṇ', &['Ṇ']), + ('Ṉ', &['ṉ']), + ('ṉ', &['Ṉ']), + ('Ṋ', &['ṋ']), + ('ṋ', &['Ṋ']), + ('Ṍ', &['ṍ']), + ('ṍ', &['Ṍ']), + ('Ṏ', &['ṏ']), + ('ṏ', &['Ṏ']), + ('Ṑ', &['ṑ']), + ('ṑ', &['Ṑ']), + ('Ṓ', &['ṓ']), + ('ṓ', &['Ṓ']), + ('Ṕ', &['ṕ']), + ('ṕ', &['Ṕ']), + ('Ṗ', &['ṗ']), + ('ṗ', &['Ṗ']), + ('Ṙ', &['ṙ']), + ('ṙ', &['Ṙ']), + ('Ṛ', &['ṛ']), + ('ṛ', &['Ṛ']), + ('Ṝ', &['ṝ']), + ('ṝ', &['Ṝ']), + ('Ṟ', &['ṟ']), + ('ṟ', &['Ṟ']), + ('Ṡ', &['ṡ', 'ẛ']), + ('ṡ', &['Ṡ', 'ẛ']), + ('Ṣ', &['ṣ']), + ('ṣ', &['Ṣ']), + ('Ṥ', &['ṥ']), + ('ṥ', &['Ṥ']), + ('Ṧ', &['ṧ']), + ('ṧ', &['Ṧ']), + ('Ṩ', &['ṩ']), + ('ṩ', &['Ṩ']), + ('Ṫ', &['ṫ']), + ('ṫ', &['Ṫ']), + ('Ṭ', &['ṭ']), + ('ṭ', &['Ṭ']), + ('Ṯ', &['ṯ']), + ('ṯ', &['Ṯ']), + ('Ṱ', &['ṱ']), + ('ṱ', &['Ṱ']), + ('Ṳ', &['ṳ']), + ('ṳ', &['Ṳ']), + ('Ṵ', &['ṵ']), + ('ṵ', &['Ṵ']), + ('Ṷ', &['ṷ']), + ('ṷ', &['Ṷ']), + ('Ṹ', &['ṹ']), + ('ṹ', &['Ṹ']), + ('Ṻ', &['ṻ']), + ('ṻ', &['Ṻ']), + ('Ṽ', &['ṽ']), + ('ṽ', &['Ṽ']), + ('Ṿ', &['ṿ']), + ('ṿ', &['Ṿ']), + ('Ẁ', &['ẁ']), + ('ẁ', &['Ẁ']), + ('Ẃ', &['ẃ']), + ('ẃ', &['Ẃ']), + ('Ẅ', &['ẅ']), + ('ẅ', 
&['Ẅ']), + ('Ẇ', &['ẇ']), + ('ẇ', &['Ẇ']), + ('Ẉ', &['ẉ']), + ('ẉ', &['Ẉ']), + ('Ẋ', &['ẋ']), + ('ẋ', &['Ẋ']), + ('Ẍ', &['ẍ']), + ('ẍ', &['Ẍ']), + ('Ẏ', &['ẏ']), + ('ẏ', &['Ẏ']), + ('Ẑ', &['ẑ']), + ('ẑ', &['Ẑ']), + ('Ẓ', &['ẓ']), + ('ẓ', &['Ẓ']), + ('Ẕ', &['ẕ']), + ('ẕ', &['Ẕ']), + ('ẛ', &['Ṡ', 'ṡ']), + ('ẞ', &['ß']), + ('Ạ', &['ạ']), + ('ạ', &['Ạ']), + ('Ả', &['ả']), + ('ả', &['Ả']), + ('Ấ', &['ấ']), + ('ấ', &['Ấ']), + ('Ầ', &['ầ']), + ('ầ', &['Ầ']), + ('Ẩ', &['ẩ']), + ('ẩ', &['Ẩ']), + ('Ẫ', &['ẫ']), + ('ẫ', &['Ẫ']), + ('Ậ', &['ậ']), + ('ậ', &['Ậ']), + ('Ắ', &['ắ']), + ('ắ', &['Ắ']), + ('Ằ', &['ằ']), + ('ằ', &['Ằ']), + ('Ẳ', &['ẳ']), + ('ẳ', &['Ẳ']), + ('Ẵ', &['ẵ']), + ('ẵ', &['Ẵ']), + ('Ặ', &['ặ']), + ('ặ', &['Ặ']), + ('Ẹ', &['ẹ']), + ('ẹ', &['Ẹ']), + ('Ẻ', &['ẻ']), + ('ẻ', &['Ẻ']), + ('Ẽ', &['ẽ']), + ('ẽ', &['Ẽ']), + ('Ế', &['ế']), + ('ế', &['Ế']), + ('Ề', &['ề']), + ('ề', &['Ề']), + ('Ể', &['ể']), + ('ể', &['Ể']), + ('Ễ', &['ễ']), + ('ễ', &['Ễ']), + ('Ệ', &['ệ']), + ('ệ', &['Ệ']), + ('Ỉ', &['ỉ']), + ('ỉ', &['Ỉ']), + ('Ị', &['ị']), + ('ị', &['Ị']), + ('Ọ', &['ọ']), + ('ọ', &['Ọ']), + ('Ỏ', &['ỏ']), + ('ỏ', &['Ỏ']), + ('Ố', &['ố']), + ('ố', &['Ố']), + ('Ồ', &['ồ']), + ('ồ', &['Ồ']), + ('Ổ', &['ổ']), + ('ổ', &['Ổ']), + ('Ỗ', &['ỗ']), + ('ỗ', &['Ỗ']), + ('Ộ', &['ộ']), + ('ộ', &['Ộ']), + ('Ớ', &['ớ']), + ('ớ', &['Ớ']), + ('Ờ', &['ờ']), + ('ờ', &['Ờ']), + ('Ở', &['ở']), + ('ở', &['Ở']), + ('Ỡ', &['ỡ']), + ('ỡ', &['Ỡ']), + ('Ợ', &['ợ']), + ('ợ', &['Ợ']), + ('Ụ', &['ụ']), + ('ụ', &['Ụ']), + ('Ủ', &['ủ']), + ('ủ', &['Ủ']), + ('Ứ', &['ứ']), + ('ứ', &['Ứ']), + ('Ừ', &['ừ']), + ('ừ', &['Ừ']), + ('Ử', &['ử']), + ('ử', &['Ử']), + ('Ữ', &['ữ']), + ('ữ', &['Ữ']), + ('Ự', &['ự']), + ('ự', &['Ự']), + ('Ỳ', &['ỳ']), + ('ỳ', &['Ỳ']), + ('Ỵ', &['ỵ']), + ('ỵ', &['Ỵ']), + ('Ỷ', &['ỷ']), + ('ỷ', &['Ỷ']), + ('Ỹ', &['ỹ']), + ('ỹ', &['Ỹ']), + ('Ỻ', &['ỻ']), + ('ỻ', &['Ỻ']), + ('Ỽ', &['ỽ']), + ('ỽ', &['Ỽ']), + ('Ỿ', &['ỿ']), + ('ỿ', &['Ỿ']), + ('ἀ', &['Ἀ']), + ('ἁ', &['Ἁ']), + ('ἂ', 
&['Ἂ']), + ('ἃ', &['Ἃ']), + ('ἄ', &['Ἄ']), + ('ἅ', &['Ἅ']), + ('ἆ', &['Ἆ']), + ('ἇ', &['Ἇ']), + ('Ἀ', &['ἀ']), + ('Ἁ', &['ἁ']), + ('Ἂ', &['ἂ']), + ('Ἃ', &['ἃ']), + ('Ἄ', &['ἄ']), + ('Ἅ', &['ἅ']), + ('Ἆ', &['ἆ']), + ('Ἇ', &['ἇ']), + ('ἐ', &['Ἐ']), + ('ἑ', &['Ἑ']), + ('ἒ', &['Ἒ']), + ('ἓ', &['Ἓ']), + ('ἔ', &['Ἔ']), + ('ἕ', &['Ἕ']), + ('Ἐ', &['ἐ']), + ('Ἑ', &['ἑ']), + ('Ἒ', &['ἒ']), + ('Ἓ', &['ἓ']), + ('Ἔ', &['ἔ']), + ('Ἕ', &['ἕ']), + ('ἠ', &['Ἠ']), + ('ἡ', &['Ἡ']), + ('ἢ', &['Ἢ']), + ('ἣ', &['Ἣ']), + ('ἤ', &['Ἤ']), + ('ἥ', &['Ἥ']), + ('ἦ', &['Ἦ']), + ('ἧ', &['Ἧ']), + ('Ἠ', &['ἠ']), + ('Ἡ', &['ἡ']), + ('Ἢ', &['ἢ']), + ('Ἣ', &['ἣ']), + ('Ἤ', &['ἤ']), + ('Ἥ', &['ἥ']), + ('Ἦ', &['ἦ']), + ('Ἧ', &['ἧ']), + ('ἰ', &['Ἰ']), + ('ἱ', &['Ἱ']), + ('ἲ', &['Ἲ']), + ('ἳ', &['Ἳ']), + ('ἴ', &['Ἴ']), + ('ἵ', &['Ἵ']), + ('ἶ', &['Ἶ']), + ('ἷ', &['Ἷ']), + ('Ἰ', &['ἰ']), + ('Ἱ', &['ἱ']), + ('Ἲ', &['ἲ']), + ('Ἳ', &['ἳ']), + ('Ἴ', &['ἴ']), + ('Ἵ', &['ἵ']), + ('Ἶ', &['ἶ']), + ('Ἷ', &['ἷ']), + ('ὀ', &['Ὀ']), + ('ὁ', &['Ὁ']), + ('ὂ', &['Ὂ']), + ('ὃ', &['Ὃ']), + ('ὄ', &['Ὄ']), + ('ὅ', &['Ὅ']), + ('Ὀ', &['ὀ']), + ('Ὁ', &['ὁ']), + ('Ὂ', &['ὂ']), + ('Ὃ', &['ὃ']), + ('Ὄ', &['ὄ']), + ('Ὅ', &['ὅ']), + ('ὑ', &['Ὑ']), + ('ὓ', &['Ὓ']), + ('ὕ', &['Ὕ']), + ('ὗ', &['Ὗ']), + ('Ὑ', &['ὑ']), + ('Ὓ', &['ὓ']), + ('Ὕ', &['ὕ']), + ('Ὗ', &['ὗ']), + ('ὠ', &['Ὠ']), + ('ὡ', &['Ὡ']), + ('ὢ', &['Ὢ']), + ('ὣ', &['Ὣ']), + ('ὤ', &['Ὤ']), + ('ὥ', &['Ὥ']), + ('ὦ', &['Ὦ']), + ('ὧ', &['Ὧ']), + ('Ὠ', &['ὠ']), + ('Ὡ', &['ὡ']), + ('Ὢ', &['ὢ']), + ('Ὣ', &['ὣ']), + ('Ὤ', &['ὤ']), + ('Ὥ', &['ὥ']), + ('Ὦ', &['ὦ']), + ('Ὧ', &['ὧ']), + ('ὰ', &['Ὰ']), + ('ά', &['Ά']), + ('ὲ', &['Ὲ']), + ('έ', &['Έ']), + ('ὴ', &['Ὴ']), + ('ή', &['Ή']), + ('ὶ', &['Ὶ']), + ('ί', &['Ί']), + ('ὸ', &['Ὸ']), + ('ό', &['Ό']), + ('ὺ', &['Ὺ']), + ('ύ', &['Ύ']), + ('ὼ', &['Ὼ']), + ('ώ', &['Ώ']), + ('ᾀ', &['ᾈ']), + ('ᾁ', &['ᾉ']), + ('ᾂ', &['ᾊ']), + ('ᾃ', &['ᾋ']), + ('ᾄ', &['ᾌ']), + ('ᾅ', &['ᾍ']), + ('ᾆ', &['ᾎ']), + ('ᾇ', &['ᾏ']), + ('ᾈ', &['ᾀ']), + ('ᾉ', &['ᾁ']), + 
('ᾊ', &['ᾂ']), + ('ᾋ', &['ᾃ']), + ('ᾌ', &['ᾄ']), + ('ᾍ', &['ᾅ']), + ('ᾎ', &['ᾆ']), + ('ᾏ', &['ᾇ']), + ('ᾐ', &['ᾘ']), + ('ᾑ', &['ᾙ']), + ('ᾒ', &['ᾚ']), + ('ᾓ', &['ᾛ']), + ('ᾔ', &['ᾜ']), + ('ᾕ', &['ᾝ']), + ('ᾖ', &['ᾞ']), + ('ᾗ', &['ᾟ']), + ('ᾘ', &['ᾐ']), + ('ᾙ', &['ᾑ']), + ('ᾚ', &['ᾒ']), + ('ᾛ', &['ᾓ']), + ('ᾜ', &['ᾔ']), + ('ᾝ', &['ᾕ']), + ('ᾞ', &['ᾖ']), + ('ᾟ', &['ᾗ']), + ('ᾠ', &['ᾨ']), + ('ᾡ', &['ᾩ']), + ('ᾢ', &['ᾪ']), + ('ᾣ', &['ᾫ']), + ('ᾤ', &['ᾬ']), + ('ᾥ', &['ᾭ']), + ('ᾦ', &['ᾮ']), + ('ᾧ', &['ᾯ']), + ('ᾨ', &['ᾠ']), + ('ᾩ', &['ᾡ']), + ('ᾪ', &['ᾢ']), + ('ᾫ', &['ᾣ']), + ('ᾬ', &['ᾤ']), + ('ᾭ', &['ᾥ']), + ('ᾮ', &['ᾦ']), + ('ᾯ', &['ᾧ']), + ('ᾰ', &['Ᾰ']), + ('ᾱ', &['Ᾱ']), + ('ᾳ', &['ᾼ']), + ('Ᾰ', &['ᾰ']), + ('Ᾱ', &['ᾱ']), + ('Ὰ', &['ὰ']), + ('Ά', &['ά']), + ('ᾼ', &['ᾳ']), + ('ι', &['\u{345}', 'Ι', 'ι']), + ('ῃ', &['ῌ']), + ('Ὲ', &['ὲ']), + ('Έ', &['έ']), + ('Ὴ', &['ὴ']), + ('Ή', &['ή']), + ('ῌ', &['ῃ']), + ('ῐ', &['Ῐ']), + ('ῑ', &['Ῑ']), + ('ΐ', &['ΐ']), + ('Ῐ', &['ῐ']), + ('Ῑ', &['ῑ']), + ('Ὶ', &['ὶ']), + ('Ί', &['ί']), + ('ῠ', &['Ῠ']), + ('ῡ', &['Ῡ']), + ('ΰ', &['ΰ']), + ('ῥ', &['Ῥ']), + ('Ῠ', &['ῠ']), + ('Ῡ', &['ῡ']), + ('Ὺ', &['ὺ']), + ('Ύ', &['ύ']), + ('Ῥ', &['ῥ']), + ('ῳ', &['ῼ']), + ('Ὸ', &['ὸ']), + ('Ό', &['ό']), + ('Ὼ', &['ὼ']), + ('Ώ', &['ώ']), + ('ῼ', &['ῳ']), + ('Ω', &['Ω', 'ω']), + ('K', &['K', 'k']), + ('Å', &['Å', 'å']), + ('Ⅎ', &['ⅎ']), + ('ⅎ', &['Ⅎ']), + ('Ⅰ', &['ⅰ']), + ('Ⅱ', &['ⅱ']), + ('Ⅲ', &['ⅲ']), + ('Ⅳ', &['ⅳ']), + ('Ⅴ', &['ⅴ']), + ('Ⅵ', &['ⅵ']), + ('Ⅶ', &['ⅶ']), + ('Ⅷ', &['ⅷ']), + ('Ⅸ', &['ⅸ']), + ('Ⅹ', &['ⅹ']), + ('Ⅺ', &['ⅺ']), + ('Ⅻ', &['ⅻ']), + ('Ⅼ', &['ⅼ']), + ('Ⅽ', &['ⅽ']), + ('Ⅾ', &['ⅾ']), + ('Ⅿ', &['ⅿ']), + ('ⅰ', &['Ⅰ']), + ('ⅱ', &['Ⅱ']), + ('ⅲ', &['Ⅲ']), + ('ⅳ', &['Ⅳ']), + ('ⅴ', &['Ⅴ']), + ('ⅵ', &['Ⅵ']), + ('ⅶ', &['Ⅶ']), + ('ⅷ', &['Ⅷ']), + ('ⅸ', &['Ⅸ']), + ('ⅹ', &['Ⅹ']), + ('ⅺ', &['Ⅺ']), + ('ⅻ', &['Ⅻ']), + ('ⅼ', &['Ⅼ']), + ('ⅽ', &['Ⅽ']), + ('ⅾ', &['Ⅾ']), + ('ⅿ', &['Ⅿ']), + ('Ↄ', &['ↄ']), + ('ↄ', &['Ↄ']), + ('Ⓐ', &['ⓐ']), + ('Ⓑ', 
&['ⓑ']), + ('Ⓒ', &['ⓒ']), + ('Ⓓ', &['ⓓ']), + ('Ⓔ', &['ⓔ']), + ('Ⓕ', &['ⓕ']), + ('Ⓖ', &['ⓖ']), + ('Ⓗ', &['ⓗ']), + ('Ⓘ', &['ⓘ']), + ('Ⓙ', &['ⓙ']), + ('Ⓚ', &['ⓚ']), + ('Ⓛ', &['ⓛ']), + ('Ⓜ', &['ⓜ']), + ('Ⓝ', &['ⓝ']), + ('Ⓞ', &['ⓞ']), + ('Ⓟ', &['ⓟ']), + ('Ⓠ', &['ⓠ']), + ('Ⓡ', &['ⓡ']), + ('Ⓢ', &['ⓢ']), + ('Ⓣ', &['ⓣ']), + ('Ⓤ', &['ⓤ']), + ('Ⓥ', &['ⓥ']), + ('Ⓦ', &['ⓦ']), + ('Ⓧ', &['ⓧ']), + ('Ⓨ', &['ⓨ']), + ('Ⓩ', &['ⓩ']), + ('ⓐ', &['Ⓐ']), + ('ⓑ', &['Ⓑ']), + ('ⓒ', &['Ⓒ']), + ('ⓓ', &['Ⓓ']), + ('ⓔ', &['Ⓔ']), + ('ⓕ', &['Ⓕ']), + ('ⓖ', &['Ⓖ']), + ('ⓗ', &['Ⓗ']), + ('ⓘ', &['Ⓘ']), + ('ⓙ', &['Ⓙ']), + ('ⓚ', &['Ⓚ']), + ('ⓛ', &['Ⓛ']), + ('ⓜ', &['Ⓜ']), + ('ⓝ', &['Ⓝ']), + ('ⓞ', &['Ⓞ']), + ('ⓟ', &['Ⓟ']), + ('ⓠ', &['Ⓠ']), + ('ⓡ', &['Ⓡ']), + ('ⓢ', &['Ⓢ']), + ('ⓣ', &['Ⓣ']), + ('ⓤ', &['Ⓤ']), + ('ⓥ', &['Ⓥ']), + ('ⓦ', &['Ⓦ']), + ('ⓧ', &['Ⓧ']), + ('ⓨ', &['Ⓨ']), + ('ⓩ', &['Ⓩ']), + ('Ⰰ', &['ⰰ']), + ('Ⰱ', &['ⰱ']), + ('Ⰲ', &['ⰲ']), + ('Ⰳ', &['ⰳ']), + ('Ⰴ', &['ⰴ']), + ('Ⰵ', &['ⰵ']), + ('Ⰶ', &['ⰶ']), + ('Ⰷ', &['ⰷ']), + ('Ⰸ', &['ⰸ']), + ('Ⰹ', &['ⰹ']), + ('Ⰺ', &['ⰺ']), + ('Ⰻ', &['ⰻ']), + ('Ⰼ', &['ⰼ']), + ('Ⰽ', &['ⰽ']), + ('Ⰾ', &['ⰾ']), + ('Ⰿ', &['ⰿ']), + ('Ⱀ', &['ⱀ']), + ('Ⱁ', &['ⱁ']), + ('Ⱂ', &['ⱂ']), + ('Ⱃ', &['ⱃ']), + ('Ⱄ', &['ⱄ']), + ('Ⱅ', &['ⱅ']), + ('Ⱆ', &['ⱆ']), + ('Ⱇ', &['ⱇ']), + ('Ⱈ', &['ⱈ']), + ('Ⱉ', &['ⱉ']), + ('Ⱊ', &['ⱊ']), + ('Ⱋ', &['ⱋ']), + ('Ⱌ', &['ⱌ']), + ('Ⱍ', &['ⱍ']), + ('Ⱎ', &['ⱎ']), + ('Ⱏ', &['ⱏ']), + ('Ⱐ', &['ⱐ']), + ('Ⱑ', &['ⱑ']), + ('Ⱒ', &['ⱒ']), + ('Ⱓ', &['ⱓ']), + ('Ⱔ', &['ⱔ']), + ('Ⱕ', &['ⱕ']), + ('Ⱖ', &['ⱖ']), + ('Ⱗ', &['ⱗ']), + ('Ⱘ', &['ⱘ']), + ('Ⱙ', &['ⱙ']), + ('Ⱚ', &['ⱚ']), + ('Ⱛ', &['ⱛ']), + ('Ⱜ', &['ⱜ']), + ('Ⱝ', &['ⱝ']), + ('Ⱞ', &['ⱞ']), + ('Ⱟ', &['ⱟ']), + ('ⰰ', &['Ⰰ']), + ('ⰱ', &['Ⰱ']), + ('ⰲ', &['Ⰲ']), + ('ⰳ', &['Ⰳ']), + ('ⰴ', &['Ⰴ']), + ('ⰵ', &['Ⰵ']), + ('ⰶ', &['Ⰶ']), + ('ⰷ', &['Ⰷ']), + ('ⰸ', &['Ⰸ']), + ('ⰹ', &['Ⰹ']), + ('ⰺ', &['Ⰺ']), + ('ⰻ', &['Ⰻ']), + ('ⰼ', &['Ⰼ']), + ('ⰽ', &['Ⰽ']), + ('ⰾ', &['Ⰾ']), + ('ⰿ', &['Ⰿ']), + ('ⱀ', &['Ⱀ']), + ('ⱁ', &['Ⱁ']), + ('ⱂ', &['Ⱂ']), + 
('ⱃ', &['Ⱃ']), + ('ⱄ', &['Ⱄ']), + ('ⱅ', &['Ⱅ']), + ('ⱆ', &['Ⱆ']), + ('ⱇ', &['Ⱇ']), + ('ⱈ', &['Ⱈ']), + ('ⱉ', &['Ⱉ']), + ('ⱊ', &['Ⱊ']), + ('ⱋ', &['Ⱋ']), + ('ⱌ', &['Ⱌ']), + ('ⱍ', &['Ⱍ']), + ('ⱎ', &['Ⱎ']), + ('ⱏ', &['Ⱏ']), + ('ⱐ', &['Ⱐ']), + ('ⱑ', &['Ⱑ']), + ('ⱒ', &['Ⱒ']), + ('ⱓ', &['Ⱓ']), + ('ⱔ', &['Ⱔ']), + ('ⱕ', &['Ⱕ']), + ('ⱖ', &['Ⱖ']), + ('ⱗ', &['Ⱗ']), + ('ⱘ', &['Ⱘ']), + ('ⱙ', &['Ⱙ']), + ('ⱚ', &['Ⱚ']), + ('ⱛ', &['Ⱛ']), + ('ⱜ', &['Ⱜ']), + ('ⱝ', &['Ⱝ']), + ('ⱞ', &['Ⱞ']), + ('ⱟ', &['Ⱟ']), + ('Ⱡ', &['ⱡ']), + ('ⱡ', &['Ⱡ']), + ('Ɫ', &['ɫ']), + ('Ᵽ', &['ᵽ']), + ('Ɽ', &['ɽ']), + ('ⱥ', &['Ⱥ']), + ('ⱦ', &['Ⱦ']), + ('Ⱨ', &['ⱨ']), + ('ⱨ', &['Ⱨ']), + ('Ⱪ', &['ⱪ']), + ('ⱪ', &['Ⱪ']), + ('Ⱬ', &['ⱬ']), + ('ⱬ', &['Ⱬ']), + ('Ɑ', &['ɑ']), + ('Ɱ', &['ɱ']), + ('Ɐ', &['ɐ']), + ('Ɒ', &['ɒ']), + ('Ⱳ', &['ⱳ']), + ('ⱳ', &['Ⱳ']), + ('Ⱶ', &['ⱶ']), + ('ⱶ', &['Ⱶ']), + ('Ȿ', &['ȿ']), + ('Ɀ', &['ɀ']), + ('Ⲁ', &['ⲁ']), + ('ⲁ', &['Ⲁ']), + ('Ⲃ', &['ⲃ']), + ('ⲃ', &['Ⲃ']), + ('Ⲅ', &['ⲅ']), + ('ⲅ', &['Ⲅ']), + ('Ⲇ', &['ⲇ']), + ('ⲇ', &['Ⲇ']), + ('Ⲉ', &['ⲉ']), + ('ⲉ', &['Ⲉ']), + ('Ⲋ', &['ⲋ']), + ('ⲋ', &['Ⲋ']), + ('Ⲍ', &['ⲍ']), + ('ⲍ', &['Ⲍ']), + ('Ⲏ', &['ⲏ']), + ('ⲏ', &['Ⲏ']), + ('Ⲑ', &['ⲑ']), + ('ⲑ', &['Ⲑ']), + ('Ⲓ', &['ⲓ']), + ('ⲓ', &['Ⲓ']), + ('Ⲕ', &['ⲕ']), + ('ⲕ', &['Ⲕ']), + ('Ⲗ', &['ⲗ']), + ('ⲗ', &['Ⲗ']), + ('Ⲙ', &['ⲙ']), + ('ⲙ', &['Ⲙ']), + ('Ⲛ', &['ⲛ']), + ('ⲛ', &['Ⲛ']), + ('Ⲝ', &['ⲝ']), + ('ⲝ', &['Ⲝ']), + ('Ⲟ', &['ⲟ']), + ('ⲟ', &['Ⲟ']), + ('Ⲡ', &['ⲡ']), + ('ⲡ', &['Ⲡ']), + ('Ⲣ', &['ⲣ']), + ('ⲣ', &['Ⲣ']), + ('Ⲥ', &['ⲥ']), + ('ⲥ', &['Ⲥ']), + ('Ⲧ', &['ⲧ']), + ('ⲧ', &['Ⲧ']), + ('Ⲩ', &['ⲩ']), + ('ⲩ', &['Ⲩ']), + ('Ⲫ', &['ⲫ']), + ('ⲫ', &['Ⲫ']), + ('Ⲭ', &['ⲭ']), + ('ⲭ', &['Ⲭ']), + ('Ⲯ', &['ⲯ']), + ('ⲯ', &['Ⲯ']), + ('Ⲱ', &['ⲱ']), + ('ⲱ', &['Ⲱ']), + ('Ⲳ', &['ⲳ']), + ('ⲳ', &['Ⲳ']), + ('Ⲵ', &['ⲵ']), + ('ⲵ', &['Ⲵ']), + ('Ⲷ', &['ⲷ']), + ('ⲷ', &['Ⲷ']), + ('Ⲹ', &['ⲹ']), + ('ⲹ', &['Ⲹ']), + ('Ⲻ', &['ⲻ']), + ('ⲻ', &['Ⲻ']), + ('Ⲽ', &['ⲽ']), + ('ⲽ', &['Ⲽ']), + ('Ⲿ', &['ⲿ']), + ('ⲿ', &['Ⲿ']), + ('Ⳁ', &['ⳁ']), + ('ⳁ', 
&['Ⳁ']), + ('Ⳃ', &['ⳃ']), + ('ⳃ', &['Ⳃ']), + ('Ⳅ', &['ⳅ']), + ('ⳅ', &['Ⳅ']), + ('Ⳇ', &['ⳇ']), + ('ⳇ', &['Ⳇ']), + ('Ⳉ', &['ⳉ']), + ('ⳉ', &['Ⳉ']), + ('Ⳋ', &['ⳋ']), + ('ⳋ', &['Ⳋ']), + ('Ⳍ', &['ⳍ']), + ('ⳍ', &['Ⳍ']), + ('Ⳏ', &['ⳏ']), + ('ⳏ', &['Ⳏ']), + ('Ⳑ', &['ⳑ']), + ('ⳑ', &['Ⳑ']), + ('Ⳓ', &['ⳓ']), + ('ⳓ', &['Ⳓ']), + ('Ⳕ', &['ⳕ']), + ('ⳕ', &['Ⳕ']), + ('Ⳗ', &['ⳗ']), + ('ⳗ', &['Ⳗ']), + ('Ⳙ', &['ⳙ']), + ('ⳙ', &['Ⳙ']), + ('Ⳛ', &['ⳛ']), + ('ⳛ', &['Ⳛ']), + ('Ⳝ', &['ⳝ']), + ('ⳝ', &['Ⳝ']), + ('Ⳟ', &['ⳟ']), + ('ⳟ', &['Ⳟ']), + ('Ⳡ', &['ⳡ']), + ('ⳡ', &['Ⳡ']), + ('Ⳣ', &['ⳣ']), + ('ⳣ', &['Ⳣ']), + ('Ⳬ', &['ⳬ']), + ('ⳬ', &['Ⳬ']), + ('Ⳮ', &['ⳮ']), + ('ⳮ', &['Ⳮ']), + ('Ⳳ', &['ⳳ']), + ('ⳳ', &['Ⳳ']), + ('ⴀ', &['Ⴀ']), + ('ⴁ', &['Ⴁ']), + ('ⴂ', &['Ⴂ']), + ('ⴃ', &['Ⴃ']), + ('ⴄ', &['Ⴄ']), + ('ⴅ', &['Ⴅ']), + ('ⴆ', &['Ⴆ']), + ('ⴇ', &['Ⴇ']), + ('ⴈ', &['Ⴈ']), + ('ⴉ', &['Ⴉ']), + ('ⴊ', &['Ⴊ']), + ('ⴋ', &['Ⴋ']), + ('ⴌ', &['Ⴌ']), + ('ⴍ', &['Ⴍ']), + ('ⴎ', &['Ⴎ']), + ('ⴏ', &['Ⴏ']), + ('ⴐ', &['Ⴐ']), + ('ⴑ', &['Ⴑ']), + ('ⴒ', &['Ⴒ']), + ('ⴓ', &['Ⴓ']), + ('ⴔ', &['Ⴔ']), + ('ⴕ', &['Ⴕ']), + ('ⴖ', &['Ⴖ']), + ('ⴗ', &['Ⴗ']), + ('ⴘ', &['Ⴘ']), + ('ⴙ', &['Ⴙ']), + ('ⴚ', &['Ⴚ']), + ('ⴛ', &['Ⴛ']), + ('ⴜ', &['Ⴜ']), + ('ⴝ', &['Ⴝ']), + ('ⴞ', &['Ⴞ']), + ('ⴟ', &['Ⴟ']), + ('ⴠ', &['Ⴠ']), + ('ⴡ', &['Ⴡ']), + ('ⴢ', &['Ⴢ']), + ('ⴣ', &['Ⴣ']), + ('ⴤ', &['Ⴤ']), + ('ⴥ', &['Ⴥ']), + ('ⴧ', &['Ⴧ']), + ('ⴭ', &['Ⴭ']), + ('Ꙁ', &['ꙁ']), + ('ꙁ', &['Ꙁ']), + ('Ꙃ', &['ꙃ']), + ('ꙃ', &['Ꙃ']), + ('Ꙅ', &['ꙅ']), + ('ꙅ', &['Ꙅ']), + ('Ꙇ', &['ꙇ']), + ('ꙇ', &['Ꙇ']), + ('Ꙉ', &['ꙉ']), + ('ꙉ', &['Ꙉ']), + ('Ꙋ', &['ᲈ', 'ꙋ']), + ('ꙋ', &['ᲈ', 'Ꙋ']), + ('Ꙍ', &['ꙍ']), + ('ꙍ', &['Ꙍ']), + ('Ꙏ', &['ꙏ']), + ('ꙏ', &['Ꙏ']), + ('Ꙑ', &['ꙑ']), + ('ꙑ', &['Ꙑ']), + ('Ꙓ', &['ꙓ']), + ('ꙓ', &['Ꙓ']), + ('Ꙕ', &['ꙕ']), + ('ꙕ', &['Ꙕ']), + ('Ꙗ', &['ꙗ']), + ('ꙗ', &['Ꙗ']), + ('Ꙙ', &['ꙙ']), + ('ꙙ', &['Ꙙ']), + ('Ꙛ', &['ꙛ']), + ('ꙛ', &['Ꙛ']), + ('Ꙝ', &['ꙝ']), + ('ꙝ', &['Ꙝ']), + ('Ꙟ', &['ꙟ']), + ('ꙟ', &['Ꙟ']), + ('Ꙡ', &['ꙡ']), + ('ꙡ', &['Ꙡ']), + ('Ꙣ', &['ꙣ']), + ('ꙣ', &['Ꙣ']), + ('Ꙥ', 
&['ꙥ']), + ('ꙥ', &['Ꙥ']), + ('Ꙧ', &['ꙧ']), + ('ꙧ', &['Ꙧ']), + ('Ꙩ', &['ꙩ']), + ('ꙩ', &['Ꙩ']), + ('Ꙫ', &['ꙫ']), + ('ꙫ', &['Ꙫ']), + ('Ꙭ', &['ꙭ']), + ('ꙭ', &['Ꙭ']), + ('Ꚁ', &['ꚁ']), + ('ꚁ', &['Ꚁ']), + ('Ꚃ', &['ꚃ']), + ('ꚃ', &['Ꚃ']), + ('Ꚅ', &['ꚅ']), + ('ꚅ', &['Ꚅ']), + ('Ꚇ', &['ꚇ']), + ('ꚇ', &['Ꚇ']), + ('Ꚉ', &['ꚉ']), + ('ꚉ', &['Ꚉ']), + ('Ꚋ', &['ꚋ']), + ('ꚋ', &['Ꚋ']), + ('Ꚍ', &['ꚍ']), + ('ꚍ', &['Ꚍ']), + ('Ꚏ', &['ꚏ']), + ('ꚏ', &['Ꚏ']), + ('Ꚑ', &['ꚑ']), + ('ꚑ', &['Ꚑ']), + ('Ꚓ', &['ꚓ']), + ('ꚓ', &['Ꚓ']), + ('Ꚕ', &['ꚕ']), + ('ꚕ', &['Ꚕ']), + ('Ꚗ', &['ꚗ']), + ('ꚗ', &['Ꚗ']), + ('Ꚙ', &['ꚙ']), + ('ꚙ', &['Ꚙ']), + ('Ꚛ', &['ꚛ']), + ('ꚛ', &['Ꚛ']), + ('Ꜣ', &['ꜣ']), + ('ꜣ', &['Ꜣ']), + ('Ꜥ', &['ꜥ']), + ('ꜥ', &['Ꜥ']), + ('Ꜧ', &['ꜧ']), + ('ꜧ', &['Ꜧ']), + ('Ꜩ', &['ꜩ']), + ('ꜩ', &['Ꜩ']), + ('Ꜫ', &['ꜫ']), + ('ꜫ', &['Ꜫ']), + ('Ꜭ', &['ꜭ']), + ('ꜭ', &['Ꜭ']), + ('Ꜯ', &['ꜯ']), + ('ꜯ', &['Ꜯ']), + ('Ꜳ', &['ꜳ']), + ('ꜳ', &['Ꜳ']), + ('Ꜵ', &['ꜵ']), + ('ꜵ', &['Ꜵ']), + ('Ꜷ', &['ꜷ']), + ('ꜷ', &['Ꜷ']), + ('Ꜹ', &['ꜹ']), + ('ꜹ', &['Ꜹ']), + ('Ꜻ', &['ꜻ']), + ('ꜻ', &['Ꜻ']), + ('Ꜽ', &['ꜽ']), + ('ꜽ', &['Ꜽ']), + ('Ꜿ', &['ꜿ']), + ('ꜿ', &['Ꜿ']), + ('Ꝁ', &['ꝁ']), + ('ꝁ', &['Ꝁ']), + ('Ꝃ', &['ꝃ']), + ('ꝃ', &['Ꝃ']), + ('Ꝅ', &['ꝅ']), + ('ꝅ', &['Ꝅ']), + ('Ꝇ', &['ꝇ']), + ('ꝇ', &['Ꝇ']), + ('Ꝉ', &['ꝉ']), + ('ꝉ', &['Ꝉ']), + ('Ꝋ', &['ꝋ']), + ('ꝋ', &['Ꝋ']), + ('Ꝍ', &['ꝍ']), + ('ꝍ', &['Ꝍ']), + ('Ꝏ', &['ꝏ']), + ('ꝏ', &['Ꝏ']), + ('Ꝑ', &['ꝑ']), + ('ꝑ', &['Ꝑ']), + ('Ꝓ', &['ꝓ']), + ('ꝓ', &['Ꝓ']), + ('Ꝕ', &['ꝕ']), + ('ꝕ', &['Ꝕ']), + ('Ꝗ', &['ꝗ']), + ('ꝗ', &['Ꝗ']), + ('Ꝙ', &['ꝙ']), + ('ꝙ', &['Ꝙ']), + ('Ꝛ', &['ꝛ']), + ('ꝛ', &['Ꝛ']), + ('Ꝝ', &['ꝝ']), + ('ꝝ', &['Ꝝ']), + ('Ꝟ', &['ꝟ']), + ('ꝟ', &['Ꝟ']), + ('Ꝡ', &['ꝡ']), + ('ꝡ', &['Ꝡ']), + ('Ꝣ', &['ꝣ']), + ('ꝣ', &['Ꝣ']), + ('Ꝥ', &['ꝥ']), + ('ꝥ', &['Ꝥ']), + ('Ꝧ', &['ꝧ']), + ('ꝧ', &['Ꝧ']), + ('Ꝩ', &['ꝩ']), + ('ꝩ', &['Ꝩ']), + ('Ꝫ', &['ꝫ']), + ('ꝫ', &['Ꝫ']), + ('Ꝭ', &['ꝭ']), + ('ꝭ', &['Ꝭ']), + ('Ꝯ', &['ꝯ']), + ('ꝯ', &['Ꝯ']), + ('Ꝺ', &['ꝺ']), + ('ꝺ', &['Ꝺ']), + ('Ꝼ', &['ꝼ']), + ('ꝼ', &['Ꝼ']), + 
('Ᵹ', &['ᵹ']), + ('Ꝿ', &['ꝿ']), + ('ꝿ', &['Ꝿ']), + ('Ꞁ', &['ꞁ']), + ('ꞁ', &['Ꞁ']), + ('Ꞃ', &['ꞃ']), + ('ꞃ', &['Ꞃ']), + ('Ꞅ', &['ꞅ']), + ('ꞅ', &['Ꞅ']), + ('Ꞇ', &['ꞇ']), + ('ꞇ', &['Ꞇ']), + ('Ꞌ', &['ꞌ']), + ('ꞌ', &['Ꞌ']), + ('Ɥ', &['ɥ']), + ('Ꞑ', &['ꞑ']), + ('ꞑ', &['Ꞑ']), + ('Ꞓ', &['ꞓ']), + ('ꞓ', &['Ꞓ']), + ('ꞔ', &['Ꞔ']), + ('Ꞗ', &['ꞗ']), + ('ꞗ', &['Ꞗ']), + ('Ꞙ', &['ꞙ']), + ('ꞙ', &['Ꞙ']), + ('Ꞛ', &['ꞛ']), + ('ꞛ', &['Ꞛ']), + ('Ꞝ', &['ꞝ']), + ('ꞝ', &['Ꞝ']), + ('Ꞟ', &['ꞟ']), + ('ꞟ', &['Ꞟ']), + ('Ꞡ', &['ꞡ']), + ('ꞡ', &['Ꞡ']), + ('Ꞣ', &['ꞣ']), + ('ꞣ', &['Ꞣ']), + ('Ꞥ', &['ꞥ']), + ('ꞥ', &['Ꞥ']), + ('Ꞧ', &['ꞧ']), + ('ꞧ', &['Ꞧ']), + ('Ꞩ', &['ꞩ']), + ('ꞩ', &['Ꞩ']), + ('Ɦ', &['ɦ']), + ('Ɜ', &['ɜ']), + ('Ɡ', &['ɡ']), + ('Ɬ', &['ɬ']), + ('Ɪ', &['ɪ']), + ('Ʞ', &['ʞ']), + ('Ʇ', &['ʇ']), + ('Ʝ', &['ʝ']), + ('Ꭓ', &['ꭓ']), + ('Ꞵ', &['ꞵ']), + ('ꞵ', &['Ꞵ']), + ('Ꞷ', &['ꞷ']), + ('ꞷ', &['Ꞷ']), + ('Ꞹ', &['ꞹ']), + ('ꞹ', &['Ꞹ']), + ('Ꞻ', &['ꞻ']), + ('ꞻ', &['Ꞻ']), + ('Ꞽ', &['ꞽ']), + ('ꞽ', &['Ꞽ']), + ('Ꞿ', &['ꞿ']), + ('ꞿ', &['Ꞿ']), + ('Ꟁ', &['ꟁ']), + ('ꟁ', &['Ꟁ']), + ('Ꟃ', &['ꟃ']), + ('ꟃ', &['Ꟃ']), + ('Ꞔ', &['ꞔ']), + ('Ʂ', &['ʂ']), + ('Ᶎ', &['ᶎ']), + ('Ꟈ', &['ꟈ']), + ('ꟈ', &['Ꟈ']), + ('Ꟊ', &['ꟊ']), + ('ꟊ', &['Ꟊ']), + ('Ɤ', &['ɤ']), + ('Ꟍ', &['ꟍ']), + ('ꟍ', &['Ꟍ']), + ('Ꟑ', &['ꟑ']), + ('ꟑ', &['Ꟑ']), + ('Ꟗ', &['ꟗ']), + ('ꟗ', &['Ꟗ']), + ('Ꟙ', &['ꟙ']), + ('ꟙ', &['Ꟙ']), + ('Ꟛ', &['ꟛ']), + ('ꟛ', &['Ꟛ']), + ('Ƛ', &['ƛ']), + ('Ꟶ', &['ꟶ']), + ('ꟶ', &['Ꟶ']), + ('ꭓ', &['Ꭓ']), + ('ꭰ', &['Ꭰ']), + ('ꭱ', &['Ꭱ']), + ('ꭲ', &['Ꭲ']), + ('ꭳ', &['Ꭳ']), + ('ꭴ', &['Ꭴ']), + ('ꭵ', &['Ꭵ']), + ('ꭶ', &['Ꭶ']), + ('ꭷ', &['Ꭷ']), + ('ꭸ', &['Ꭸ']), + ('ꭹ', &['Ꭹ']), + ('ꭺ', &['Ꭺ']), + ('ꭻ', &['Ꭻ']), + ('ꭼ', &['Ꭼ']), + ('ꭽ', &['Ꭽ']), + ('ꭾ', &['Ꭾ']), + ('ꭿ', &['Ꭿ']), + ('ꮀ', &['Ꮀ']), + ('ꮁ', &['Ꮁ']), + ('ꮂ', &['Ꮂ']), + ('ꮃ', &['Ꮃ']), + ('ꮄ', &['Ꮄ']), + ('ꮅ', &['Ꮅ']), + ('ꮆ', &['Ꮆ']), + ('ꮇ', &['Ꮇ']), + ('ꮈ', &['Ꮈ']), + ('ꮉ', &['Ꮉ']), + ('ꮊ', &['Ꮊ']), + ('ꮋ', &['Ꮋ']), + ('ꮌ', &['Ꮌ']), + ('ꮍ', &['Ꮍ']), + ('ꮎ', &['Ꮎ']), + ('ꮏ', 
&['Ꮏ']), + ('ꮐ', &['Ꮐ']), + ('ꮑ', &['Ꮑ']), + ('ꮒ', &['Ꮒ']), + ('ꮓ', &['Ꮓ']), + ('ꮔ', &['Ꮔ']), + ('ꮕ', &['Ꮕ']), + ('ꮖ', &['Ꮖ']), + ('ꮗ', &['Ꮗ']), + ('ꮘ', &['Ꮘ']), + ('ꮙ', &['Ꮙ']), + ('ꮚ', &['Ꮚ']), + ('ꮛ', &['Ꮛ']), + ('ꮜ', &['Ꮜ']), + ('ꮝ', &['Ꮝ']), + ('ꮞ', &['Ꮞ']), + ('ꮟ', &['Ꮟ']), + ('ꮠ', &['Ꮠ']), + ('ꮡ', &['Ꮡ']), + ('ꮢ', &['Ꮢ']), + ('ꮣ', &['Ꮣ']), + ('ꮤ', &['Ꮤ']), + ('ꮥ', &['Ꮥ']), + ('ꮦ', &['Ꮦ']), + ('ꮧ', &['Ꮧ']), + ('ꮨ', &['Ꮨ']), + ('ꮩ', &['Ꮩ']), + ('ꮪ', &['Ꮪ']), + ('ꮫ', &['Ꮫ']), + ('ꮬ', &['Ꮬ']), + ('ꮭ', &['Ꮭ']), + ('ꮮ', &['Ꮮ']), + ('ꮯ', &['Ꮯ']), + ('ꮰ', &['Ꮰ']), + ('ꮱ', &['Ꮱ']), + ('ꮲ', &['Ꮲ']), + ('ꮳ', &['Ꮳ']), + ('ꮴ', &['Ꮴ']), + ('ꮵ', &['Ꮵ']), + ('ꮶ', &['Ꮶ']), + ('ꮷ', &['Ꮷ']), + ('ꮸ', &['Ꮸ']), + ('ꮹ', &['Ꮹ']), + ('ꮺ', &['Ꮺ']), + ('ꮻ', &['Ꮻ']), + ('ꮼ', &['Ꮼ']), + ('ꮽ', &['Ꮽ']), + ('ꮾ', &['Ꮾ']), + ('ꮿ', &['Ꮿ']), + ('ſt', &['st']), + ('st', &['ſt']), + ('A', &['a']), + ('B', &['b']), + ('C', &['c']), + ('D', &['d']), + ('E', &['e']), + ('F', &['f']), + ('G', &['g']), + ('H', &['h']), + ('I', &['i']), + ('J', &['j']), + ('K', &['k']), + ('L', &['l']), + ('M', &['m']), + ('N', &['n']), + ('O', &['o']), + ('P', &['p']), + ('Q', &['q']), + ('R', &['r']), + ('S', &['s']), + ('T', &['t']), + ('U', &['u']), + ('V', &['v']), + ('W', &['w']), + ('X', &['x']), + ('Y', &['y']), + ('Z', &['z']), + ('a', &['A']), + ('b', &['B']), + ('c', &['C']), + ('d', &['D']), + ('e', &['E']), + ('f', &['F']), + ('g', &['G']), + ('h', &['H']), + ('i', &['I']), + ('j', &['J']), + ('k', &['K']), + ('l', &['L']), + ('m', &['M']), + ('n', &['N']), + ('o', &['O']), + ('p', &['P']), + ('q', &['Q']), + ('r', &['R']), + ('s', &['S']), + ('t', &['T']), + ('u', &['U']), + ('v', &['V']), + ('w', &['W']), + ('x', &['X']), + ('y', &['Y']), + ('z', &['Z']), + ('𐐀', &['𐐨']), + ('𐐁', &['𐐩']), + ('𐐂', &['𐐪']), + ('𐐃', &['𐐫']), + ('𐐄', &['𐐬']), + ('𐐅', &['𐐭']), + ('𐐆', &['𐐮']), + ('𐐇', &['𐐯']), + ('𐐈', &['𐐰']), + ('𐐉', &['𐐱']), + ('𐐊', &['𐐲']), + ('𐐋', &['𐐳']), + ('𐐌', &['𐐴']), + ('𐐍', &['𐐵']), + ('𐐎', 
&['𐐶']), + ('𐐏', &['𐐷']), + ('𐐐', &['𐐸']), + ('𐐑', &['𐐹']), + ('𐐒', &['𐐺']), + ('𐐓', &['𐐻']), + ('𐐔', &['𐐼']), + ('𐐕', &['𐐽']), + ('𐐖', &['𐐾']), + ('𐐗', &['𐐿']), + ('𐐘', &['𐑀']), + ('𐐙', &['𐑁']), + ('𐐚', &['𐑂']), + ('𐐛', &['𐑃']), + ('𐐜', &['𐑄']), + ('𐐝', &['𐑅']), + ('𐐞', &['𐑆']), + ('𐐟', &['𐑇']), + ('𐐠', &['𐑈']), + ('𐐡', &['𐑉']), + ('𐐢', &['𐑊']), + ('𐐣', &['𐑋']), + ('𐐤', &['𐑌']), + ('𐐥', &['𐑍']), + ('𐐦', &['𐑎']), + ('𐐧', &['𐑏']), + ('𐐨', &['𐐀']), + ('𐐩', &['𐐁']), + ('𐐪', &['𐐂']), + ('𐐫', &['𐐃']), + ('𐐬', &['𐐄']), + ('𐐭', &['𐐅']), + ('𐐮', &['𐐆']), + ('𐐯', &['𐐇']), + ('𐐰', &['𐐈']), + ('𐐱', &['𐐉']), + ('𐐲', &['𐐊']), + ('𐐳', &['𐐋']), + ('𐐴', &['𐐌']), + ('𐐵', &['𐐍']), + ('𐐶', &['𐐎']), + ('𐐷', &['𐐏']), + ('𐐸', &['𐐐']), + ('𐐹', &['𐐑']), + ('𐐺', &['𐐒']), + ('𐐻', &['𐐓']), + ('𐐼', &['𐐔']), + ('𐐽', &['𐐕']), + ('𐐾', &['𐐖']), + ('𐐿', &['𐐗']), + ('𐑀', &['𐐘']), + ('𐑁', &['𐐙']), + ('𐑂', &['𐐚']), + ('𐑃', &['𐐛']), + ('𐑄', &['𐐜']), + ('𐑅', &['𐐝']), + ('𐑆', &['𐐞']), + ('𐑇', &['𐐟']), + ('𐑈', &['𐐠']), + ('𐑉', &['𐐡']), + ('𐑊', &['𐐢']), + ('𐑋', &['𐐣']), + ('𐑌', &['𐐤']), + ('𐑍', &['𐐥']), + ('𐑎', &['𐐦']), + ('𐑏', &['𐐧']), + ('𐒰', &['𐓘']), + ('𐒱', &['𐓙']), + ('𐒲', &['𐓚']), + ('𐒳', &['𐓛']), + ('𐒴', &['𐓜']), + ('𐒵', &['𐓝']), + ('𐒶', &['𐓞']), + ('𐒷', &['𐓟']), + ('𐒸', &['𐓠']), + ('𐒹', &['𐓡']), + ('𐒺', &['𐓢']), + ('𐒻', &['𐓣']), + ('𐒼', &['𐓤']), + ('𐒽', &['𐓥']), + ('𐒾', &['𐓦']), + ('𐒿', &['𐓧']), + ('𐓀', &['𐓨']), + ('𐓁', &['𐓩']), + ('𐓂', &['𐓪']), + ('𐓃', &['𐓫']), + ('𐓄', &['𐓬']), + ('𐓅', &['𐓭']), + ('𐓆', &['𐓮']), + ('𐓇', &['𐓯']), + ('𐓈', &['𐓰']), + ('𐓉', &['𐓱']), + ('𐓊', &['𐓲']), + ('𐓋', &['𐓳']), + ('𐓌', &['𐓴']), + ('𐓍', &['𐓵']), + ('𐓎', &['𐓶']), + ('𐓏', &['𐓷']), + ('𐓐', &['𐓸']), + ('𐓑', &['𐓹']), + ('𐓒', &['𐓺']), + ('𐓓', &['𐓻']), + ('𐓘', &['𐒰']), + ('𐓙', &['𐒱']), + ('𐓚', &['𐒲']), + ('𐓛', &['𐒳']), + ('𐓜', &['𐒴']), + ('𐓝', &['𐒵']), + ('𐓞', &['𐒶']), + ('𐓟', &['𐒷']), + ('𐓠', &['𐒸']), + ('𐓡', &['𐒹']), + ('𐓢', &['𐒺']), + ('𐓣', &['𐒻']), + ('𐓤', &['𐒼']), + ('𐓥', &['𐒽']), + ('𐓦', &['𐒾']), + ('𐓧', &['𐒿']), + 
('𐓨', &['𐓀']), + ('𐓩', &['𐓁']), + ('𐓪', &['𐓂']), + ('𐓫', &['𐓃']), + ('𐓬', &['𐓄']), + ('𐓭', &['𐓅']), + ('𐓮', &['𐓆']), + ('𐓯', &['𐓇']), + ('𐓰', &['𐓈']), + ('𐓱', &['𐓉']), + ('𐓲', &['𐓊']), + ('𐓳', &['𐓋']), + ('𐓴', &['𐓌']), + ('𐓵', &['𐓍']), + ('𐓶', &['𐓎']), + ('𐓷', &['𐓏']), + ('𐓸', &['𐓐']), + ('𐓹', &['𐓑']), + ('𐓺', &['𐓒']), + ('𐓻', &['𐓓']), + ('𐕰', &['𐖗']), + ('𐕱', &['𐖘']), + ('𐕲', &['𐖙']), + ('𐕳', &['𐖚']), + ('𐕴', &['𐖛']), + ('𐕵', &['𐖜']), + ('𐕶', &['𐖝']), + ('𐕷', &['𐖞']), + ('𐕸', &['𐖟']), + ('𐕹', &['𐖠']), + ('𐕺', &['𐖡']), + ('𐕼', &['𐖣']), + ('𐕽', &['𐖤']), + ('𐕾', &['𐖥']), + ('𐕿', &['𐖦']), + ('𐖀', &['𐖧']), + ('𐖁', &['𐖨']), + ('𐖂', &['𐖩']), + ('𐖃', &['𐖪']), + ('𐖄', &['𐖫']), + ('𐖅', &['𐖬']), + ('𐖆', &['𐖭']), + ('𐖇', &['𐖮']), + ('𐖈', &['𐖯']), + ('𐖉', &['𐖰']), + ('𐖊', &['𐖱']), + ('𐖌', &['𐖳']), + ('𐖍', &['𐖴']), + ('𐖎', &['𐖵']), + ('𐖏', &['𐖶']), + ('𐖐', &['𐖷']), + ('𐖑', &['𐖸']), + ('𐖒', &['𐖹']), + ('𐖔', &['𐖻']), + ('𐖕', &['𐖼']), + ('𐖗', &['𐕰']), + ('𐖘', &['𐕱']), + ('𐖙', &['𐕲']), + ('𐖚', &['𐕳']), + ('𐖛', &['𐕴']), + ('𐖜', &['𐕵']), + ('𐖝', &['𐕶']), + ('𐖞', &['𐕷']), + ('𐖟', &['𐕸']), + ('𐖠', &['𐕹']), + ('𐖡', &['𐕺']), + ('𐖣', &['𐕼']), + ('𐖤', &['𐕽']), + ('𐖥', &['𐕾']), + ('𐖦', &['𐕿']), + ('𐖧', &['𐖀']), + ('𐖨', &['𐖁']), + ('𐖩', &['𐖂']), + ('𐖪', &['𐖃']), + ('𐖫', &['𐖄']), + ('𐖬', &['𐖅']), + ('𐖭', &['𐖆']), + ('𐖮', &['𐖇']), + ('𐖯', &['𐖈']), + ('𐖰', &['𐖉']), + ('𐖱', &['𐖊']), + ('𐖳', &['𐖌']), + ('𐖴', &['𐖍']), + ('𐖵', &['𐖎']), + ('𐖶', &['𐖏']), + ('𐖷', &['𐖐']), + ('𐖸', &['𐖑']), + ('𐖹', &['𐖒']), + ('𐖻', &['𐖔']), + ('𐖼', &['𐖕']), + ('𐲀', &['𐳀']), + ('𐲁', &['𐳁']), + ('𐲂', &['𐳂']), + ('𐲃', &['𐳃']), + ('𐲄', &['𐳄']), + ('𐲅', &['𐳅']), + ('𐲆', &['𐳆']), + ('𐲇', &['𐳇']), + ('𐲈', &['𐳈']), + ('𐲉', &['𐳉']), + ('𐲊', &['𐳊']), + ('𐲋', &['𐳋']), + ('𐲌', &['𐳌']), + ('𐲍', &['𐳍']), + ('𐲎', &['𐳎']), + ('𐲏', &['𐳏']), + ('𐲐', &['𐳐']), + ('𐲑', &['𐳑']), + ('𐲒', &['𐳒']), + ('𐲓', &['𐳓']), + ('𐲔', &['𐳔']), + ('𐲕', &['𐳕']), + ('𐲖', &['𐳖']), + ('𐲗', &['𐳗']), + ('𐲘', &['𐳘']), + ('𐲙', &['𐳙']), + ('𐲚', &['𐳚']), + ('𐲛', 
&['𐳛']), + ('𐲜', &['𐳜']), + ('𐲝', &['𐳝']), + ('𐲞', &['𐳞']), + ('𐲟', &['𐳟']), + ('𐲠', &['𐳠']), + ('𐲡', &['𐳡']), + ('𐲢', &['𐳢']), + ('𐲣', &['𐳣']), + ('𐲤', &['𐳤']), + ('𐲥', &['𐳥']), + ('𐲦', &['𐳦']), + ('𐲧', &['𐳧']), + ('𐲨', &['𐳨']), + ('𐲩', &['𐳩']), + ('𐲪', &['𐳪']), + ('𐲫', &['𐳫']), + ('𐲬', &['𐳬']), + ('𐲭', &['𐳭']), + ('𐲮', &['𐳮']), + ('𐲯', &['𐳯']), + ('𐲰', &['𐳰']), + ('𐲱', &['𐳱']), + ('𐲲', &['𐳲']), + ('𐳀', &['𐲀']), + ('𐳁', &['𐲁']), + ('𐳂', &['𐲂']), + ('𐳃', &['𐲃']), + ('𐳄', &['𐲄']), + ('𐳅', &['𐲅']), + ('𐳆', &['𐲆']), + ('𐳇', &['𐲇']), + ('𐳈', &['𐲈']), + ('𐳉', &['𐲉']), + ('𐳊', &['𐲊']), + ('𐳋', &['𐲋']), + ('𐳌', &['𐲌']), + ('𐳍', &['𐲍']), + ('𐳎', &['𐲎']), + ('𐳏', &['𐲏']), + ('𐳐', &['𐲐']), + ('𐳑', &['𐲑']), + ('𐳒', &['𐲒']), + ('𐳓', &['𐲓']), + ('𐳔', &['𐲔']), + ('𐳕', &['𐲕']), + ('𐳖', &['𐲖']), + ('𐳗', &['𐲗']), + ('𐳘', &['𐲘']), + ('𐳙', &['𐲙']), + ('𐳚', &['𐲚']), + ('𐳛', &['𐲛']), + ('𐳜', &['𐲜']), + ('𐳝', &['𐲝']), + ('𐳞', &['𐲞']), + ('𐳟', &['𐲟']), + ('𐳠', &['𐲠']), + ('𐳡', &['𐲡']), + ('𐳢', &['𐲢']), + ('𐳣', &['𐲣']), + ('𐳤', &['𐲤']), + ('𐳥', &['𐲥']), + ('𐳦', &['𐲦']), + ('𐳧', &['𐲧']), + ('𐳨', &['𐲨']), + ('𐳩', &['𐲩']), + ('𐳪', &['𐲪']), + ('𐳫', &['𐲫']), + ('𐳬', &['𐲬']), + ('𐳭', &['𐲭']), + ('𐳮', &['𐲮']), + ('𐳯', &['𐲯']), + ('𐳰', &['𐲰']), + ('𐳱', &['𐲱']), + ('𐳲', &['𐲲']), + ('𐵐', &['𐵰']), + ('𐵑', &['𐵱']), + ('𐵒', &['𐵲']), + ('𐵓', &['𐵳']), + ('𐵔', &['𐵴']), + ('𐵕', &['𐵵']), + ('𐵖', &['𐵶']), + ('𐵗', &['𐵷']), + ('𐵘', &['𐵸']), + ('𐵙', &['𐵹']), + ('𐵚', &['𐵺']), + ('𐵛', &['𐵻']), + ('𐵜', &['𐵼']), + ('𐵝', &['𐵽']), + ('𐵞', &['𐵾']), + ('𐵟', &['𐵿']), + ('𐵠', &['𐶀']), + ('𐵡', &['𐶁']), + ('𐵢', &['𐶂']), + ('𐵣', &['𐶃']), + ('𐵤', &['𐶄']), + ('𐵥', &['𐶅']), + ('𐵰', &['𐵐']), + ('𐵱', &['𐵑']), + ('𐵲', &['𐵒']), + ('𐵳', &['𐵓']), + ('𐵴', &['𐵔']), + ('𐵵', &['𐵕']), + ('𐵶', &['𐵖']), + ('𐵷', &['𐵗']), + ('𐵸', &['𐵘']), + ('𐵹', &['𐵙']), + ('𐵺', &['𐵚']), + ('𐵻', &['𐵛']), + ('𐵼', &['𐵜']), + ('𐵽', &['𐵝']), + ('𐵾', &['𐵞']), + ('𐵿', &['𐵟']), + ('𐶀', &['𐵠']), + ('𐶁', &['𐵡']), + ('𐶂', &['𐵢']), + ('𐶃', &['𐵣']), + ('𐶄', &['𐵤']), + 
('𐶅', &['𐵥']), + ('𑢠', &['𑣀']), + ('𑢡', &['𑣁']), + ('𑢢', &['𑣂']), + ('𑢣', &['𑣃']), + ('𑢤', &['𑣄']), + ('𑢥', &['𑣅']), + ('𑢦', &['𑣆']), + ('𑢧', &['𑣇']), + ('𑢨', &['𑣈']), + ('𑢩', &['𑣉']), + ('𑢪', &['𑣊']), + ('𑢫', &['𑣋']), + ('𑢬', &['𑣌']), + ('𑢭', &['𑣍']), + ('𑢮', &['𑣎']), + ('𑢯', &['𑣏']), + ('𑢰', &['𑣐']), + ('𑢱', &['𑣑']), + ('𑢲', &['𑣒']), + ('𑢳', &['𑣓']), + ('𑢴', &['𑣔']), + ('𑢵', &['𑣕']), + ('𑢶', &['𑣖']), + ('𑢷', &['𑣗']), + ('𑢸', &['𑣘']), + ('𑢹', &['𑣙']), + ('𑢺', &['𑣚']), + ('𑢻', &['𑣛']), + ('𑢼', &['𑣜']), + ('𑢽', &['𑣝']), + ('𑢾', &['𑣞']), + ('𑢿', &['𑣟']), + ('𑣀', &['𑢠']), + ('𑣁', &['𑢡']), + ('𑣂', &['𑢢']), + ('𑣃', &['𑢣']), + ('𑣄', &['𑢤']), + ('𑣅', &['𑢥']), + ('𑣆', &['𑢦']), + ('𑣇', &['𑢧']), + ('𑣈', &['𑢨']), + ('𑣉', &['𑢩']), + ('𑣊', &['𑢪']), + ('𑣋', &['𑢫']), + ('𑣌', &['𑢬']), + ('𑣍', &['𑢭']), + ('𑣎', &['𑢮']), + ('𑣏', &['𑢯']), + ('𑣐', &['𑢰']), + ('𑣑', &['𑢱']), + ('𑣒', &['𑢲']), + ('𑣓', &['𑢳']), + ('𑣔', &['𑢴']), + ('𑣕', &['𑢵']), + ('𑣖', &['𑢶']), + ('𑣗', &['𑢷']), + ('𑣘', &['𑢸']), + ('𑣙', &['𑢹']), + ('𑣚', &['𑢺']), + ('𑣛', &['𑢻']), + ('𑣜', &['𑢼']), + ('𑣝', &['𑢽']), + ('𑣞', &['𑢾']), + ('𑣟', &['𑢿']), + ('𖹀', &['𖹠']), + ('𖹁', &['𖹡']), + ('𖹂', &['𖹢']), + ('𖹃', &['𖹣']), + ('𖹄', &['𖹤']), + ('𖹅', &['𖹥']), + ('𖹆', &['𖹦']), + ('𖹇', &['𖹧']), + ('𖹈', &['𖹨']), + ('𖹉', &['𖹩']), + ('𖹊', &['𖹪']), + ('𖹋', &['𖹫']), + ('𖹌', &['𖹬']), + ('𖹍', &['𖹭']), + ('𖹎', &['𖹮']), + ('𖹏', &['𖹯']), + ('𖹐', &['𖹰']), + ('𖹑', &['𖹱']), + ('𖹒', &['𖹲']), + ('𖹓', &['𖹳']), + ('𖹔', &['𖹴']), + ('𖹕', &['𖹵']), + ('𖹖', &['𖹶']), + ('𖹗', &['𖹷']), + ('𖹘', &['𖹸']), + ('𖹙', &['𖹹']), + ('𖹚', &['𖹺']), + ('𖹛', &['𖹻']), + ('𖹜', &['𖹼']), + ('𖹝', &['𖹽']), + ('𖹞', &['𖹾']), + ('𖹟', &['𖹿']), + ('𖹠', &['𖹀']), + ('𖹡', &['𖹁']), + ('𖹢', &['𖹂']), + ('𖹣', &['𖹃']), + ('𖹤', &['𖹄']), + ('𖹥', &['𖹅']), + ('𖹦', &['𖹆']), + ('𖹧', &['𖹇']), + ('𖹨', &['𖹈']), + ('𖹩', &['𖹉']), + ('𖹪', &['𖹊']), + ('𖹫', &['𖹋']), + ('𖹬', &['𖹌']), + ('𖹭', &['𖹍']), + ('𖹮', &['𖹎']), + ('𖹯', &['𖹏']), + ('𖹰', &['𖹐']), + ('𖹱', &['𖹑']), + ('𖹲', &['𖹒']), + ('𖹳', &['𖹓']), + ('𖹴', 
&['𖹔']), + ('𖹵', &['𖹕']), + ('𖹶', &['𖹖']), + ('𖹷', &['𖹗']), + ('𖹸', &['𖹘']), + ('𖹹', &['𖹙']), + ('𖹺', &['𖹚']), + ('𖹻', &['𖹛']), + ('𖹼', &['𖹜']), + ('𖹽', &['𖹝']), + ('𖹾', &['𖹞']), + ('𖹿', &['𖹟']), + ('𞤀', &['𞤢']), + ('𞤁', &['𞤣']), + ('𞤂', &['𞤤']), + ('𞤃', &['𞤥']), + ('𞤄', &['𞤦']), + ('𞤅', &['𞤧']), + ('𞤆', &['𞤨']), + ('𞤇', &['𞤩']), + ('𞤈', &['𞤪']), + ('𞤉', &['𞤫']), + ('𞤊', &['𞤬']), + ('𞤋', &['𞤭']), + ('𞤌', &['𞤮']), + ('𞤍', &['𞤯']), + ('𞤎', &['𞤰']), + ('𞤏', &['𞤱']), + ('𞤐', &['𞤲']), + ('𞤑', &['𞤳']), + ('𞤒', &['𞤴']), + ('𞤓', &['𞤵']), + ('𞤔', &['𞤶']), + ('𞤕', &['𞤷']), + ('𞤖', &['𞤸']), + ('𞤗', &['𞤹']), + ('𞤘', &['𞤺']), + ('𞤙', &['𞤻']), + ('𞤚', &['𞤼']), + ('𞤛', &['𞤽']), + ('𞤜', &['𞤾']), + ('𞤝', &['𞤿']), + ('𞤞', &['𞥀']), + ('𞤟', &['𞥁']), + ('𞤠', &['𞥂']), + ('𞤡', &['𞥃']), + ('𞤢', &['𞤀']), + ('𞤣', &['𞤁']), + ('𞤤', &['𞤂']), + ('𞤥', &['𞤃']), + ('𞤦', &['𞤄']), + ('𞤧', &['𞤅']), + ('𞤨', &['𞤆']), + ('𞤩', &['𞤇']), + ('𞤪', &['𞤈']), + ('𞤫', &['𞤉']), + ('𞤬', &['𞤊']), + ('𞤭', &['𞤋']), + ('𞤮', &['𞤌']), + ('𞤯', &['𞤍']), + ('𞤰', &['𞤎']), + ('𞤱', &['𞤏']), + ('𞤲', &['𞤐']), + ('𞤳', &['𞤑']), + ('𞤴', &['𞤒']), + ('𞤵', &['𞤓']), + ('𞤶', &['𞤔']), + ('𞤷', &['𞤕']), + ('𞤸', &['𞤖']), + ('𞤹', &['𞤗']), + ('𞤺', &['𞤘']), + ('𞤻', &['𞤙']), + ('𞤼', &['𞤚']), + ('𞤽', &['𞤛']), + ('𞤾', &['𞤜']), + ('𞤿', &['𞤝']), + ('𞥀', &['𞤞']), + ('𞥁', &['𞤟']), + ('𞥂', &['𞤠']), + ('𞥃', &['𞤡']), +]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/general_category.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/general_category.rs new file mode 100644 index 0000000000000000000000000000000000000000..6ff6b5384db8369de6e76faa615433d8da3e58a0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/general_category.rs @@ -0,0 +1,6717 @@ +// DO NOT EDIT THIS FILE. 
IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate general-category ucd-16.0.0 --chars --exclude surrogate +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("Cased_Letter", CASED_LETTER), + ("Close_Punctuation", CLOSE_PUNCTUATION), + ("Connector_Punctuation", CONNECTOR_PUNCTUATION), + ("Control", CONTROL), + ("Currency_Symbol", CURRENCY_SYMBOL), + ("Dash_Punctuation", DASH_PUNCTUATION), + ("Decimal_Number", DECIMAL_NUMBER), + ("Enclosing_Mark", ENCLOSING_MARK), + ("Final_Punctuation", FINAL_PUNCTUATION), + ("Format", FORMAT), + ("Initial_Punctuation", INITIAL_PUNCTUATION), + ("Letter", LETTER), + ("Letter_Number", LETTER_NUMBER), + ("Line_Separator", LINE_SEPARATOR), + ("Lowercase_Letter", LOWERCASE_LETTER), + ("Mark", MARK), + ("Math_Symbol", MATH_SYMBOL), + ("Modifier_Letter", MODIFIER_LETTER), + ("Modifier_Symbol", MODIFIER_SYMBOL), + ("Nonspacing_Mark", NONSPACING_MARK), + ("Number", NUMBER), + ("Open_Punctuation", OPEN_PUNCTUATION), + ("Other", OTHER), + ("Other_Letter", OTHER_LETTER), + ("Other_Number", OTHER_NUMBER), + ("Other_Punctuation", OTHER_PUNCTUATION), + ("Other_Symbol", OTHER_SYMBOL), + ("Paragraph_Separator", PARAGRAPH_SEPARATOR), + ("Private_Use", PRIVATE_USE), + ("Punctuation", PUNCTUATION), + ("Separator", SEPARATOR), + ("Space_Separator", SPACE_SEPARATOR), + ("Spacing_Mark", SPACING_MARK), + ("Symbol", SYMBOL), + ("Titlecase_Letter", TITLECASE_LETTER), + ("Unassigned", UNASSIGNED), + ("Uppercase_Letter", UPPERCASE_LETTER), +]; + +pub const CASED_LETTER: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('µ', 'µ'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ƺ'), + ('Ƽ', 'ƿ'), + ('DŽ', 'ʓ'), + ('ʕ', 'ʯ'), + ('Ͱ', 'ͳ'), + ('Ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՠ', 'ֈ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 
'Ⴭ'), + ('ა', 'ჺ'), + ('ჽ', 'ჿ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᴀ', 'ᴫ'), + ('ᵫ', 'ᵷ'), + ('ᵹ', 'ᶚ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℴ'), + ('ℹ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ↄ', 'ↄ'), + ('Ⰰ', 'ⱻ'), + ('Ȿ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('Ꙁ', 'ꙭ'), + ('Ꚁ', 'ꚛ'), + ('Ꜣ', 'ꝯ'), + ('ꝱ', 'ꞇ'), + ('Ꞌ', 'ꞎ'), + ('Ꞑ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('Ꟶ', 'ꟶ'), + ('ꟺ', 'ꟺ'), + ('ꬰ', 'ꭚ'), + ('ꭠ', 'ꭨ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('A', 'Z'), + ('a', 'z'), + ('𐐀', '𐑏'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐵐', '𐵥'), + ('𐵰', '𐶅'), + ('𑢠', '𑣟'), + ('𖹀', '𖹿'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼉'), + ('𝼋', '𝼞'), + ('𝼥', '𝼪'), + ('𞤀', '𞥃'), +]; + +pub const CLOSE_PUNCTUATION: &'static [(char, char)] = &[ + (')', ')'), + (']', ']'), + ('}', '}'), + ('༻', '༻'), + ('༽', '༽'), + ('᚜', '᚜'), + ('⁆', '⁆'), + ('⁾', '⁾'), + ('₎', '₎'), + ('⌉', '⌉'), + ('⌋', '⌋'), + ('〉', '〉'), + ('❩', '❩'), + ('❫', '❫'), + ('❭', '❭'), + ('❯', '❯'), + ('❱', '❱'), + ('❳', 
'❳'), + ('❵', '❵'), + ('⟆', '⟆'), + ('⟧', '⟧'), + ('⟩', '⟩'), + ('⟫', '⟫'), + ('⟭', '⟭'), + ('⟯', '⟯'), + ('⦄', '⦄'), + ('⦆', '⦆'), + ('⦈', '⦈'), + ('⦊', '⦊'), + ('⦌', '⦌'), + ('⦎', '⦎'), + ('⦐', '⦐'), + ('⦒', '⦒'), + ('⦔', '⦔'), + ('⦖', '⦖'), + ('⦘', '⦘'), + ('⧙', '⧙'), + ('⧛', '⧛'), + ('⧽', '⧽'), + ('⸣', '⸣'), + ('⸥', '⸥'), + ('⸧', '⸧'), + ('⸩', '⸩'), + ('⹖', '⹖'), + ('⹘', '⹘'), + ('⹚', '⹚'), + ('⹜', '⹜'), + ('〉', '〉'), + ('》', '》'), + ('」', '」'), + ('』', '』'), + ('】', '】'), + ('〕', '〕'), + ('〗', '〗'), + ('〙', '〙'), + ('〛', '〛'), + ('〞', '〟'), + ('﴾', '﴾'), + ('︘', '︘'), + ('︶', '︶'), + ('︸', '︸'), + ('︺', '︺'), + ('︼', '︼'), + ('︾', '︾'), + ('﹀', '﹀'), + ('﹂', '﹂'), + ('﹄', '﹄'), + ('﹈', '﹈'), + ('﹚', '﹚'), + ('﹜', '﹜'), + ('﹞', '﹞'), + (')', ')'), + (']', ']'), + ('}', '}'), + ('⦆', '⦆'), + ('」', '」'), +]; + +pub const CONNECTOR_PUNCTUATION: &'static [(char, char)] = &[ + ('_', '_'), + ('‿', '⁀'), + ('⁔', '⁔'), + ('︳', '︴'), + ('﹍', '﹏'), + ('_', '_'), +]; + +pub const CONTROL: &'static [(char, char)] = + &[('\0', '\u{1f}'), ('\u{7f}', '\u{9f}')]; + +pub const CURRENCY_SYMBOL: &'static [(char, char)] = &[ + ('$', '$'), + ('¢', '¥'), + ('֏', '֏'), + ('؋', '؋'), + ('߾', '߿'), + ('৲', '৳'), + ('৻', '৻'), + ('૱', '૱'), + ('௹', '௹'), + ('฿', '฿'), + ('៛', '៛'), + ('₠', '⃀'), + ('꠸', '꠸'), + ('﷼', '﷼'), + ('﹩', '﹩'), + ('$', '$'), + ('¢', '£'), + ('¥', '₩'), + ('𑿝', '𑿠'), + ('𞋿', '𞋿'), + ('𞲰', '𞲰'), +]; + +pub const DASH_PUNCTUATION: &'static [(char, char)] = &[ + ('-', '-'), + ('֊', '֊'), + ('־', '־'), + ('᐀', '᐀'), + ('᠆', '᠆'), + ('‐', '―'), + ('⸗', '⸗'), + ('⸚', '⸚'), + ('⸺', '⸻'), + ('⹀', '⹀'), + ('⹝', '⹝'), + ('〜', '〜'), + ('〰', '〰'), + ('゠', '゠'), + ('︱', '︲'), + ('﹘', '﹘'), + ('﹣', '﹣'), + ('-', '-'), + ('𐵮', '𐵮'), + ('𐺭', '𐺭'), +]; + +pub const DECIMAL_NUMBER: &'static [(char, char)] = &[ + ('0', '9'), + ('٠', '٩'), + ('۰', '۹'), + ('߀', '߉'), + ('०', '९'), + ('০', '৯'), + ('੦', '੯'), + ('૦', '૯'), + ('୦', '୯'), + ('௦', '௯'), + ('౦', '౯'), + ('೦', '೯'), + 
('൦', '൯'), + ('෦', '෯'), + ('๐', '๙'), + ('໐', '໙'), + ('༠', '༩'), + ('၀', '၉'), + ('႐', '႙'), + ('០', '៩'), + ('᠐', '᠙'), + ('᥆', '᥏'), + ('᧐', '᧙'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᭐', '᭙'), + ('᮰', '᮹'), + ('᱀', '᱉'), + ('᱐', '᱙'), + ('꘠', '꘩'), + ('꣐', '꣙'), + ('꤀', '꤉'), + ('꧐', '꧙'), + ('꧰', '꧹'), + ('꩐', '꩙'), + ('꯰', '꯹'), + ('0', '9'), + ('𐒠', '𐒩'), + ('𐴰', '𐴹'), + ('𐵀', '𐵉'), + ('𑁦', '𑁯'), + ('𑃰', '𑃹'), + ('𑄶', '𑄿'), + ('𑇐', '𑇙'), + ('𑋰', '𑋹'), + ('𑑐', '𑑙'), + ('𑓐', '𑓙'), + ('𑙐', '𑙙'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜰', '𑜹'), + ('𑣠', '𑣩'), + ('𑥐', '𑥙'), + ('𑯰', '𑯹'), + ('𑱐', '𑱙'), + ('𑵐', '𑵙'), + ('𑶠', '𑶩'), + ('𑽐', '𑽙'), + ('𖄰', '𖄹'), + ('𖩠', '𖩩'), + ('𖫀', '𖫉'), + ('𖭐', '𖭙'), + ('𖵰', '𖵹'), + ('𜳰', '𜳹'), + ('𝟎', '𝟿'), + ('𞅀', '𞅉'), + ('𞋰', '𞋹'), + ('𞓰', '𞓹'), + ('𞗱', '𞗺'), + ('𞥐', '𞥙'), + ('🯰', '🯹'), +]; + +pub const ENCLOSING_MARK: &'static [(char, char)] = &[ + ('\u{488}', '\u{489}'), + ('\u{1abe}', '\u{1abe}'), + ('\u{20dd}', '\u{20e0}'), + ('\u{20e2}', '\u{20e4}'), + ('\u{a670}', '\u{a672}'), +]; + +pub const FINAL_PUNCTUATION: &'static [(char, char)] = &[ + ('»', '»'), + ('’', '’'), + ('”', '”'), + ('›', '›'), + ('⸃', '⸃'), + ('⸅', '⸅'), + ('⸊', '⸊'), + ('⸍', '⸍'), + ('⸝', '⸝'), + ('⸡', '⸡'), +]; + +pub const FORMAT: &'static [(char, char)] = &[ + ('\u{ad}', '\u{ad}'), + ('\u{600}', '\u{605}'), + ('\u{61c}', '\u{61c}'), + ('\u{6dd}', '\u{6dd}'), + ('\u{70f}', '\u{70f}'), + ('\u{890}', '\u{891}'), + ('\u{8e2}', '\u{8e2}'), + ('\u{180e}', '\u{180e}'), + ('\u{200b}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{2064}'), + ('\u{2066}', '\u{206f}'), + ('\u{feff}', '\u{feff}'), + ('\u{fff9}', '\u{fffb}'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110cd}', '\u{110cd}'), + ('\u{13430}', '\u{1343f}'), + ('\u{1bca0}', '\u{1bca3}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{e0001}', '\u{e0001}'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const INITIAL_PUNCTUATION: &'static [(char, char)] = &[ + ('«', '«'), + ('‘', '‘'), + ('‛', '“'), + ('‟', '‟'), + ('‹', 
'‹'), + ('⸂', '⸂'), + ('⸄', '⸄'), + ('⸉', '⸉'), + ('⸌', '⸌'), + ('⸜', '⸜'), + ('⸠', '⸠'), +]; + +pub const LETTER: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('Ͱ', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('ؠ', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۥ', 'ۦ'), + ('ۮ', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॱ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 
'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ก', 'ะ'), + ('า', 'ำ'), + ('เ', 'ๆ'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ະ'), + ('າ', 'ຳ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('က', 'ဪ'), + ('ဿ', 'ဿ'), + ('ၐ', 'ၕ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၡ'), + ('ၥ', 'ၦ'), + ('ၮ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႎ', 'ႎ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛱ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ៗ', 'ៗ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢄ'), + ('ᢇ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', 'ᨖ'), + ('ᨠ', 'ᩔ'), + ('ᪧ', 'ᪧ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), + ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ↄ', 'ↄ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + 
('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('ⸯ', 'ⸯ'), + ('々', '〆'), + ('〱', '〵'), + ('〻', '〼'), + ('ぁ', 'ゖ'), + ('ゝ', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('ꙿ', 'ꚝ'), + ('ꚠ', 'ꛥ'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧏ', 'ꧏ'), + ('ꧠ', 'ꧤ'), + ('ꧦ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꩺ'), + ('ꩾ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫴ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('A', 'Z'), + ('a', 'z'), + ('ヲ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍀'), + ('𐍂', '𐍉'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐐀', '𐒝'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', 
'𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '𐴣'), + ('𐵊', '𐵥'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), + ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + ('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('𑝀', '𑝆'), + ('𑠀', '𑠫'), + ('𑢠', '𑣟'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', '𑩐'), + ('𑩜', '𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', '𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + 
('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞤀', '𞥃'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const LETTER_NUMBER: &'static [(char, char)] = &[ + ('ᛮ', 'ᛰ'), + ('Ⅰ', 'ↂ'), + ('ↅ', 'ↈ'), + ('〇', '〇'), + ('〡', '〩'), + ('〸', '〺'), + ('ꛦ', 'ꛯ'), + ('𐅀', '𐅴'), + ('𐍁', '𐍁'), + ('𐍊', '𐍊'), + ('𐏑', '𐏕'), + ('𒐀', '𒑮'), +]; + +pub const LINE_SEPARATOR: &'static [(char, char)] = + &[('\u{2028}', '\u{2028}')]; + +pub const 
LOWERCASE_LETTER: &'static [(char, char)] = &[ + ('a', 'z'), + ('µ', 'µ'), + ('ß', 'ö'), + ('ø', 'ÿ'), + ('ā', 'ā'), + ('ă', 'ă'), + ('ą', 'ą'), + ('ć', 'ć'), + ('ĉ', 'ĉ'), + ('ċ', 'ċ'), + ('č', 'č'), + ('ď', 'ď'), + ('đ', 'đ'), + ('ē', 'ē'), + ('ĕ', 'ĕ'), + ('ė', 'ė'), + ('ę', 'ę'), + ('ě', 'ě'), + ('ĝ', 'ĝ'), + ('ğ', 'ğ'), + ('ġ', 'ġ'), + ('ģ', 'ģ'), + ('ĥ', 'ĥ'), + ('ħ', 'ħ'), + ('ĩ', 'ĩ'), + ('ī', 'ī'), + ('ĭ', 'ĭ'), + ('į', 'į'), + ('ı', 'ı'), + ('ij', 'ij'), + ('ĵ', 'ĵ'), + ('ķ', 'ĸ'), + ('ĺ', 'ĺ'), + ('ļ', 'ļ'), + ('ľ', 'ľ'), + ('ŀ', 'ŀ'), + ('ł', 'ł'), + ('ń', 'ń'), + ('ņ', 'ņ'), + ('ň', 'ʼn'), + ('ŋ', 'ŋ'), + ('ō', 'ō'), + ('ŏ', 'ŏ'), + ('ő', 'ő'), + ('œ', 'œ'), + ('ŕ', 'ŕ'), + ('ŗ', 'ŗ'), + ('ř', 'ř'), + ('ś', 'ś'), + ('ŝ', 'ŝ'), + ('ş', 'ş'), + ('š', 'š'), + ('ţ', 'ţ'), + ('ť', 'ť'), + ('ŧ', 'ŧ'), + ('ũ', 'ũ'), + ('ū', 'ū'), + ('ŭ', 'ŭ'), + ('ů', 'ů'), + ('ű', 'ű'), + ('ų', 'ų'), + ('ŵ', 'ŵ'), + ('ŷ', 'ŷ'), + ('ź', 'ź'), + ('ż', 'ż'), + ('ž', 'ƀ'), + ('ƃ', 'ƃ'), + ('ƅ', 'ƅ'), + ('ƈ', 'ƈ'), + ('ƌ', 'ƍ'), + ('ƒ', 'ƒ'), + ('ƕ', 'ƕ'), + ('ƙ', 'ƛ'), + ('ƞ', 'ƞ'), + ('ơ', 'ơ'), + ('ƣ', 'ƣ'), + ('ƥ', 'ƥ'), + ('ƨ', 'ƨ'), + ('ƪ', 'ƫ'), + ('ƭ', 'ƭ'), + ('ư', 'ư'), + ('ƴ', 'ƴ'), + ('ƶ', 'ƶ'), + ('ƹ', 'ƺ'), + ('ƽ', 'ƿ'), + ('dž', 'dž'), + ('lj', 'lj'), + ('nj', 'nj'), + ('ǎ', 'ǎ'), + ('ǐ', 'ǐ'), + ('ǒ', 'ǒ'), + ('ǔ', 'ǔ'), + ('ǖ', 'ǖ'), + ('ǘ', 'ǘ'), + ('ǚ', 'ǚ'), + ('ǜ', 'ǝ'), + ('ǟ', 'ǟ'), + ('ǡ', 'ǡ'), + ('ǣ', 'ǣ'), + ('ǥ', 'ǥ'), + ('ǧ', 'ǧ'), + ('ǩ', 'ǩ'), + ('ǫ', 'ǫ'), + ('ǭ', 'ǭ'), + ('ǯ', 'ǰ'), + ('dz', 'dz'), + ('ǵ', 'ǵ'), + ('ǹ', 'ǹ'), + ('ǻ', 'ǻ'), + ('ǽ', 'ǽ'), + ('ǿ', 'ǿ'), + ('ȁ', 'ȁ'), + ('ȃ', 'ȃ'), + ('ȅ', 'ȅ'), + ('ȇ', 'ȇ'), + ('ȉ', 'ȉ'), + ('ȋ', 'ȋ'), + ('ȍ', 'ȍ'), + ('ȏ', 'ȏ'), + ('ȑ', 'ȑ'), + ('ȓ', 'ȓ'), + ('ȕ', 'ȕ'), + ('ȗ', 'ȗ'), + ('ș', 'ș'), + ('ț', 'ț'), + ('ȝ', 'ȝ'), + ('ȟ', 'ȟ'), + ('ȡ', 'ȡ'), + ('ȣ', 'ȣ'), + ('ȥ', 'ȥ'), + ('ȧ', 'ȧ'), + ('ȩ', 'ȩ'), + ('ȫ', 'ȫ'), + ('ȭ', 'ȭ'), + ('ȯ', 'ȯ'), + ('ȱ', 'ȱ'), + ('ȳ', 'ȹ'), + ('ȼ', 'ȼ'), + ('ȿ', 
'ɀ'), + ('ɂ', 'ɂ'), + ('ɇ', 'ɇ'), + ('ɉ', 'ɉ'), + ('ɋ', 'ɋ'), + ('ɍ', 'ɍ'), + ('ɏ', 'ʓ'), + ('ʕ', 'ʯ'), + ('ͱ', 'ͱ'), + ('ͳ', 'ͳ'), + ('ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('ΐ', 'ΐ'), + ('ά', 'ώ'), + ('ϐ', 'ϑ'), + ('ϕ', 'ϗ'), + ('ϙ', 'ϙ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('ϣ', 'ϣ'), + ('ϥ', 'ϥ'), + ('ϧ', 'ϧ'), + ('ϩ', 'ϩ'), + ('ϫ', 'ϫ'), + ('ϭ', 'ϭ'), + ('ϯ', 'ϳ'), + ('ϵ', 'ϵ'), + ('ϸ', 'ϸ'), + ('ϻ', 'ϼ'), + ('а', 'џ'), + ('ѡ', 'ѡ'), + ('ѣ', 'ѣ'), + ('ѥ', 'ѥ'), + ('ѧ', 'ѧ'), + ('ѩ', 'ѩ'), + ('ѫ', 'ѫ'), + ('ѭ', 'ѭ'), + ('ѯ', 'ѯ'), + ('ѱ', 'ѱ'), + ('ѳ', 'ѳ'), + ('ѵ', 'ѵ'), + ('ѷ', 'ѷ'), + ('ѹ', 'ѹ'), + ('ѻ', 'ѻ'), + ('ѽ', 'ѽ'), + ('ѿ', 'ѿ'), + ('ҁ', 'ҁ'), + ('ҋ', 'ҋ'), + ('ҍ', 'ҍ'), + ('ҏ', 'ҏ'), + ('ґ', 'ґ'), + ('ғ', 'ғ'), + ('ҕ', 'ҕ'), + ('җ', 'җ'), + ('ҙ', 'ҙ'), + ('қ', 'қ'), + ('ҝ', 'ҝ'), + ('ҟ', 'ҟ'), + ('ҡ', 'ҡ'), + ('ң', 'ң'), + ('ҥ', 'ҥ'), + ('ҧ', 'ҧ'), + ('ҩ', 'ҩ'), + ('ҫ', 'ҫ'), + ('ҭ', 'ҭ'), + ('ү', 'ү'), + ('ұ', 'ұ'), + ('ҳ', 'ҳ'), + ('ҵ', 'ҵ'), + ('ҷ', 'ҷ'), + ('ҹ', 'ҹ'), + ('һ', 'һ'), + ('ҽ', 'ҽ'), + ('ҿ', 'ҿ'), + ('ӂ', 'ӂ'), + ('ӄ', 'ӄ'), + ('ӆ', 'ӆ'), + ('ӈ', 'ӈ'), + ('ӊ', 'ӊ'), + ('ӌ', 'ӌ'), + ('ӎ', 'ӏ'), + ('ӑ', 'ӑ'), + ('ӓ', 'ӓ'), + ('ӕ', 'ӕ'), + ('ӗ', 'ӗ'), + ('ә', 'ә'), + ('ӛ', 'ӛ'), + ('ӝ', 'ӝ'), + ('ӟ', 'ӟ'), + ('ӡ', 'ӡ'), + ('ӣ', 'ӣ'), + ('ӥ', 'ӥ'), + ('ӧ', 'ӧ'), + ('ө', 'ө'), + ('ӫ', 'ӫ'), + ('ӭ', 'ӭ'), + ('ӯ', 'ӯ'), + ('ӱ', 'ӱ'), + ('ӳ', 'ӳ'), + ('ӵ', 'ӵ'), + ('ӷ', 'ӷ'), + ('ӹ', 'ӹ'), + ('ӻ', 'ӻ'), + ('ӽ', 'ӽ'), + ('ӿ', 'ӿ'), + ('ԁ', 'ԁ'), + ('ԃ', 'ԃ'), + ('ԅ', 'ԅ'), + ('ԇ', 'ԇ'), + ('ԉ', 'ԉ'), + ('ԋ', 'ԋ'), + ('ԍ', 'ԍ'), + ('ԏ', 'ԏ'), + ('ԑ', 'ԑ'), + ('ԓ', 'ԓ'), + ('ԕ', 'ԕ'), + ('ԗ', 'ԗ'), + ('ԙ', 'ԙ'), + ('ԛ', 'ԛ'), + ('ԝ', 'ԝ'), + ('ԟ', 'ԟ'), + ('ԡ', 'ԡ'), + ('ԣ', 'ԣ'), + ('ԥ', 'ԥ'), + ('ԧ', 'ԧ'), + ('ԩ', 'ԩ'), + ('ԫ', 'ԫ'), + ('ԭ', 'ԭ'), + ('ԯ', 'ԯ'), + ('ՠ', 'ֈ'), + ('ა', 'ჺ'), + ('ჽ', 'ჿ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲈ'), + ('ᲊ', 'ᲊ'), + ('ᴀ', 'ᴫ'), + ('ᵫ', 'ᵷ'), + ('ᵹ', 'ᶚ'), + ('ḁ', 'ḁ'), + ('ḃ', 'ḃ'), + ('ḅ', 'ḅ'), + 
('ḇ', 'ḇ'), + ('ḉ', 'ḉ'), + ('ḋ', 'ḋ'), + ('ḍ', 'ḍ'), + ('ḏ', 'ḏ'), + ('ḑ', 'ḑ'), + ('ḓ', 'ḓ'), + ('ḕ', 'ḕ'), + ('ḗ', 'ḗ'), + ('ḙ', 'ḙ'), + ('ḛ', 'ḛ'), + ('ḝ', 'ḝ'), + ('ḟ', 'ḟ'), + ('ḡ', 'ḡ'), + ('ḣ', 'ḣ'), + ('ḥ', 'ḥ'), + ('ḧ', 'ḧ'), + ('ḩ', 'ḩ'), + ('ḫ', 'ḫ'), + ('ḭ', 'ḭ'), + ('ḯ', 'ḯ'), + ('ḱ', 'ḱ'), + ('ḳ', 'ḳ'), + ('ḵ', 'ḵ'), + ('ḷ', 'ḷ'), + ('ḹ', 'ḹ'), + ('ḻ', 'ḻ'), + ('ḽ', 'ḽ'), + ('ḿ', 'ḿ'), + ('ṁ', 'ṁ'), + ('ṃ', 'ṃ'), + ('ṅ', 'ṅ'), + ('ṇ', 'ṇ'), + ('ṉ', 'ṉ'), + ('ṋ', 'ṋ'), + ('ṍ', 'ṍ'), + ('ṏ', 'ṏ'), + ('ṑ', 'ṑ'), + ('ṓ', 'ṓ'), + ('ṕ', 'ṕ'), + ('ṗ', 'ṗ'), + ('ṙ', 'ṙ'), + ('ṛ', 'ṛ'), + ('ṝ', 'ṝ'), + ('ṟ', 'ṟ'), + ('ṡ', 'ṡ'), + ('ṣ', 'ṣ'), + ('ṥ', 'ṥ'), + ('ṧ', 'ṧ'), + ('ṩ', 'ṩ'), + ('ṫ', 'ṫ'), + ('ṭ', 'ṭ'), + ('ṯ', 'ṯ'), + ('ṱ', 'ṱ'), + ('ṳ', 'ṳ'), + ('ṵ', 'ṵ'), + ('ṷ', 'ṷ'), + ('ṹ', 'ṹ'), + ('ṻ', 'ṻ'), + ('ṽ', 'ṽ'), + ('ṿ', 'ṿ'), + ('ẁ', 'ẁ'), + ('ẃ', 'ẃ'), + ('ẅ', 'ẅ'), + ('ẇ', 'ẇ'), + ('ẉ', 'ẉ'), + ('ẋ', 'ẋ'), + ('ẍ', 'ẍ'), + ('ẏ', 'ẏ'), + ('ẑ', 'ẑ'), + ('ẓ', 'ẓ'), + ('ẕ', 'ẝ'), + ('ẟ', 'ẟ'), + ('ạ', 'ạ'), + ('ả', 'ả'), + ('ấ', 'ấ'), + ('ầ', 'ầ'), + ('ẩ', 'ẩ'), + ('ẫ', 'ẫ'), + ('ậ', 'ậ'), + ('ắ', 'ắ'), + ('ằ', 'ằ'), + ('ẳ', 'ẳ'), + ('ẵ', 'ẵ'), + ('ặ', 'ặ'), + ('ẹ', 'ẹ'), + ('ẻ', 'ẻ'), + ('ẽ', 'ẽ'), + ('ế', 'ế'), + ('ề', 'ề'), + ('ể', 'ể'), + ('ễ', 'ễ'), + ('ệ', 'ệ'), + ('ỉ', 'ỉ'), + ('ị', 'ị'), + ('ọ', 'ọ'), + ('ỏ', 'ỏ'), + ('ố', 'ố'), + ('ồ', 'ồ'), + ('ổ', 'ổ'), + ('ỗ', 'ỗ'), + ('ộ', 'ộ'), + ('ớ', 'ớ'), + ('ờ', 'ờ'), + ('ở', 'ở'), + ('ỡ', 'ỡ'), + ('ợ', 'ợ'), + ('ụ', 'ụ'), + ('ủ', 'ủ'), + ('ứ', 'ứ'), + ('ừ', 'ừ'), + ('ử', 'ử'), + ('ữ', 'ữ'), + ('ự', 'ự'), + ('ỳ', 'ỳ'), + ('ỵ', 'ỵ'), + ('ỷ', 'ỷ'), + ('ỹ', 'ỹ'), + ('ỻ', 'ỻ'), + ('ỽ', 'ỽ'), + ('ỿ', 'ἇ'), + ('ἐ', 'ἕ'), + ('ἠ', 'ἧ'), + ('ἰ', 'ἷ'), + ('ὀ', 'ὅ'), + ('ὐ', 'ὗ'), + ('ὠ', 'ὧ'), + ('ὰ', 'ώ'), + ('ᾀ', 'ᾇ'), + ('ᾐ', 'ᾗ'), + ('ᾠ', 'ᾧ'), + ('ᾰ', 'ᾴ'), + ('ᾶ', 'ᾷ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῇ'), + ('ῐ', 'ΐ'), + ('ῖ', 'ῗ'), + ('ῠ', 'ῧ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῷ'), + ('ℊ', 'ℊ'), + ('ℎ', 'ℏ'), 
+ ('ℓ', 'ℓ'), + ('ℯ', 'ℯ'), + ('ℴ', 'ℴ'), + ('ℹ', 'ℹ'), + ('ℼ', 'ℽ'), + ('ⅆ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('ↄ', 'ↄ'), + ('ⰰ', 'ⱟ'), + ('ⱡ', 'ⱡ'), + ('ⱥ', 'ⱦ'), + ('ⱨ', 'ⱨ'), + ('ⱪ', 'ⱪ'), + ('ⱬ', 'ⱬ'), + ('ⱱ', 'ⱱ'), + ('ⱳ', 'ⱴ'), + ('ⱶ', 'ⱻ'), + ('ⲁ', 'ⲁ'), + ('ⲃ', 'ⲃ'), + ('ⲅ', 'ⲅ'), + ('ⲇ', 'ⲇ'), + ('ⲉ', 'ⲉ'), + ('ⲋ', 'ⲋ'), + ('ⲍ', 'ⲍ'), + ('ⲏ', 'ⲏ'), + ('ⲑ', 'ⲑ'), + ('ⲓ', 'ⲓ'), + ('ⲕ', 'ⲕ'), + ('ⲗ', 'ⲗ'), + ('ⲙ', 'ⲙ'), + ('ⲛ', 'ⲛ'), + ('ⲝ', 'ⲝ'), + ('ⲟ', 'ⲟ'), + ('ⲡ', 'ⲡ'), + ('ⲣ', 'ⲣ'), + ('ⲥ', 'ⲥ'), + ('ⲧ', 'ⲧ'), + ('ⲩ', 'ⲩ'), + ('ⲫ', 'ⲫ'), + ('ⲭ', 'ⲭ'), + ('ⲯ', 'ⲯ'), + ('ⲱ', 'ⲱ'), + ('ⲳ', 'ⲳ'), + ('ⲵ', 'ⲵ'), + ('ⲷ', 'ⲷ'), + ('ⲹ', 'ⲹ'), + ('ⲻ', 'ⲻ'), + ('ⲽ', 'ⲽ'), + ('ⲿ', 'ⲿ'), + ('ⳁ', 'ⳁ'), + ('ⳃ', 'ⳃ'), + ('ⳅ', 'ⳅ'), + ('ⳇ', 'ⳇ'), + ('ⳉ', 'ⳉ'), + ('ⳋ', 'ⳋ'), + ('ⳍ', 'ⳍ'), + ('ⳏ', 'ⳏ'), + ('ⳑ', 'ⳑ'), + ('ⳓ', 'ⳓ'), + ('ⳕ', 'ⳕ'), + ('ⳗ', 'ⳗ'), + ('ⳙ', 'ⳙ'), + ('ⳛ', 'ⳛ'), + ('ⳝ', 'ⳝ'), + ('ⳟ', 'ⳟ'), + ('ⳡ', 'ⳡ'), + ('ⳣ', 'ⳤ'), + ('ⳬ', 'ⳬ'), + ('ⳮ', 'ⳮ'), + ('ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ꙁ', 'ꙁ'), + ('ꙃ', 'ꙃ'), + ('ꙅ', 'ꙅ'), + ('ꙇ', 'ꙇ'), + ('ꙉ', 'ꙉ'), + ('ꙋ', 'ꙋ'), + ('ꙍ', 'ꙍ'), + ('ꙏ', 'ꙏ'), + ('ꙑ', 'ꙑ'), + ('ꙓ', 'ꙓ'), + ('ꙕ', 'ꙕ'), + ('ꙗ', 'ꙗ'), + ('ꙙ', 'ꙙ'), + ('ꙛ', 'ꙛ'), + ('ꙝ', 'ꙝ'), + ('ꙟ', 'ꙟ'), + ('ꙡ', 'ꙡ'), + ('ꙣ', 'ꙣ'), + ('ꙥ', 'ꙥ'), + ('ꙧ', 'ꙧ'), + ('ꙩ', 'ꙩ'), + ('ꙫ', 'ꙫ'), + ('ꙭ', 'ꙭ'), + ('ꚁ', 'ꚁ'), + ('ꚃ', 'ꚃ'), + ('ꚅ', 'ꚅ'), + ('ꚇ', 'ꚇ'), + ('ꚉ', 'ꚉ'), + ('ꚋ', 'ꚋ'), + ('ꚍ', 'ꚍ'), + ('ꚏ', 'ꚏ'), + ('ꚑ', 'ꚑ'), + ('ꚓ', 'ꚓ'), + ('ꚕ', 'ꚕ'), + ('ꚗ', 'ꚗ'), + ('ꚙ', 'ꚙ'), + ('ꚛ', 'ꚛ'), + ('ꜣ', 'ꜣ'), + ('ꜥ', 'ꜥ'), + ('ꜧ', 'ꜧ'), + ('ꜩ', 'ꜩ'), + ('ꜫ', 'ꜫ'), + ('ꜭ', 'ꜭ'), + ('ꜯ', 'ꜱ'), + ('ꜳ', 'ꜳ'), + ('ꜵ', 'ꜵ'), + ('ꜷ', 'ꜷ'), + ('ꜹ', 'ꜹ'), + ('ꜻ', 'ꜻ'), + ('ꜽ', 'ꜽ'), + ('ꜿ', 'ꜿ'), + ('ꝁ', 'ꝁ'), + ('ꝃ', 'ꝃ'), + ('ꝅ', 'ꝅ'), + ('ꝇ', 'ꝇ'), + ('ꝉ', 'ꝉ'), + ('ꝋ', 'ꝋ'), + ('ꝍ', 'ꝍ'), + ('ꝏ', 'ꝏ'), + ('ꝑ', 'ꝑ'), + ('ꝓ', 'ꝓ'), + ('ꝕ', 'ꝕ'), + ('ꝗ', 'ꝗ'), + ('ꝙ', 'ꝙ'), + ('ꝛ', 'ꝛ'), + ('ꝝ', 'ꝝ'), + ('ꝟ', 'ꝟ'), + ('ꝡ', 'ꝡ'), + ('ꝣ', 'ꝣ'), + ('ꝥ', 
'ꝥ'), + ('ꝧ', 'ꝧ'), + ('ꝩ', 'ꝩ'), + ('ꝫ', 'ꝫ'), + ('ꝭ', 'ꝭ'), + ('ꝯ', 'ꝯ'), + ('ꝱ', 'ꝸ'), + ('ꝺ', 'ꝺ'), + ('ꝼ', 'ꝼ'), + ('ꝿ', 'ꝿ'), + ('ꞁ', 'ꞁ'), + ('ꞃ', 'ꞃ'), + ('ꞅ', 'ꞅ'), + ('ꞇ', 'ꞇ'), + ('ꞌ', 'ꞌ'), + ('ꞎ', 'ꞎ'), + ('ꞑ', 'ꞑ'), + ('ꞓ', 'ꞕ'), + ('ꞗ', 'ꞗ'), + ('ꞙ', 'ꞙ'), + ('ꞛ', 'ꞛ'), + ('ꞝ', 'ꞝ'), + ('ꞟ', 'ꞟ'), + ('ꞡ', 'ꞡ'), + ('ꞣ', 'ꞣ'), + ('ꞥ', 'ꞥ'), + ('ꞧ', 'ꞧ'), + ('ꞩ', 'ꞩ'), + ('ꞯ', 'ꞯ'), + ('ꞵ', 'ꞵ'), + ('ꞷ', 'ꞷ'), + ('ꞹ', 'ꞹ'), + ('ꞻ', 'ꞻ'), + ('ꞽ', 'ꞽ'), + ('ꞿ', 'ꞿ'), + ('ꟁ', 'ꟁ'), + ('ꟃ', 'ꟃ'), + ('ꟈ', 'ꟈ'), + ('ꟊ', 'ꟊ'), + ('ꟍ', 'ꟍ'), + ('ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'ꟕ'), + ('ꟗ', 'ꟗ'), + ('ꟙ', 'ꟙ'), + ('ꟛ', 'ꟛ'), + ('ꟶ', 'ꟶ'), + ('ꟺ', 'ꟺ'), + ('ꬰ', 'ꭚ'), + ('ꭠ', 'ꭨ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('a', 'z'), + ('𐐨', '𐑏'), + ('𐓘', '𐓻'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐳀', '𐳲'), + ('𐵰', '𐶅'), + ('𑣀', '𑣟'), + ('𖹠', '𖹿'), + ('𝐚', '𝐳'), + ('𝑎', '𝑔'), + ('𝑖', '𝑧'), + ('𝒂', '𝒛'), + ('𝒶', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝓏'), + ('𝓪', '𝔃'), + ('𝔞', '𝔷'), + ('𝕒', '𝕫'), + ('𝖆', '𝖟'), + ('𝖺', '𝗓'), + ('𝗮', '𝘇'), + ('𝘢', '𝘻'), + ('𝙖', '𝙯'), + ('𝚊', '𝚥'), + ('𝛂', '𝛚'), + ('𝛜', '𝛡'), + ('𝛼', '𝜔'), + ('𝜖', '𝜛'), + ('𝜶', '𝝎'), + ('𝝐', '𝝕'), + ('𝝰', '𝞈'), + ('𝞊', '𝞏'), + ('𝞪', '𝟂'), + ('𝟄', '𝟉'), + ('𝟋', '𝟋'), + ('𝼀', '𝼉'), + ('𝼋', '𝼞'), + ('𝼥', '𝼪'), + ('𞤢', '𞥃'), +]; + +pub const MARK: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + 
('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', 'ः'), + ('\u{93a}', '\u{93c}'), + ('ा', 'ॏ'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', 'ঃ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('\u{abc}', '\u{abc}'), + ('ા', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3e}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', 'ಃ'), + ('\u{cbc}', '\u{cbc}'), + ('ಾ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('ೳ', 'ೳ'), + ('\u{d00}', 'ഃ'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d3e}', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', 'ඃ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', '༿'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', 
'\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('ါ', '\u{103e}'), + ('ၖ', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('ၢ', 'ၤ'), + ('ၧ', 'ၭ'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{108d}'), + ('ႏ', 'ႏ'), + ('ႚ', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('\u{1a17}', '\u{1a1b}'), + ('ᩕ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', 'ᬄ'), + ('\u{1b34}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', 'ᮂ'), + ('ᮡ', '\u{1bad}'), + ('\u{1be6}', '\u{1bf3}'), + ('ᰤ', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('᳷', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('ꠣ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꢀ', 'ꢁ'), + ('ꢴ', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a953}'), + ('\u{a980}', 'ꦃ'), + ('\u{a9b3}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', 'ꩍ'), + ('ꩻ', 'ꩽ'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('ꫫ', 'ꫯ'), + ('ꫵ', '\u{aaf6}'), + ('ꯣ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + 
('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('𑀀', '𑀂'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '𑂂'), + ('𑂰', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{11134}'), + ('𑅅', '𑅆'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '𑆂'), + ('𑆳', '\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '\u{111cf}'), + ('𑈬', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112ea}'), + ('\u{11300}', '𑌃'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('𑍢', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('𑐵', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114c3}'), + ('\u{115af}', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('𑘰', '\u{11640}'), + ('\u{116ab}', '\u{116b7}'), + ('\u{1171d}', '\u{1172b}'), + ('𑠬', '\u{1183a}'), + ('\u{11930}', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{1193e}'), + ('𑥀', '𑥀'), + ('𑥂', '\u{11943}'), + ('𑧑', '\u{119d7}'), + ('\u{119da}', '\u{119e0}'), + ('𑧤', '𑧤'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '𑨹'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a5b}'), + ('\u{11a8a}', 
'\u{11a99}'), + ('𑰯', '\u{11c36}'), + ('\u{11c38}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('𑶊', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '\u{11d97}'), + ('\u{11ef3}', '𑻶'), + ('\u{11f00}', '\u{11f01}'), + ('𑼃', '𑼃'), + ('𑼴', '\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('𖽑', '𖾇'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const MATH_SYMBOL: &'static [(char, char)] = &[ + ('+', '+'), + ('<', '>'), + ('|', '|'), + ('~', '~'), + ('¬', '¬'), + ('±', '±'), + ('×', '×'), + ('÷', '÷'), + ('϶', '϶'), + ('؆', '؈'), + ('⁄', '⁄'), + ('⁒', '⁒'), + ('⁺', '⁼'), + ('₊', '₌'), + ('℘', '℘'), + ('⅀', '⅄'), + ('⅋', '⅋'), + ('←', '↔'), + ('↚', '↛'), + ('↠', '↠'), + ('↣', '↣'), + ('↦', '↦'), + 
('↮', '↮'), + ('⇎', '⇏'), + ('⇒', '⇒'), + ('⇔', '⇔'), + ('⇴', '⋿'), + ('⌠', '⌡'), + ('⍼', '⍼'), + ('⎛', '⎳'), + ('⏜', '⏡'), + ('▷', '▷'), + ('◁', '◁'), + ('◸', '◿'), + ('♯', '♯'), + ('⟀', '⟄'), + ('⟇', '⟥'), + ('⟰', '⟿'), + ('⤀', '⦂'), + ('⦙', '⧗'), + ('⧜', '⧻'), + ('⧾', '⫿'), + ('⬰', '⭄'), + ('⭇', '⭌'), + ('﬩', '﬩'), + ('﹢', '﹢'), + ('﹤', '﹦'), + ('+', '+'), + ('<', '>'), + ('|', '|'), + ('~', '~'), + ('¬', '¬'), + ('←', '↓'), + ('𐶎', '𐶏'), + ('𝛁', '𝛁'), + ('𝛛', '𝛛'), + ('𝛻', '𝛻'), + ('𝜕', '𝜕'), + ('𝜵', '𝜵'), + ('𝝏', '𝝏'), + ('𝝯', '𝝯'), + ('𝞉', '𝞉'), + ('𝞩', '𝞩'), + ('𝟃', '𝟃'), + ('𞻰', '𞻱'), +]; + +pub const MODIFIER_LETTER: &'static [(char, char)] = &[ + ('ʰ', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('ʹ', 'ʹ'), + ('ͺ', 'ͺ'), + ('ՙ', 'ՙ'), + ('ـ', 'ـ'), + ('ۥ', 'ۦ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࣉ', 'ࣉ'), + ('ॱ', 'ॱ'), + ('ๆ', 'ๆ'), + ('ໆ', 'ໆ'), + ('ჼ', 'ჼ'), + ('ៗ', 'ៗ'), + ('ᡃ', 'ᡃ'), + ('ᪧ', 'ᪧ'), + ('ᱸ', 'ᱽ'), + ('ᴬ', 'ᵪ'), + ('ᵸ', 'ᵸ'), + ('ᶛ', 'ᶿ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ⱼ', 'ⱽ'), + ('ⵯ', 'ⵯ'), + ('ⸯ', 'ⸯ'), + ('々', '々'), + ('〱', '〵'), + ('〻', '〻'), + ('ゝ', 'ゞ'), + ('ー', 'ヾ'), + ('ꀕ', 'ꀕ'), + ('ꓸ', 'ꓽ'), + ('ꘌ', 'ꘌ'), + ('ꙿ', 'ꙿ'), + ('ꚜ', 'ꚝ'), + ('ꜗ', 'ꜟ'), + ('ꝰ', 'ꝰ'), + ('ꞈ', 'ꞈ'), + ('ꟲ', 'ꟴ'), + ('ꟸ', 'ꟹ'), + ('ꧏ', 'ꧏ'), + ('ꧦ', 'ꧦ'), + ('ꩰ', 'ꩰ'), + ('ꫝ', 'ꫝ'), + ('ꫳ', 'ꫴ'), + ('ꭜ', 'ꭟ'), + ('ꭩ', 'ꭩ'), + ('ー', 'ー'), + ('\u{ff9e}', '\u{ff9f}'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐵎', '𐵎'), + ('𐵯', '𐵯'), + ('𖭀', '𖭃'), + ('𖵀', '𖵂'), + ('𖵫', '𖵬'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𞀰', '𞁭'), + ('𞄷', '𞄽'), + ('𞓫', '𞓫'), + ('𞥋', '𞥋'), +]; + +pub const MODIFIER_SYMBOL: &'static [(char, char)] = &[ + ('^', '^'), + ('`', '`'), + ('¨', '¨'), + ('¯', '¯'), + ('´', '´'), + ('¸', '¸'), + ('˂', '˅'), + ('˒', '˟'), + ('˥', '˫'), + ('˭', '˭'), + ('˯', '˿'), + ('͵', '͵'), + ('΄', '΅'), + ('࢈', '࢈'), + ('᾽', '᾽'), 
+ ('᾿', '῁'), + ('῍', '῏'), + ('῝', '῟'), + ('῭', '`'), + ('´', '῾'), + ('゛', '゜'), + ('꜀', '꜖'), + ('꜠', '꜡'), + ('꞉', '꞊'), + ('꭛', '꭛'), + ('꭪', '꭫'), + ('﮲', '﯂'), + ('^', '^'), + ('`', '`'), + (' ̄', ' ̄'), + ('🏻', '🏿'), +]; + +pub const NONSPACING_MARK: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{487}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', '\u{902}'), + ('\u{93a}', '\u{93a}'), + ('\u{93c}', '\u{93c}'), + ('\u{941}', '\u{948}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', '\u{981}'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9c1}', '\u{9c4}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', '\u{a02}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a41}', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', '\u{a82}'), + ('\u{abc}', '\u{abc}'), + ('\u{ac1}', '\u{ac5}'), + ('\u{ac7}', '\u{ac8}'), + ('\u{acd}', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', '\u{b01}'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3f}', '\u{b3f}'), + ('\u{b41}', '\u{b44}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b56}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bc0}', '\u{bc0}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{c00}', '\u{c00}'), + ('\u{c04}', 
'\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', '\u{c40}'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', '\u{c81}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{cbf}', '\u{cbf}'), + ('\u{cc6}', '\u{cc6}'), + ('\u{ccc}', '\u{ccd}'), + ('\u{ce2}', '\u{ce3}'), + ('\u{d00}', '\u{d01}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d41}', '\u{d44}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', '\u{d81}'), + ('\u{dca}', '\u{dca}'), + ('\u{dd2}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('\u{f71}', '\u{f7e}'), + ('\u{f80}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{102d}', '\u{1030}'), + ('\u{1032}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('\u{103d}', '\u{103e}'), + ('\u{1058}', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{1082}'), + ('\u{1085}', '\u{1086}'), + ('\u{108d}', '\u{108d}'), + ('\u{109d}', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1714}'), + ('\u{1732}', '\u{1733}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17b5}'), + ('\u{17b7}', '\u{17bd}'), + ('\u{17c6}', '\u{17c6}'), + ('\u{17c9}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', '\u{1922}'), + ('\u{1927}', '\u{1928}'), + ('\u{1932}', '\u{1932}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a17}', '\u{1a18}'), + ('\u{1a1b}', '\u{1a1b}'), + ('\u{1a56}', '\u{1a56}'), + ('\u{1a58}', '\u{1a5e}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a62}', '\u{1a62}'), + ('\u{1a65}', '\u{1a6c}'), + ('\u{1a73}', 
'\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1abd}'), + ('\u{1abf}', '\u{1ace}'), + ('\u{1b00}', '\u{1b03}'), + ('\u{1b34}', '\u{1b34}'), + ('\u{1b36}', '\u{1b3a}'), + ('\u{1b3c}', '\u{1b3c}'), + ('\u{1b42}', '\u{1b42}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1b81}'), + ('\u{1ba2}', '\u{1ba5}'), + ('\u{1ba8}', '\u{1ba9}'), + ('\u{1bab}', '\u{1bad}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1be8}', '\u{1be9}'), + ('\u{1bed}', '\u{1bed}'), + ('\u{1bef}', '\u{1bf1}'), + ('\u{1c2c}', '\u{1c33}'), + ('\u{1c36}', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{20d0}', '\u{20dc}'), + ('\u{20e1}', '\u{20e1}'), + ('\u{20e5}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302d}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a66f}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('\u{a825}', '\u{a826}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a951}'), + ('\u{a980}', '\u{a982}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9b6}', '\u{a9b9}'), + ('\u{a9bc}', '\u{a9bd}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa2e}'), + ('\u{aa31}', '\u{aa32}'), + ('\u{aa35}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', '\u{aa4c}'), + ('\u{aa7c}', '\u{aa7c}'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('\u{aaec}', '\u{aaed}'), + ('\u{aaf6}', '\u{aaf6}'), + ('\u{abe5}', '\u{abe5}'), + ('\u{abe8}', '\u{abe8}'), + ('\u{abed}', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', 
'\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11001}', '\u{11001}'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '\u{11081}'), + ('\u{110b3}', '\u{110b6}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{1112b}'), + ('\u{1112d}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '\u{11181}'), + ('\u{111b6}', '\u{111be}'), + ('\u{111c9}', '\u{111cc}'), + ('\u{111cf}', '\u{111cf}'), + ('\u{1122f}', '\u{11231}'), + ('\u{11234}', '\u{11234}'), + ('\u{11236}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112df}'), + ('\u{112e3}', '\u{112ea}'), + ('\u{11300}', '\u{11301}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{11340}', '\u{11340}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113bb}', '\u{113c0}'), + ('\u{113ce}', '\u{113ce}'), + ('\u{113d0}', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11438}', '\u{1143f}'), + ('\u{11442}', '\u{11444}'), + ('\u{11446}', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b3}', '\u{114b8}'), + ('\u{114ba}', '\u{114ba}'), + ('\u{114bf}', '\u{114c0}'), + ('\u{114c2}', '\u{114c3}'), + ('\u{115b2}', '\u{115b5}'), + ('\u{115bc}', '\u{115bd}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('\u{11633}', '\u{1163a}'), + ('\u{1163d}', '\u{1163d}'), + ('\u{1163f}', '\u{11640}'), + ('\u{116ab}', '\u{116ab}'), + ('\u{116ad}', '\u{116ad}'), + 
('\u{116b0}', '\u{116b5}'), + ('\u{116b7}', '\u{116b7}'), + ('\u{1171d}', '\u{1171d}'), + ('\u{1171f}', '\u{1171f}'), + ('\u{11722}', '\u{11725}'), + ('\u{11727}', '\u{1172b}'), + ('\u{1182f}', '\u{11837}'), + ('\u{11839}', '\u{1183a}'), + ('\u{1193b}', '\u{1193c}'), + ('\u{1193e}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119d4}', '\u{119d7}'), + ('\u{119da}', '\u{119db}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '\u{11a38}'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a56}'), + ('\u{11a59}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a96}'), + ('\u{11a98}', '\u{11a99}'), + ('\u{11c30}', '\u{11c36}'), + ('\u{11c38}', '\u{11c3d}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('\u{11caa}', '\u{11cb0}'), + ('\u{11cb2}', '\u{11cb3}'), + ('\u{11cb5}', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('\u{11d90}', '\u{11d91}'), + ('\u{11d95}', '\u{11d95}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11ef3}', '\u{11ef4}'), + ('\u{11f00}', '\u{11f01}'), + ('\u{11f36}', '\u{11f3a}'), + ('\u{11f40}', '\u{11f40}'), + ('\u{11f42}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{16129}'), + ('\u{1612d}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d167}', '\u{1d169}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', 
'\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const NUMBER: &'static [(char, char)] = &[ + ('0', '9'), + ('²', '³'), + ('¹', '¹'), + ('¼', '¾'), + ('٠', '٩'), + ('۰', '۹'), + ('߀', '߉'), + ('०', '९'), + ('০', '৯'), + ('৴', '৹'), + ('੦', '੯'), + ('૦', '૯'), + ('୦', '୯'), + ('୲', '୷'), + ('௦', '௲'), + ('౦', '౯'), + ('౸', '౾'), + ('೦', '೯'), + ('൘', '൞'), + ('൦', '൸'), + ('෦', '෯'), + ('๐', '๙'), + ('໐', '໙'), + ('༠', '༳'), + ('၀', '၉'), + ('႐', '႙'), + ('፩', '፼'), + ('ᛮ', 'ᛰ'), + ('០', '៩'), + ('៰', '៹'), + ('᠐', '᠙'), + ('᥆', '᥏'), + ('᧐', '᧚'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᭐', '᭙'), + ('᮰', '᮹'), + ('᱀', '᱉'), + ('᱐', '᱙'), + ('⁰', '⁰'), + ('⁴', '⁹'), + ('₀', '₉'), + ('⅐', 'ↂ'), + ('ↅ', '↉'), + ('①', '⒛'), + ('⓪', '⓿'), + ('❶', '➓'), + ('⳽', '⳽'), + ('〇', '〇'), + ('〡', '〩'), + ('〸', '〺'), + ('㆒', '㆕'), + ('㈠', '㈩'), + ('㉈', '㉏'), + ('㉑', '㉟'), + ('㊀', '㊉'), + ('㊱', '㊿'), + ('꘠', '꘩'), + ('ꛦ', 'ꛯ'), + ('꠰', '꠵'), + ('꣐', '꣙'), + ('꤀', '꤉'), + ('꧐', '꧙'), + ('꧰', '꧹'), + ('꩐', '꩙'), + ('꯰', '꯹'), + ('0', '9'), + ('𐄇', '𐄳'), + ('𐅀', '𐅸'), + ('𐆊', '𐆋'), + ('𐋡', '𐋻'), + ('𐌠', '𐌣'), + ('𐍁', '𐍁'), + ('𐍊', '𐍊'), + ('𐏑', '𐏕'), + ('𐒠', '𐒩'), + ('𐡘', '𐡟'), + ('𐡹', '𐡿'), + ('𐢧', '𐢯'), + ('𐣻', '𐣿'), + ('𐤖', '𐤛'), + ('𐦼', '𐦽'), + ('𐧀', '𐧏'), + ('𐧒', '𐧿'), + ('𐩀', '𐩈'), + ('𐩽', '𐩾'), + ('𐪝', '𐪟'), + ('𐫫', '𐫯'), + ('𐭘', '𐭟'), + ('𐭸', '𐭿'), + ('𐮩', '𐮯'), + ('𐳺', '𐳿'), + ('𐴰', '𐴹'), + ('𐵀', '𐵉'), + ('𐹠', '𐹾'), + ('𐼝', '𐼦'), + ('𐽑', '𐽔'), + ('𐿅', '𐿋'), + ('𑁒', '𑁯'), + ('𑃰', '𑃹'), + ('𑄶', '𑄿'), + ('𑇐', '𑇙'), + ('𑇡', '𑇴'), + ('𑋰', '𑋹'), + ('𑑐', '𑑙'), + ('𑓐', '𑓙'), + ('𑙐', '𑙙'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜰', '𑜻'), + 
('𑣠', '𑣲'), + ('𑥐', '𑥙'), + ('𑯰', '𑯹'), + ('𑱐', '𑱬'), + ('𑵐', '𑵙'), + ('𑶠', '𑶩'), + ('𑽐', '𑽙'), + ('𑿀', '𑿔'), + ('𒐀', '𒑮'), + ('𖄰', '𖄹'), + ('𖩠', '𖩩'), + ('𖫀', '𖫉'), + ('𖭐', '𖭙'), + ('𖭛', '𖭡'), + ('𖵰', '𖵹'), + ('𖺀', '𖺖'), + ('𜳰', '𜳹'), + ('𝋀', '𝋓'), + ('𝋠', '𝋳'), + ('𝍠', '𝍸'), + ('𝟎', '𝟿'), + ('𞅀', '𞅉'), + ('𞋰', '𞋹'), + ('𞓰', '𞓹'), + ('𞗱', '𞗺'), + ('𞣇', '𞣏'), + ('𞥐', '𞥙'), + ('𞱱', '𞲫'), + ('𞲭', '𞲯'), + ('𞲱', '𞲴'), + ('𞴁', '𞴭'), + ('𞴯', '𞴽'), + ('🄀', '🄌'), + ('🯰', '🯹'), +]; + +pub const OPEN_PUNCTUATION: &'static [(char, char)] = &[ + ('(', '('), + ('[', '['), + ('{', '{'), + ('༺', '༺'), + ('༼', '༼'), + ('᚛', '᚛'), + ('‚', '‚'), + ('„', '„'), + ('⁅', '⁅'), + ('⁽', '⁽'), + ('₍', '₍'), + ('⌈', '⌈'), + ('⌊', '⌊'), + ('〈', '〈'), + ('❨', '❨'), + ('❪', '❪'), + ('❬', '❬'), + ('❮', '❮'), + ('❰', '❰'), + ('❲', '❲'), + ('❴', '❴'), + ('⟅', '⟅'), + ('⟦', '⟦'), + ('⟨', '⟨'), + ('⟪', '⟪'), + ('⟬', '⟬'), + ('⟮', '⟮'), + ('⦃', '⦃'), + ('⦅', '⦅'), + ('⦇', '⦇'), + ('⦉', '⦉'), + ('⦋', '⦋'), + ('⦍', '⦍'), + ('⦏', '⦏'), + ('⦑', '⦑'), + ('⦓', '⦓'), + ('⦕', '⦕'), + ('⦗', '⦗'), + ('⧘', '⧘'), + ('⧚', '⧚'), + ('⧼', '⧼'), + ('⸢', '⸢'), + ('⸤', '⸤'), + ('⸦', '⸦'), + ('⸨', '⸨'), + ('⹂', '⹂'), + ('⹕', '⹕'), + ('⹗', '⹗'), + ('⹙', '⹙'), + ('⹛', '⹛'), + ('〈', '〈'), + ('《', '《'), + ('「', '「'), + ('『', '『'), + ('【', '【'), + ('〔', '〔'), + ('〖', '〖'), + ('〘', '〘'), + ('〚', '〚'), + ('〝', '〝'), + ('﴿', '﴿'), + ('︗', '︗'), + ('︵', '︵'), + ('︷', '︷'), + ('︹', '︹'), + ('︻', '︻'), + ('︽', '︽'), + ('︿', '︿'), + ('﹁', '﹁'), + ('﹃', '﹃'), + ('﹇', '﹇'), + ('﹙', '﹙'), + ('﹛', '﹛'), + ('﹝', '﹝'), + ('(', '('), + ('[', '['), + ('{', '{'), + ('⦅', '⦅'), + ('「', '「'), +]; + +pub const OTHER: &'static [(char, char)] = &[ + ('\0', '\u{1f}'), + ('\u{7f}', '\u{9f}'), + ('\u{ad}', '\u{ad}'), + ('\u{378}', '\u{379}'), + ('\u{380}', '\u{383}'), + ('\u{38b}', '\u{38b}'), + ('\u{38d}', '\u{38d}'), + ('\u{3a2}', '\u{3a2}'), + ('\u{530}', '\u{530}'), + ('\u{557}', '\u{558}'), + ('\u{58b}', '\u{58c}'), + ('\u{590}', '\u{590}'), 
+ ('\u{5c8}', '\u{5cf}'), + ('\u{5eb}', '\u{5ee}'), + ('\u{5f5}', '\u{605}'), + ('\u{61c}', '\u{61c}'), + ('\u{6dd}', '\u{6dd}'), + ('\u{70e}', '\u{70f}'), + ('\u{74b}', '\u{74c}'), + ('\u{7b2}', '\u{7bf}'), + ('\u{7fb}', '\u{7fc}'), + ('\u{82e}', '\u{82f}'), + ('\u{83f}', '\u{83f}'), + ('\u{85c}', '\u{85d}'), + ('\u{85f}', '\u{85f}'), + ('\u{86b}', '\u{86f}'), + ('\u{88f}', '\u{896}'), + ('\u{8e2}', '\u{8e2}'), + ('\u{984}', '\u{984}'), + ('\u{98d}', '\u{98e}'), + ('\u{991}', '\u{992}'), + ('\u{9a9}', '\u{9a9}'), + ('\u{9b1}', '\u{9b1}'), + ('\u{9b3}', '\u{9b5}'), + ('\u{9ba}', '\u{9bb}'), + ('\u{9c5}', '\u{9c6}'), + ('\u{9c9}', '\u{9ca}'), + ('\u{9cf}', '\u{9d6}'), + ('\u{9d8}', '\u{9db}'), + ('\u{9de}', '\u{9de}'), + ('\u{9e4}', '\u{9e5}'), + ('\u{9ff}', '\u{a00}'), + ('\u{a04}', '\u{a04}'), + ('\u{a0b}', '\u{a0e}'), + ('\u{a11}', '\u{a12}'), + ('\u{a29}', '\u{a29}'), + ('\u{a31}', '\u{a31}'), + ('\u{a34}', '\u{a34}'), + ('\u{a37}', '\u{a37}'), + ('\u{a3a}', '\u{a3b}'), + ('\u{a3d}', '\u{a3d}'), + ('\u{a43}', '\u{a46}'), + ('\u{a49}', '\u{a4a}'), + ('\u{a4e}', '\u{a50}'), + ('\u{a52}', '\u{a58}'), + ('\u{a5d}', '\u{a5d}'), + ('\u{a5f}', '\u{a65}'), + ('\u{a77}', '\u{a80}'), + ('\u{a84}', '\u{a84}'), + ('\u{a8e}', '\u{a8e}'), + ('\u{a92}', '\u{a92}'), + ('\u{aa9}', '\u{aa9}'), + ('\u{ab1}', '\u{ab1}'), + ('\u{ab4}', '\u{ab4}'), + ('\u{aba}', '\u{abb}'), + ('\u{ac6}', '\u{ac6}'), + ('\u{aca}', '\u{aca}'), + ('\u{ace}', '\u{acf}'), + ('\u{ad1}', '\u{adf}'), + ('\u{ae4}', '\u{ae5}'), + ('\u{af2}', '\u{af8}'), + ('\u{b00}', '\u{b00}'), + ('\u{b04}', '\u{b04}'), + ('\u{b0d}', '\u{b0e}'), + ('\u{b11}', '\u{b12}'), + ('\u{b29}', '\u{b29}'), + ('\u{b31}', '\u{b31}'), + ('\u{b34}', '\u{b34}'), + ('\u{b3a}', '\u{b3b}'), + ('\u{b45}', '\u{b46}'), + ('\u{b49}', '\u{b4a}'), + ('\u{b4e}', '\u{b54}'), + ('\u{b58}', '\u{b5b}'), + ('\u{b5e}', '\u{b5e}'), + ('\u{b64}', '\u{b65}'), + ('\u{b78}', '\u{b81}'), + ('\u{b84}', '\u{b84}'), + ('\u{b8b}', '\u{b8d}'), + ('\u{b91}', 
'\u{b91}'), + ('\u{b96}', '\u{b98}'), + ('\u{b9b}', '\u{b9b}'), + ('\u{b9d}', '\u{b9d}'), + ('\u{ba0}', '\u{ba2}'), + ('\u{ba5}', '\u{ba7}'), + ('\u{bab}', '\u{bad}'), + ('\u{bba}', '\u{bbd}'), + ('\u{bc3}', '\u{bc5}'), + ('\u{bc9}', '\u{bc9}'), + ('\u{bce}', '\u{bcf}'), + ('\u{bd1}', '\u{bd6}'), + ('\u{bd8}', '\u{be5}'), + ('\u{bfb}', '\u{bff}'), + ('\u{c0d}', '\u{c0d}'), + ('\u{c11}', '\u{c11}'), + ('\u{c29}', '\u{c29}'), + ('\u{c3a}', '\u{c3b}'), + ('\u{c45}', '\u{c45}'), + ('\u{c49}', '\u{c49}'), + ('\u{c4e}', '\u{c54}'), + ('\u{c57}', '\u{c57}'), + ('\u{c5b}', '\u{c5c}'), + ('\u{c5e}', '\u{c5f}'), + ('\u{c64}', '\u{c65}'), + ('\u{c70}', '\u{c76}'), + ('\u{c8d}', '\u{c8d}'), + ('\u{c91}', '\u{c91}'), + ('\u{ca9}', '\u{ca9}'), + ('\u{cb4}', '\u{cb4}'), + ('\u{cba}', '\u{cbb}'), + ('\u{cc5}', '\u{cc5}'), + ('\u{cc9}', '\u{cc9}'), + ('\u{cce}', '\u{cd4}'), + ('\u{cd7}', '\u{cdc}'), + ('\u{cdf}', '\u{cdf}'), + ('\u{ce4}', '\u{ce5}'), + ('\u{cf0}', '\u{cf0}'), + ('\u{cf4}', '\u{cff}'), + ('\u{d0d}', '\u{d0d}'), + ('\u{d11}', '\u{d11}'), + ('\u{d45}', '\u{d45}'), + ('\u{d49}', '\u{d49}'), + ('\u{d50}', '\u{d53}'), + ('\u{d64}', '\u{d65}'), + ('\u{d80}', '\u{d80}'), + ('\u{d84}', '\u{d84}'), + ('\u{d97}', '\u{d99}'), + ('\u{db2}', '\u{db2}'), + ('\u{dbc}', '\u{dbc}'), + ('\u{dbe}', '\u{dbf}'), + ('\u{dc7}', '\u{dc9}'), + ('\u{dcb}', '\u{dce}'), + ('\u{dd5}', '\u{dd5}'), + ('\u{dd7}', '\u{dd7}'), + ('\u{de0}', '\u{de5}'), + ('\u{df0}', '\u{df1}'), + ('\u{df5}', '\u{e00}'), + ('\u{e3b}', '\u{e3e}'), + ('\u{e5c}', '\u{e80}'), + ('\u{e83}', '\u{e83}'), + ('\u{e85}', '\u{e85}'), + ('\u{e8b}', '\u{e8b}'), + ('\u{ea4}', '\u{ea4}'), + ('\u{ea6}', '\u{ea6}'), + ('\u{ebe}', '\u{ebf}'), + ('\u{ec5}', '\u{ec5}'), + ('\u{ec7}', '\u{ec7}'), + ('\u{ecf}', '\u{ecf}'), + ('\u{eda}', '\u{edb}'), + ('\u{ee0}', '\u{eff}'), + ('\u{f48}', '\u{f48}'), + ('\u{f6d}', '\u{f70}'), + ('\u{f98}', '\u{f98}'), + ('\u{fbd}', '\u{fbd}'), + ('\u{fcd}', '\u{fcd}'), + ('\u{fdb}', '\u{fff}'), + 
('\u{10c6}', '\u{10c6}'), + ('\u{10c8}', '\u{10cc}'), + ('\u{10ce}', '\u{10cf}'), + ('\u{1249}', '\u{1249}'), + ('\u{124e}', '\u{124f}'), + ('\u{1257}', '\u{1257}'), + ('\u{1259}', '\u{1259}'), + ('\u{125e}', '\u{125f}'), + ('\u{1289}', '\u{1289}'), + ('\u{128e}', '\u{128f}'), + ('\u{12b1}', '\u{12b1}'), + ('\u{12b6}', '\u{12b7}'), + ('\u{12bf}', '\u{12bf}'), + ('\u{12c1}', '\u{12c1}'), + ('\u{12c6}', '\u{12c7}'), + ('\u{12d7}', '\u{12d7}'), + ('\u{1311}', '\u{1311}'), + ('\u{1316}', '\u{1317}'), + ('\u{135b}', '\u{135c}'), + ('\u{137d}', '\u{137f}'), + ('\u{139a}', '\u{139f}'), + ('\u{13f6}', '\u{13f7}'), + ('\u{13fe}', '\u{13ff}'), + ('\u{169d}', '\u{169f}'), + ('\u{16f9}', '\u{16ff}'), + ('\u{1716}', '\u{171e}'), + ('\u{1737}', '\u{173f}'), + ('\u{1754}', '\u{175f}'), + ('\u{176d}', '\u{176d}'), + ('\u{1771}', '\u{1771}'), + ('\u{1774}', '\u{177f}'), + ('\u{17de}', '\u{17df}'), + ('\u{17ea}', '\u{17ef}'), + ('\u{17fa}', '\u{17ff}'), + ('\u{180e}', '\u{180e}'), + ('\u{181a}', '\u{181f}'), + ('\u{1879}', '\u{187f}'), + ('\u{18ab}', '\u{18af}'), + ('\u{18f6}', '\u{18ff}'), + ('\u{191f}', '\u{191f}'), + ('\u{192c}', '\u{192f}'), + ('\u{193c}', '\u{193f}'), + ('\u{1941}', '\u{1943}'), + ('\u{196e}', '\u{196f}'), + ('\u{1975}', '\u{197f}'), + ('\u{19ac}', '\u{19af}'), + ('\u{19ca}', '\u{19cf}'), + ('\u{19db}', '\u{19dd}'), + ('\u{1a1c}', '\u{1a1d}'), + ('\u{1a5f}', '\u{1a5f}'), + ('\u{1a7d}', '\u{1a7e}'), + ('\u{1a8a}', '\u{1a8f}'), + ('\u{1a9a}', '\u{1a9f}'), + ('\u{1aae}', '\u{1aaf}'), + ('\u{1acf}', '\u{1aff}'), + ('\u{1b4d}', '\u{1b4d}'), + ('\u{1bf4}', '\u{1bfb}'), + ('\u{1c38}', '\u{1c3a}'), + ('\u{1c4a}', '\u{1c4c}'), + ('\u{1c8b}', '\u{1c8f}'), + ('\u{1cbb}', '\u{1cbc}'), + ('\u{1cc8}', '\u{1ccf}'), + ('\u{1cfb}', '\u{1cff}'), + ('\u{1f16}', '\u{1f17}'), + ('\u{1f1e}', '\u{1f1f}'), + ('\u{1f46}', '\u{1f47}'), + ('\u{1f4e}', '\u{1f4f}'), + ('\u{1f58}', '\u{1f58}'), + ('\u{1f5a}', '\u{1f5a}'), + ('\u{1f5c}', '\u{1f5c}'), + ('\u{1f5e}', '\u{1f5e}'), + 
('\u{1f7e}', '\u{1f7f}'), + ('\u{1fb5}', '\u{1fb5}'), + ('\u{1fc5}', '\u{1fc5}'), + ('\u{1fd4}', '\u{1fd5}'), + ('\u{1fdc}', '\u{1fdc}'), + ('\u{1ff0}', '\u{1ff1}'), + ('\u{1ff5}', '\u{1ff5}'), + ('\u{1fff}', '\u{1fff}'), + ('\u{200b}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{206f}'), + ('\u{2072}', '\u{2073}'), + ('\u{208f}', '\u{208f}'), + ('\u{209d}', '\u{209f}'), + ('\u{20c1}', '\u{20cf}'), + ('\u{20f1}', '\u{20ff}'), + ('\u{218c}', '\u{218f}'), + ('\u{242a}', '\u{243f}'), + ('\u{244b}', '\u{245f}'), + ('\u{2b74}', '\u{2b75}'), + ('\u{2b96}', '\u{2b96}'), + ('\u{2cf4}', '\u{2cf8}'), + ('\u{2d26}', '\u{2d26}'), + ('\u{2d28}', '\u{2d2c}'), + ('\u{2d2e}', '\u{2d2f}'), + ('\u{2d68}', '\u{2d6e}'), + ('\u{2d71}', '\u{2d7e}'), + ('\u{2d97}', '\u{2d9f}'), + ('\u{2da7}', '\u{2da7}'), + ('\u{2daf}', '\u{2daf}'), + ('\u{2db7}', '\u{2db7}'), + ('\u{2dbf}', '\u{2dbf}'), + ('\u{2dc7}', '\u{2dc7}'), + ('\u{2dcf}', '\u{2dcf}'), + ('\u{2dd7}', '\u{2dd7}'), + ('\u{2ddf}', '\u{2ddf}'), + ('\u{2e5e}', '\u{2e7f}'), + ('\u{2e9a}', '\u{2e9a}'), + ('\u{2ef4}', '\u{2eff}'), + ('\u{2fd6}', '\u{2fef}'), + ('\u{3040}', '\u{3040}'), + ('\u{3097}', '\u{3098}'), + ('\u{3100}', '\u{3104}'), + ('\u{3130}', '\u{3130}'), + ('\u{318f}', '\u{318f}'), + ('\u{31e6}', '\u{31ee}'), + ('\u{321f}', '\u{321f}'), + ('\u{a48d}', '\u{a48f}'), + ('\u{a4c7}', '\u{a4cf}'), + ('\u{a62c}', '\u{a63f}'), + ('\u{a6f8}', '\u{a6ff}'), + ('\u{a7ce}', '\u{a7cf}'), + ('\u{a7d2}', '\u{a7d2}'), + ('\u{a7d4}', '\u{a7d4}'), + ('\u{a7dd}', '\u{a7f1}'), + ('\u{a82d}', '\u{a82f}'), + ('\u{a83a}', '\u{a83f}'), + ('\u{a878}', '\u{a87f}'), + ('\u{a8c6}', '\u{a8cd}'), + ('\u{a8da}', '\u{a8df}'), + ('\u{a954}', '\u{a95e}'), + ('\u{a97d}', '\u{a97f}'), + ('\u{a9ce}', '\u{a9ce}'), + ('\u{a9da}', '\u{a9dd}'), + ('\u{a9ff}', '\u{a9ff}'), + ('\u{aa37}', '\u{aa3f}'), + ('\u{aa4e}', '\u{aa4f}'), + ('\u{aa5a}', '\u{aa5b}'), + ('\u{aac3}', '\u{aada}'), + ('\u{aaf7}', '\u{ab00}'), + ('\u{ab07}', '\u{ab08}'), + 
('\u{ab0f}', '\u{ab10}'), + ('\u{ab17}', '\u{ab1f}'), + ('\u{ab27}', '\u{ab27}'), + ('\u{ab2f}', '\u{ab2f}'), + ('\u{ab6c}', '\u{ab6f}'), + ('\u{abee}', '\u{abef}'), + ('\u{abfa}', '\u{abff}'), + ('\u{d7a4}', '\u{d7af}'), + ('\u{d7c7}', '\u{d7ca}'), + ('\u{d7fc}', '\u{f8ff}'), + ('\u{fa6e}', '\u{fa6f}'), + ('\u{fada}', '\u{faff}'), + ('\u{fb07}', '\u{fb12}'), + ('\u{fb18}', '\u{fb1c}'), + ('\u{fb37}', '\u{fb37}'), + ('\u{fb3d}', '\u{fb3d}'), + ('\u{fb3f}', '\u{fb3f}'), + ('\u{fb42}', '\u{fb42}'), + ('\u{fb45}', '\u{fb45}'), + ('\u{fbc3}', '\u{fbd2}'), + ('\u{fd90}', '\u{fd91}'), + ('\u{fdc8}', '\u{fdce}'), + ('\u{fdd0}', '\u{fdef}'), + ('\u{fe1a}', '\u{fe1f}'), + ('\u{fe53}', '\u{fe53}'), + ('\u{fe67}', '\u{fe67}'), + ('\u{fe6c}', '\u{fe6f}'), + ('\u{fe75}', '\u{fe75}'), + ('\u{fefd}', '\u{ff00}'), + ('\u{ffbf}', '\u{ffc1}'), + ('\u{ffc8}', '\u{ffc9}'), + ('\u{ffd0}', '\u{ffd1}'), + ('\u{ffd8}', '\u{ffd9}'), + ('\u{ffdd}', '\u{ffdf}'), + ('\u{ffe7}', '\u{ffe7}'), + ('\u{ffef}', '\u{fffb}'), + ('\u{fffe}', '\u{ffff}'), + ('\u{1000c}', '\u{1000c}'), + ('\u{10027}', '\u{10027}'), + ('\u{1003b}', '\u{1003b}'), + ('\u{1003e}', '\u{1003e}'), + ('\u{1004e}', '\u{1004f}'), + ('\u{1005e}', '\u{1007f}'), + ('\u{100fb}', '\u{100ff}'), + ('\u{10103}', '\u{10106}'), + ('\u{10134}', '\u{10136}'), + ('\u{1018f}', '\u{1018f}'), + ('\u{1019d}', '\u{1019f}'), + ('\u{101a1}', '\u{101cf}'), + ('\u{101fe}', '\u{1027f}'), + ('\u{1029d}', '\u{1029f}'), + ('\u{102d1}', '\u{102df}'), + ('\u{102fc}', '\u{102ff}'), + ('\u{10324}', '\u{1032c}'), + ('\u{1034b}', '\u{1034f}'), + ('\u{1037b}', '\u{1037f}'), + ('\u{1039e}', '\u{1039e}'), + ('\u{103c4}', '\u{103c7}'), + ('\u{103d6}', '\u{103ff}'), + ('\u{1049e}', '\u{1049f}'), + ('\u{104aa}', '\u{104af}'), + ('\u{104d4}', '\u{104d7}'), + ('\u{104fc}', '\u{104ff}'), + ('\u{10528}', '\u{1052f}'), + ('\u{10564}', '\u{1056e}'), + ('\u{1057b}', '\u{1057b}'), + ('\u{1058b}', '\u{1058b}'), + ('\u{10593}', '\u{10593}'), + ('\u{10596}', '\u{10596}'), + 
('\u{105a2}', '\u{105a2}'), + ('\u{105b2}', '\u{105b2}'), + ('\u{105ba}', '\u{105ba}'), + ('\u{105bd}', '\u{105bf}'), + ('\u{105f4}', '\u{105ff}'), + ('\u{10737}', '\u{1073f}'), + ('\u{10756}', '\u{1075f}'), + ('\u{10768}', '\u{1077f}'), + ('\u{10786}', '\u{10786}'), + ('\u{107b1}', '\u{107b1}'), + ('\u{107bb}', '\u{107ff}'), + ('\u{10806}', '\u{10807}'), + ('\u{10809}', '\u{10809}'), + ('\u{10836}', '\u{10836}'), + ('\u{10839}', '\u{1083b}'), + ('\u{1083d}', '\u{1083e}'), + ('\u{10856}', '\u{10856}'), + ('\u{1089f}', '\u{108a6}'), + ('\u{108b0}', '\u{108df}'), + ('\u{108f3}', '\u{108f3}'), + ('\u{108f6}', '\u{108fa}'), + ('\u{1091c}', '\u{1091e}'), + ('\u{1093a}', '\u{1093e}'), + ('\u{10940}', '\u{1097f}'), + ('\u{109b8}', '\u{109bb}'), + ('\u{109d0}', '\u{109d1}'), + ('\u{10a04}', '\u{10a04}'), + ('\u{10a07}', '\u{10a0b}'), + ('\u{10a14}', '\u{10a14}'), + ('\u{10a18}', '\u{10a18}'), + ('\u{10a36}', '\u{10a37}'), + ('\u{10a3b}', '\u{10a3e}'), + ('\u{10a49}', '\u{10a4f}'), + ('\u{10a59}', '\u{10a5f}'), + ('\u{10aa0}', '\u{10abf}'), + ('\u{10ae7}', '\u{10aea}'), + ('\u{10af7}', '\u{10aff}'), + ('\u{10b36}', '\u{10b38}'), + ('\u{10b56}', '\u{10b57}'), + ('\u{10b73}', '\u{10b77}'), + ('\u{10b92}', '\u{10b98}'), + ('\u{10b9d}', '\u{10ba8}'), + ('\u{10bb0}', '\u{10bff}'), + ('\u{10c49}', '\u{10c7f}'), + ('\u{10cb3}', '\u{10cbf}'), + ('\u{10cf3}', '\u{10cf9}'), + ('\u{10d28}', '\u{10d2f}'), + ('\u{10d3a}', '\u{10d3f}'), + ('\u{10d66}', '\u{10d68}'), + ('\u{10d86}', '\u{10d8d}'), + ('\u{10d90}', '\u{10e5f}'), + ('\u{10e7f}', '\u{10e7f}'), + ('\u{10eaa}', '\u{10eaa}'), + ('\u{10eae}', '\u{10eaf}'), + ('\u{10eb2}', '\u{10ec1}'), + ('\u{10ec5}', '\u{10efb}'), + ('\u{10f28}', '\u{10f2f}'), + ('\u{10f5a}', '\u{10f6f}'), + ('\u{10f8a}', '\u{10faf}'), + ('\u{10fcc}', '\u{10fdf}'), + ('\u{10ff7}', '\u{10fff}'), + ('\u{1104e}', '\u{11051}'), + ('\u{11076}', '\u{1107e}'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110c3}', '\u{110cf}'), + ('\u{110e9}', '\u{110ef}'), + ('\u{110fa}', 
'\u{110ff}'), + ('\u{11135}', '\u{11135}'), + ('\u{11148}', '\u{1114f}'), + ('\u{11177}', '\u{1117f}'), + ('\u{111e0}', '\u{111e0}'), + ('\u{111f5}', '\u{111ff}'), + ('\u{11212}', '\u{11212}'), + ('\u{11242}', '\u{1127f}'), + ('\u{11287}', '\u{11287}'), + ('\u{11289}', '\u{11289}'), + ('\u{1128e}', '\u{1128e}'), + ('\u{1129e}', '\u{1129e}'), + ('\u{112aa}', '\u{112af}'), + ('\u{112eb}', '\u{112ef}'), + ('\u{112fa}', '\u{112ff}'), + ('\u{11304}', '\u{11304}'), + ('\u{1130d}', '\u{1130e}'), + ('\u{11311}', '\u{11312}'), + ('\u{11329}', '\u{11329}'), + ('\u{11331}', '\u{11331}'), + ('\u{11334}', '\u{11334}'), + ('\u{1133a}', '\u{1133a}'), + ('\u{11345}', '\u{11346}'), + ('\u{11349}', '\u{1134a}'), + ('\u{1134e}', '\u{1134f}'), + ('\u{11351}', '\u{11356}'), + ('\u{11358}', '\u{1135c}'), + ('\u{11364}', '\u{11365}'), + ('\u{1136d}', '\u{1136f}'), + ('\u{11375}', '\u{1137f}'), + ('\u{1138a}', '\u{1138a}'), + ('\u{1138c}', '\u{1138d}'), + ('\u{1138f}', '\u{1138f}'), + ('\u{113b6}', '\u{113b6}'), + ('\u{113c1}', '\u{113c1}'), + ('\u{113c3}', '\u{113c4}'), + ('\u{113c6}', '\u{113c6}'), + ('\u{113cb}', '\u{113cb}'), + ('\u{113d6}', '\u{113d6}'), + ('\u{113d9}', '\u{113e0}'), + ('\u{113e3}', '\u{113ff}'), + ('\u{1145c}', '\u{1145c}'), + ('\u{11462}', '\u{1147f}'), + ('\u{114c8}', '\u{114cf}'), + ('\u{114da}', '\u{1157f}'), + ('\u{115b6}', '\u{115b7}'), + ('\u{115de}', '\u{115ff}'), + ('\u{11645}', '\u{1164f}'), + ('\u{1165a}', '\u{1165f}'), + ('\u{1166d}', '\u{1167f}'), + ('\u{116ba}', '\u{116bf}'), + ('\u{116ca}', '\u{116cf}'), + ('\u{116e4}', '\u{116ff}'), + ('\u{1171b}', '\u{1171c}'), + ('\u{1172c}', '\u{1172f}'), + ('\u{11747}', '\u{117ff}'), + ('\u{1183c}', '\u{1189f}'), + ('\u{118f3}', '\u{118fe}'), + ('\u{11907}', '\u{11908}'), + ('\u{1190a}', '\u{1190b}'), + ('\u{11914}', '\u{11914}'), + ('\u{11917}', '\u{11917}'), + ('\u{11936}', '\u{11936}'), + ('\u{11939}', '\u{1193a}'), + ('\u{11947}', '\u{1194f}'), + ('\u{1195a}', '\u{1199f}'), + ('\u{119a8}', '\u{119a9}'), + 
('\u{119d8}', '\u{119d9}'), + ('\u{119e5}', '\u{119ff}'), + ('\u{11a48}', '\u{11a4f}'), + ('\u{11aa3}', '\u{11aaf}'), + ('\u{11af9}', '\u{11aff}'), + ('\u{11b0a}', '\u{11bbf}'), + ('\u{11be2}', '\u{11bef}'), + ('\u{11bfa}', '\u{11bff}'), + ('\u{11c09}', '\u{11c09}'), + ('\u{11c37}', '\u{11c37}'), + ('\u{11c46}', '\u{11c4f}'), + ('\u{11c6d}', '\u{11c6f}'), + ('\u{11c90}', '\u{11c91}'), + ('\u{11ca8}', '\u{11ca8}'), + ('\u{11cb7}', '\u{11cff}'), + ('\u{11d07}', '\u{11d07}'), + ('\u{11d0a}', '\u{11d0a}'), + ('\u{11d37}', '\u{11d39}'), + ('\u{11d3b}', '\u{11d3b}'), + ('\u{11d3e}', '\u{11d3e}'), + ('\u{11d48}', '\u{11d4f}'), + ('\u{11d5a}', '\u{11d5f}'), + ('\u{11d66}', '\u{11d66}'), + ('\u{11d69}', '\u{11d69}'), + ('\u{11d8f}', '\u{11d8f}'), + ('\u{11d92}', '\u{11d92}'), + ('\u{11d99}', '\u{11d9f}'), + ('\u{11daa}', '\u{11edf}'), + ('\u{11ef9}', '\u{11eff}'), + ('\u{11f11}', '\u{11f11}'), + ('\u{11f3b}', '\u{11f3d}'), + ('\u{11f5b}', '\u{11faf}'), + ('\u{11fb1}', '\u{11fbf}'), + ('\u{11ff2}', '\u{11ffe}'), + ('\u{1239a}', '\u{123ff}'), + ('\u{1246f}', '\u{1246f}'), + ('\u{12475}', '\u{1247f}'), + ('\u{12544}', '\u{12f8f}'), + ('\u{12ff3}', '\u{12fff}'), + ('\u{13430}', '\u{1343f}'), + ('\u{13456}', '\u{1345f}'), + ('\u{143fb}', '\u{143ff}'), + ('\u{14647}', '\u{160ff}'), + ('\u{1613a}', '\u{167ff}'), + ('\u{16a39}', '\u{16a3f}'), + ('\u{16a5f}', '\u{16a5f}'), + ('\u{16a6a}', '\u{16a6d}'), + ('\u{16abf}', '\u{16abf}'), + ('\u{16aca}', '\u{16acf}'), + ('\u{16aee}', '\u{16aef}'), + ('\u{16af6}', '\u{16aff}'), + ('\u{16b46}', '\u{16b4f}'), + ('\u{16b5a}', '\u{16b5a}'), + ('\u{16b62}', '\u{16b62}'), + ('\u{16b78}', '\u{16b7c}'), + ('\u{16b90}', '\u{16d3f}'), + ('\u{16d7a}', '\u{16e3f}'), + ('\u{16e9b}', '\u{16eff}'), + ('\u{16f4b}', '\u{16f4e}'), + ('\u{16f88}', '\u{16f8e}'), + ('\u{16fa0}', '\u{16fdf}'), + ('\u{16fe5}', '\u{16fef}'), + ('\u{16ff2}', '\u{16fff}'), + ('\u{187f8}', '\u{187ff}'), + ('\u{18cd6}', '\u{18cfe}'), + ('\u{18d09}', '\u{1afef}'), + ('\u{1aff4}', 
'\u{1aff4}'), + ('\u{1affc}', '\u{1affc}'), + ('\u{1afff}', '\u{1afff}'), + ('\u{1b123}', '\u{1b131}'), + ('\u{1b133}', '\u{1b14f}'), + ('\u{1b153}', '\u{1b154}'), + ('\u{1b156}', '\u{1b163}'), + ('\u{1b168}', '\u{1b16f}'), + ('\u{1b2fc}', '\u{1bbff}'), + ('\u{1bc6b}', '\u{1bc6f}'), + ('\u{1bc7d}', '\u{1bc7f}'), + ('\u{1bc89}', '\u{1bc8f}'), + ('\u{1bc9a}', '\u{1bc9b}'), + ('\u{1bca0}', '\u{1cbff}'), + ('\u{1ccfa}', '\u{1ccff}'), + ('\u{1ceb4}', '\u{1ceff}'), + ('\u{1cf2e}', '\u{1cf2f}'), + ('\u{1cf47}', '\u{1cf4f}'), + ('\u{1cfc4}', '\u{1cfff}'), + ('\u{1d0f6}', '\u{1d0ff}'), + ('\u{1d127}', '\u{1d128}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{1d1eb}', '\u{1d1ff}'), + ('\u{1d246}', '\u{1d2bf}'), + ('\u{1d2d4}', '\u{1d2df}'), + ('\u{1d2f4}', '\u{1d2ff}'), + ('\u{1d357}', '\u{1d35f}'), + ('\u{1d379}', '\u{1d3ff}'), + ('\u{1d455}', '\u{1d455}'), + ('\u{1d49d}', '\u{1d49d}'), + ('\u{1d4a0}', '\u{1d4a1}'), + ('\u{1d4a3}', '\u{1d4a4}'), + ('\u{1d4a7}', '\u{1d4a8}'), + ('\u{1d4ad}', '\u{1d4ad}'), + ('\u{1d4ba}', '\u{1d4ba}'), + ('\u{1d4bc}', '\u{1d4bc}'), + ('\u{1d4c4}', '\u{1d4c4}'), + ('\u{1d506}', '\u{1d506}'), + ('\u{1d50b}', '\u{1d50c}'), + ('\u{1d515}', '\u{1d515}'), + ('\u{1d51d}', '\u{1d51d}'), + ('\u{1d53a}', '\u{1d53a}'), + ('\u{1d53f}', '\u{1d53f}'), + ('\u{1d545}', '\u{1d545}'), + ('\u{1d547}', '\u{1d549}'), + ('\u{1d551}', '\u{1d551}'), + ('\u{1d6a6}', '\u{1d6a7}'), + ('\u{1d7cc}', '\u{1d7cd}'), + ('\u{1da8c}', '\u{1da9a}'), + ('\u{1daa0}', '\u{1daa0}'), + ('\u{1dab0}', '\u{1deff}'), + ('\u{1df1f}', '\u{1df24}'), + ('\u{1df2b}', '\u{1dfff}'), + ('\u{1e007}', '\u{1e007}'), + ('\u{1e019}', '\u{1e01a}'), + ('\u{1e022}', '\u{1e022}'), + ('\u{1e025}', '\u{1e025}'), + ('\u{1e02b}', '\u{1e02f}'), + ('\u{1e06e}', '\u{1e08e}'), + ('\u{1e090}', '\u{1e0ff}'), + ('\u{1e12d}', '\u{1e12f}'), + ('\u{1e13e}', '\u{1e13f}'), + ('\u{1e14a}', '\u{1e14d}'), + ('\u{1e150}', '\u{1e28f}'), + ('\u{1e2af}', '\u{1e2bf}'), + ('\u{1e2fa}', '\u{1e2fe}'), + ('\u{1e300}', '\u{1e4cf}'), + 
('\u{1e4fa}', '\u{1e5cf}'), + ('\u{1e5fb}', '\u{1e5fe}'), + ('\u{1e600}', '\u{1e7df}'), + ('\u{1e7e7}', '\u{1e7e7}'), + ('\u{1e7ec}', '\u{1e7ec}'), + ('\u{1e7ef}', '\u{1e7ef}'), + ('\u{1e7ff}', '\u{1e7ff}'), + ('\u{1e8c5}', '\u{1e8c6}'), + ('\u{1e8d7}', '\u{1e8ff}'), + ('\u{1e94c}', '\u{1e94f}'), + ('\u{1e95a}', '\u{1e95d}'), + ('\u{1e960}', '\u{1ec70}'), + ('\u{1ecb5}', '\u{1ed00}'), + ('\u{1ed3e}', '\u{1edff}'), + ('\u{1ee04}', '\u{1ee04}'), + ('\u{1ee20}', '\u{1ee20}'), + ('\u{1ee23}', '\u{1ee23}'), + ('\u{1ee25}', '\u{1ee26}'), + ('\u{1ee28}', '\u{1ee28}'), + ('\u{1ee33}', '\u{1ee33}'), + ('\u{1ee38}', '\u{1ee38}'), + ('\u{1ee3a}', '\u{1ee3a}'), + ('\u{1ee3c}', '\u{1ee41}'), + ('\u{1ee43}', '\u{1ee46}'), + ('\u{1ee48}', '\u{1ee48}'), + ('\u{1ee4a}', '\u{1ee4a}'), + ('\u{1ee4c}', '\u{1ee4c}'), + ('\u{1ee50}', '\u{1ee50}'), + ('\u{1ee53}', '\u{1ee53}'), + ('\u{1ee55}', '\u{1ee56}'), + ('\u{1ee58}', '\u{1ee58}'), + ('\u{1ee5a}', '\u{1ee5a}'), + ('\u{1ee5c}', '\u{1ee5c}'), + ('\u{1ee5e}', '\u{1ee5e}'), + ('\u{1ee60}', '\u{1ee60}'), + ('\u{1ee63}', '\u{1ee63}'), + ('\u{1ee65}', '\u{1ee66}'), + ('\u{1ee6b}', '\u{1ee6b}'), + ('\u{1ee73}', '\u{1ee73}'), + ('\u{1ee78}', '\u{1ee78}'), + ('\u{1ee7d}', '\u{1ee7d}'), + ('\u{1ee7f}', '\u{1ee7f}'), + ('\u{1ee8a}', '\u{1ee8a}'), + ('\u{1ee9c}', '\u{1eea0}'), + ('\u{1eea4}', '\u{1eea4}'), + ('\u{1eeaa}', '\u{1eeaa}'), + ('\u{1eebc}', '\u{1eeef}'), + ('\u{1eef2}', '\u{1efff}'), + ('\u{1f02c}', '\u{1f02f}'), + ('\u{1f094}', '\u{1f09f}'), + ('\u{1f0af}', '\u{1f0b0}'), + ('\u{1f0c0}', '\u{1f0c0}'), + ('\u{1f0d0}', '\u{1f0d0}'), + ('\u{1f0f6}', '\u{1f0ff}'), + ('\u{1f1ae}', '\u{1f1e5}'), + ('\u{1f203}', '\u{1f20f}'), + ('\u{1f23c}', '\u{1f23f}'), + ('\u{1f249}', '\u{1f24f}'), + ('\u{1f252}', '\u{1f25f}'), + ('\u{1f266}', '\u{1f2ff}'), + ('\u{1f6d8}', '\u{1f6db}'), + ('\u{1f6ed}', '\u{1f6ef}'), + ('\u{1f6fd}', '\u{1f6ff}'), + ('\u{1f777}', '\u{1f77a}'), + ('\u{1f7da}', '\u{1f7df}'), + ('\u{1f7ec}', '\u{1f7ef}'), + ('\u{1f7f1}', 
'\u{1f7ff}'), + ('\u{1f80c}', '\u{1f80f}'), + ('\u{1f848}', '\u{1f84f}'), + ('\u{1f85a}', '\u{1f85f}'), + ('\u{1f888}', '\u{1f88f}'), + ('\u{1f8ae}', '\u{1f8af}'), + ('\u{1f8bc}', '\u{1f8bf}'), + ('\u{1f8c2}', '\u{1f8ff}'), + ('\u{1fa54}', '\u{1fa5f}'), + ('\u{1fa6e}', '\u{1fa6f}'), + ('\u{1fa7d}', '\u{1fa7f}'), + ('\u{1fa8a}', '\u{1fa8e}'), + ('\u{1fac7}', '\u{1facd}'), + ('\u{1fadd}', '\u{1fade}'), + ('\u{1faea}', '\u{1faef}'), + ('\u{1faf9}', '\u{1faff}'), + ('\u{1fb93}', '\u{1fb93}'), + ('\u{1fbfa}', '\u{1ffff}'), + ('\u{2a6e0}', '\u{2a6ff}'), + ('\u{2b73a}', '\u{2b73f}'), + ('\u{2b81e}', '\u{2b81f}'), + ('\u{2cea2}', '\u{2ceaf}'), + ('\u{2ebe1}', '\u{2ebef}'), + ('\u{2ee5e}', '\u{2f7ff}'), + ('\u{2fa1e}', '\u{2ffff}'), + ('\u{3134b}', '\u{3134f}'), + ('\u{323b0}', '\u{e00ff}'), + ('\u{e01f0}', '\u{10ffff}'), +]; + +pub const OTHER_LETTER: &'static [(char, char)] = &[ + ('ª', 'ª'), + ('º', 'º'), + ('ƻ', 'ƻ'), + ('ǀ', 'ǃ'), + ('ʔ', 'ʔ'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('ؠ', 'ؿ'), + ('ف', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۮ', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ࠀ', 'ࠕ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣈ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॲ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 
'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ก', 'ะ'), + ('า', 'ำ'), + ('เ', 'ๅ'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ະ'), + ('າ', 'ຳ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('က', 'ဪ'), + ('ဿ', 'ဿ'), + ('ၐ', 'ၕ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၡ'), + ('ၥ', 'ၦ'), + ('ၮ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႎ', 'ႎ'), + ('ᄀ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛱ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡂ'), + ('ᡄ', 'ᡸ'), + ('ᢀ', 'ᢄ'), + ('ᢇ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', 'ᨖ'), + ('ᨠ', 'ᩔ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), + ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱷ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ℵ', 'ℸ'), + ('ⴰ', 'ⵧ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('〆', '〆'), + ('〼', '〼'), + ('ぁ', 'ゖ'), + ('ゟ', 'ゟ'), + ('ァ', 'ヺ'), + ('ヿ', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꀔ'), + ('ꀖ', 'ꒌ'), + ('ꓐ', 'ꓷ'), + ('ꔀ', 'ꘋ'), + 
('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('ꙮ', 'ꙮ'), + ('ꚠ', 'ꛥ'), + ('ꞏ', 'ꞏ'), + ('ꟷ', 'ꟷ'), + ('ꟻ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧠ', 'ꧤ'), + ('ꧧ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩠ', 'ꩯ'), + ('ꩱ', 'ꩶ'), + ('ꩺ', 'ꩺ'), + ('ꩾ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫜ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫲ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꯀ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('ヲ', 'ッ'), + ('ア', 'ン'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍀'), + ('𐍂', '𐍉'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐑐', '𐒝'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐴀', '𐴣'), + ('𐵊', '𐵍'), + ('𐵏', '𐵏'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), 
+ ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + ('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('𑝀', '𑝆'), + ('𑠀', '𑠫'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', '𑩐'), + ('𑩜', '𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', '𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵃', '𖵪'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝼊', '𝼊'), + ('𞄀', '𞄬'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓪'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', 
'𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const OTHER_NUMBER: &'static [(char, char)] = &[ + ('²', '³'), + ('¹', '¹'), + ('¼', '¾'), + ('৴', '৹'), + ('୲', '୷'), + ('௰', '௲'), + ('౸', '౾'), + ('൘', '൞'), + ('൰', '൸'), + ('༪', '༳'), + ('፩', '፼'), + ('៰', '៹'), + ('᧚', '᧚'), + ('⁰', '⁰'), + ('⁴', '⁹'), + ('₀', '₉'), + ('⅐', '⅟'), + ('↉', '↉'), + ('①', '⒛'), + ('⓪', '⓿'), + ('❶', '➓'), + ('⳽', '⳽'), + ('㆒', '㆕'), + ('㈠', '㈩'), + ('㉈', '㉏'), + ('㉑', '㉟'), + ('㊀', '㊉'), + ('㊱', '㊿'), + ('꠰', '꠵'), + ('𐄇', '𐄳'), + ('𐅵', '𐅸'), + ('𐆊', '𐆋'), + ('𐋡', '𐋻'), + ('𐌠', '𐌣'), + ('𐡘', '𐡟'), + ('𐡹', '𐡿'), + ('𐢧', '𐢯'), + ('𐣻', '𐣿'), + ('𐤖', '𐤛'), + ('𐦼', '𐦽'), + ('𐧀', '𐧏'), + ('𐧒', '𐧿'), + ('𐩀', '𐩈'), + ('𐩽', '𐩾'), + ('𐪝', '𐪟'), + ('𐫫', '𐫯'), + ('𐭘', '𐭟'), + ('𐭸', '𐭿'), + ('𐮩', '𐮯'), + ('𐳺', '𐳿'), + ('𐹠', '𐹾'), + ('𐼝', '𐼦'), + ('𐽑', '𐽔'), + ('𐿅', '𐿋'), + ('𑁒', '𑁥'), + ('𑇡', '𑇴'), + ('𑜺', '𑜻'), + ('𑣪', '𑣲'), + ('𑱚', '𑱬'), + ('𑿀', '𑿔'), + ('𖭛', '𖭡'), + ('𖺀', '𖺖'), + ('𝋀', '𝋓'), + ('𝋠', '𝋳'), + ('𝍠', '𝍸'), + ('𞣇', '𞣏'), + ('𞱱', '𞲫'), + ('𞲭', '𞲯'), + ('𞲱', '𞲴'), + ('𞴁', '𞴭'), + ('𞴯', '𞴽'), + ('🄀', '🄌'), +]; + +pub const OTHER_PUNCTUATION: &'static [(char, char)] = &[ + ('!', '#'), + ('%', '\''), + ('*', '*'), + (',', ','), + ('.', '/'), + (':', ';'), + ('?', '@'), + ('\\', '\\'), + ('¡', '¡'), + ('§', '§'), + ('¶', '·'), + ('¿', '¿'), + (';', ';'), + ('·', '·'), + ('՚', '՟'), + ('։', '։'), + ('׀', '׀'), + ('׃', '׃'), + ('׆', '׆'), + ('׳', '״'), + ('؉', '؊'), + ('،', '؍'), + ('؛', '؛'), + ('؝', '؟'), + ('٪', '٭'), + ('۔', '۔'), + ('܀', '܍'), + ('߷', '߹'), + ('࠰', '࠾'), + ('࡞', '࡞'), + ('।', '॥'), + ('॰', '॰'), + ('৽', '৽'), + ('੶', '੶'), + ('૰', '૰'), + ('౷', '౷'), + ('಄', '಄'), + ('෴', '෴'), + ('๏', '๏'), + 
('๚', '๛'), + ('༄', '༒'), + ('༔', '༔'), + ('྅', '྅'), + ('࿐', '࿔'), + ('࿙', '࿚'), + ('၊', '၏'), + ('჻', '჻'), + ('፠', '፨'), + ('᙮', '᙮'), + ('᛫', '᛭'), + ('᜵', '᜶'), + ('។', '៖'), + ('៘', '៚'), + ('᠀', '᠅'), + ('᠇', '᠊'), + ('᥄', '᥅'), + ('᨞', '᨟'), + ('᪠', '᪦'), + ('᪨', '᪭'), + ('᭎', '᭏'), + ('᭚', '᭠'), + ('᭽', '᭿'), + ('᯼', '᯿'), + ('᰻', '᰿'), + ('᱾', '᱿'), + ('᳀', '᳇'), + ('᳓', '᳓'), + ('‖', '‗'), + ('†', '‧'), + ('‰', '‸'), + ('※', '‾'), + ('⁁', '⁃'), + ('⁇', '⁑'), + ('⁓', '⁓'), + ('⁕', '⁞'), + ('⳹', '⳼'), + ('⳾', '⳿'), + ('⵰', '⵰'), + ('⸀', '⸁'), + ('⸆', '⸈'), + ('⸋', '⸋'), + ('⸎', '⸖'), + ('⸘', '⸙'), + ('⸛', '⸛'), + ('⸞', '⸟'), + ('⸪', '⸮'), + ('⸰', '⸹'), + ('⸼', '⸿'), + ('⹁', '⹁'), + ('⹃', '⹏'), + ('⹒', '⹔'), + ('、', '〃'), + ('〽', '〽'), + ('・', '・'), + ('꓾', '꓿'), + ('꘍', '꘏'), + ('꙳', '꙳'), + ('꙾', '꙾'), + ('꛲', '꛷'), + ('꡴', '꡷'), + ('꣎', '꣏'), + ('꣸', '꣺'), + ('꣼', '꣼'), + ('꤮', '꤯'), + ('꥟', '꥟'), + ('꧁', '꧍'), + ('꧞', '꧟'), + ('꩜', '꩟'), + ('꫞', '꫟'), + ('꫰', '꫱'), + ('꯫', '꯫'), + ('︐', '︖'), + ('︙', '︙'), + ('︰', '︰'), + ('﹅', '﹆'), + ('﹉', '﹌'), + ('﹐', '﹒'), + ('﹔', '﹗'), + ('﹟', '﹡'), + ('﹨', '﹨'), + ('﹪', '﹫'), + ('!', '#'), + ('%', '''), + ('*', '*'), + (',', ','), + ('.', '/'), + (':', ';'), + ('?', '@'), + ('\', '\'), + ('。', '。'), + ('、', '・'), + ('𐄀', '𐄂'), + ('𐎟', '𐎟'), + ('𐏐', '𐏐'), + ('𐕯', '𐕯'), + ('𐡗', '𐡗'), + ('𐤟', '𐤟'), + ('𐤿', '𐤿'), + ('𐩐', '𐩘'), + ('𐩿', '𐩿'), + ('𐫰', '𐫶'), + ('𐬹', '𐬿'), + ('𐮙', '𐮜'), + ('𐽕', '𐽙'), + ('𐾆', '𐾉'), + ('𑁇', '𑁍'), + ('𑂻', '𑂼'), + ('𑂾', '𑃁'), + ('𑅀', '𑅃'), + ('𑅴', '𑅵'), + ('𑇅', '𑇈'), + ('𑇍', '𑇍'), + ('𑇛', '𑇛'), + ('𑇝', '𑇟'), + ('𑈸', '𑈽'), + ('𑊩', '𑊩'), + ('𑏔', '𑏕'), + ('𑏗', '𑏘'), + ('𑑋', '𑑏'), + ('𑑚', '𑑛'), + ('𑑝', '𑑝'), + ('𑓆', '𑓆'), + ('𑗁', '𑗗'), + ('𑙁', '𑙃'), + ('𑙠', '𑙬'), + ('𑚹', '𑚹'), + ('𑜼', '𑜾'), + ('𑠻', '𑠻'), + ('𑥄', '𑥆'), + ('𑧢', '𑧢'), + ('𑨿', '𑩆'), + ('𑪚', '𑪜'), + ('𑪞', '𑪢'), + ('𑬀', '𑬉'), + ('𑯡', '𑯡'), + ('𑱁', '𑱅'), + ('𑱰', '𑱱'), + ('𑻷', '𑻸'), + ('𑽃', '𑽏'), + ('𑿿', '𑿿'), + ('𒑰', '𒑴'), + ('𒿱', '𒿲'), 
+ ('𖩮', '𖩯'), + ('𖫵', '𖫵'), + ('𖬷', '𖬻'), + ('𖭄', '𖭄'), + ('𖵭', '𖵯'), + ('𖺗', '𖺚'), + ('𖿢', '𖿢'), + ('𛲟', '𛲟'), + ('𝪇', '𝪋'), + ('𞗿', '𞗿'), + ('𞥞', '𞥟'), +]; + +pub const OTHER_SYMBOL: &'static [(char, char)] = &[ + ('¦', '¦'), + ('©', '©'), + ('®', '®'), + ('°', '°'), + ('҂', '҂'), + ('֍', '֎'), + ('؎', '؏'), + ('۞', '۞'), + ('۩', '۩'), + ('۽', '۾'), + ('߶', '߶'), + ('৺', '৺'), + ('୰', '୰'), + ('௳', '௸'), + ('௺', '௺'), + ('౿', '౿'), + ('൏', '൏'), + ('൹', '൹'), + ('༁', '༃'), + ('༓', '༓'), + ('༕', '༗'), + ('༚', '༟'), + ('༴', '༴'), + ('༶', '༶'), + ('༸', '༸'), + ('྾', '࿅'), + ('࿇', '࿌'), + ('࿎', '࿏'), + ('࿕', '࿘'), + ('႞', '႟'), + ('᎐', '᎙'), + ('᙭', '᙭'), + ('᥀', '᥀'), + ('᧞', '᧿'), + ('᭡', '᭪'), + ('᭴', '᭼'), + ('℀', '℁'), + ('℃', '℆'), + ('℈', '℉'), + ('℔', '℔'), + ('№', '℗'), + ('℞', '℣'), + ('℥', '℥'), + ('℧', '℧'), + ('℩', '℩'), + ('℮', '℮'), + ('℺', '℻'), + ('⅊', '⅊'), + ('⅌', '⅍'), + ('⅏', '⅏'), + ('↊', '↋'), + ('↕', '↙'), + ('↜', '↟'), + ('↡', '↢'), + ('↤', '↥'), + ('↧', '↭'), + ('↯', '⇍'), + ('⇐', '⇑'), + ('⇓', '⇓'), + ('⇕', '⇳'), + ('⌀', '⌇'), + ('⌌', '⌟'), + ('⌢', '⌨'), + ('⌫', '⍻'), + ('⍽', '⎚'), + ('⎴', '⏛'), + ('⏢', '␩'), + ('⑀', '⑊'), + ('⒜', 'ⓩ'), + ('─', '▶'), + ('▸', '◀'), + ('◂', '◷'), + ('☀', '♮'), + ('♰', '❧'), + ('➔', '➿'), + ('⠀', '⣿'), + ('⬀', '⬯'), + ('⭅', '⭆'), + ('⭍', '⭳'), + ('⭶', '⮕'), + ('⮗', '⯿'), + ('⳥', '⳪'), + ('⹐', '⹑'), + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('⿰', '⿿'), + ('〄', '〄'), + ('〒', '〓'), + ('〠', '〠'), + ('〶', '〷'), + ('〾', '〿'), + ('㆐', '㆑'), + ('㆖', '㆟'), + ('㇀', '㇥'), + ('㇯', '㇯'), + ('㈀', '㈞'), + ('㈪', '㉇'), + ('㉐', '㉐'), + ('㉠', '㉿'), + ('㊊', '㊰'), + ('㋀', '㏿'), + ('䷀', '䷿'), + ('꒐', '꓆'), + ('꠨', '꠫'), + ('꠶', '꠷'), + ('꠹', '꠹'), + ('꩷', '꩹'), + ('﵀', '﵏'), + ('﷏', '﷏'), + ('﷽', '﷿'), + ('¦', '¦'), + ('│', '│'), + ('■', '○'), + ('', '�'), + ('𐄷', '𐄿'), + ('𐅹', '𐆉'), + ('𐆌', '𐆎'), + ('𐆐', '𐆜'), + ('𐆠', '𐆠'), + ('𐇐', '𐇼'), + ('𐡷', '𐡸'), + ('𐫈', '𐫈'), + ('𑜿', '𑜿'), + ('𑿕', '𑿜'), + ('𑿡', '𑿱'), + ('𖬼', '𖬿'), + ('𖭅', 
'𖭅'), + ('𛲜', '𛲜'), + ('𜰀', '𜳯'), + ('𜴀', '𜺳'), + ('𜽐', '𜿃'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄩', '𝅘𝅥𝅲'), + ('𝅪', '𝅬'), + ('𝆃', '𝆄'), + ('𝆌', '𝆩'), + ('𝆮', '𝇪'), + ('𝈀', '𝉁'), + ('𝉅', '𝉅'), + ('𝌀', '𝍖'), + ('𝠀', '𝧿'), + ('𝨷', '𝨺'), + ('𝩭', '𝩴'), + ('𝩶', '𝪃'), + ('𝪅', '𝪆'), + ('𞅏', '𞅏'), + ('𞲬', '𞲬'), + ('𞴮', '𞴮'), + ('🀀', '🀫'), + ('🀰', '🂓'), + ('🂠', '🂮'), + ('🂱', '🂿'), + ('🃁', '🃏'), + ('🃑', '🃵'), + ('🄍', '🆭'), + ('🇦', '🈂'), + ('🈐', '🈻'), + ('🉀', '🉈'), + ('🉐', '🉑'), + ('🉠', '🉥'), + ('🌀', '🏺'), + ('🐀', '🛗'), + ('🛜', '🛬'), + ('🛰', '🛼'), + ('🜀', '🝶'), + ('🝻', '🟙'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), + ('🢰', '🢻'), + ('🣀', '🣁'), + ('🤀', '🩓'), + ('🩠', '🩭'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), + ('🬀', '🮒'), + ('🮔', '🯯'), +]; + +pub const PARAGRAPH_SEPARATOR: &'static [(char, char)] = + &[('\u{2029}', '\u{2029}')]; + +pub const PRIVATE_USE: &'static [(char, char)] = &[ + ('\u{e000}', '\u{f8ff}'), + ('\u{f0000}', '\u{ffffd}'), + ('\u{100000}', '\u{10fffd}'), +]; + +pub const PUNCTUATION: &'static [(char, char)] = &[ + ('!', '#'), + ('%', '*'), + (',', '/'), + (':', ';'), + ('?', '@'), + ('[', ']'), + ('_', '_'), + ('{', '{'), + ('}', '}'), + ('¡', '¡'), + ('§', '§'), + ('«', '«'), + ('¶', '·'), + ('»', '»'), + ('¿', '¿'), + (';', ';'), + ('·', '·'), + ('՚', '՟'), + ('։', '֊'), + ('־', '־'), + ('׀', '׀'), + ('׃', '׃'), + ('׆', '׆'), + ('׳', '״'), + ('؉', '؊'), + ('،', '؍'), + ('؛', '؛'), + ('؝', '؟'), + ('٪', '٭'), + ('۔', '۔'), + ('܀', '܍'), + ('߷', '߹'), + ('࠰', '࠾'), + ('࡞', '࡞'), + ('।', '॥'), + ('॰', '॰'), + ('৽', '৽'), + ('੶', '੶'), + ('૰', '૰'), + ('౷', '౷'), + ('಄', '಄'), + ('෴', '෴'), + ('๏', '๏'), + ('๚', '๛'), + ('༄', '༒'), + ('༔', '༔'), + ('༺', '༽'), + ('྅', '྅'), + ('࿐', '࿔'), + ('࿙', '࿚'), + ('၊', '၏'), + ('჻', '჻'), + ('፠', '፨'), + ('᐀', '᐀'), + ('᙮', '᙮'), + ('᚛', '᚜'), + ('᛫', '᛭'), + ('᜵', '᜶'), + ('។', '៖'), + ('៘', '៚'), + ('᠀', '᠊'), + ('᥄', 
'᥅'), + ('᨞', '᨟'), + ('᪠', '᪦'), + ('᪨', '᪭'), + ('᭎', '᭏'), + ('᭚', '᭠'), + ('᭽', '᭿'), + ('᯼', '᯿'), + ('᰻', '᰿'), + ('᱾', '᱿'), + ('᳀', '᳇'), + ('᳓', '᳓'), + ('‐', '‧'), + ('‰', '⁃'), + ('⁅', '⁑'), + ('⁓', '⁞'), + ('⁽', '⁾'), + ('₍', '₎'), + ('⌈', '⌋'), + ('〈', '〉'), + ('❨', '❵'), + ('⟅', '⟆'), + ('⟦', '⟯'), + ('⦃', '⦘'), + ('⧘', '⧛'), + ('⧼', '⧽'), + ('⳹', '⳼'), + ('⳾', '⳿'), + ('⵰', '⵰'), + ('⸀', '⸮'), + ('⸰', '⹏'), + ('⹒', '⹝'), + ('、', '〃'), + ('〈', '】'), + ('〔', '〟'), + ('〰', '〰'), + ('〽', '〽'), + ('゠', '゠'), + ('・', '・'), + ('꓾', '꓿'), + ('꘍', '꘏'), + ('꙳', '꙳'), + ('꙾', '꙾'), + ('꛲', '꛷'), + ('꡴', '꡷'), + ('꣎', '꣏'), + ('꣸', '꣺'), + ('꣼', '꣼'), + ('꤮', '꤯'), + ('꥟', '꥟'), + ('꧁', '꧍'), + ('꧞', '꧟'), + ('꩜', '꩟'), + ('꫞', '꫟'), + ('꫰', '꫱'), + ('꯫', '꯫'), + ('﴾', '﴿'), + ('︐', '︙'), + ('︰', '﹒'), + ('﹔', '﹡'), + ('﹣', '﹣'), + ('﹨', '﹨'), + ('﹪', '﹫'), + ('!', '#'), + ('%', '*'), + (',', '/'), + (':', ';'), + ('?', '@'), + ('[', ']'), + ('_', '_'), + ('{', '{'), + ('}', '}'), + ('⦅', '・'), + ('𐄀', '𐄂'), + ('𐎟', '𐎟'), + ('𐏐', '𐏐'), + ('𐕯', '𐕯'), + ('𐡗', '𐡗'), + ('𐤟', '𐤟'), + ('𐤿', '𐤿'), + ('𐩐', '𐩘'), + ('𐩿', '𐩿'), + ('𐫰', '𐫶'), + ('𐬹', '𐬿'), + ('𐮙', '𐮜'), + ('𐵮', '𐵮'), + ('𐺭', '𐺭'), + ('𐽕', '𐽙'), + ('𐾆', '𐾉'), + ('𑁇', '𑁍'), + ('𑂻', '𑂼'), + ('𑂾', '𑃁'), + ('𑅀', '𑅃'), + ('𑅴', '𑅵'), + ('𑇅', '𑇈'), + ('𑇍', '𑇍'), + ('𑇛', '𑇛'), + ('𑇝', '𑇟'), + ('𑈸', '𑈽'), + ('𑊩', '𑊩'), + ('𑏔', '𑏕'), + ('𑏗', '𑏘'), + ('𑑋', '𑑏'), + ('𑑚', '𑑛'), + ('𑑝', '𑑝'), + ('𑓆', '𑓆'), + ('𑗁', '𑗗'), + ('𑙁', '𑙃'), + ('𑙠', '𑙬'), + ('𑚹', '𑚹'), + ('𑜼', '𑜾'), + ('𑠻', '𑠻'), + ('𑥄', '𑥆'), + ('𑧢', '𑧢'), + ('𑨿', '𑩆'), + ('𑪚', '𑪜'), + ('𑪞', '𑪢'), + ('𑬀', '𑬉'), + ('𑯡', '𑯡'), + ('𑱁', '𑱅'), + ('𑱰', '𑱱'), + ('𑻷', '𑻸'), + ('𑽃', '𑽏'), + ('𑿿', '𑿿'), + ('𒑰', '𒑴'), + ('𒿱', '𒿲'), + ('𖩮', '𖩯'), + ('𖫵', '𖫵'), + ('𖬷', '𖬻'), + ('𖭄', '𖭄'), + ('𖵭', '𖵯'), + ('𖺗', '𖺚'), + ('𖿢', '𖿢'), + ('𛲟', '𛲟'), + ('𝪇', '𝪋'), + ('𞗿', '𞗿'), + ('𞥞', '𞥟'), +]; + +pub const SEPARATOR: &'static [(char, char)] = &[ + (' ', ' '), + ('\u{a0}', 
'\u{a0}'), + ('\u{1680}', '\u{1680}'), + ('\u{2000}', '\u{200a}'), + ('\u{2028}', '\u{2029}'), + ('\u{202f}', '\u{202f}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; + +pub const SPACE_SEPARATOR: &'static [(char, char)] = &[ + (' ', ' '), + ('\u{a0}', '\u{a0}'), + ('\u{1680}', '\u{1680}'), + ('\u{2000}', '\u{200a}'), + ('\u{202f}', '\u{202f}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; + +pub const SPACING_MARK: &'static [(char, char)] = &[ + ('ः', 'ः'), + ('ऻ', 'ऻ'), + ('ा', 'ी'), + ('ॉ', 'ौ'), + ('ॎ', 'ॏ'), + ('ং', 'ঃ'), + ('\u{9be}', 'ী'), + ('ে', 'ৈ'), + ('ো', 'ৌ'), + ('\u{9d7}', '\u{9d7}'), + ('ਃ', 'ਃ'), + ('ਾ', 'ੀ'), + ('ઃ', 'ઃ'), + ('ા', 'ી'), + ('ૉ', 'ૉ'), + ('ો', 'ૌ'), + ('ଂ', 'ଃ'), + ('\u{b3e}', '\u{b3e}'), + ('ୀ', 'ୀ'), + ('େ', 'ୈ'), + ('ୋ', 'ୌ'), + ('\u{b57}', '\u{b57}'), + ('\u{bbe}', 'ி'), + ('ு', 'ூ'), + ('ெ', 'ை'), + ('ொ', 'ௌ'), + ('\u{bd7}', '\u{bd7}'), + ('ఁ', 'ః'), + ('ు', 'ౄ'), + ('ಂ', 'ಃ'), + ('ಾ', 'ಾ'), + ('\u{cc0}', 'ೄ'), + ('\u{cc7}', '\u{cc8}'), + ('\u{cca}', '\u{ccb}'), + ('\u{cd5}', '\u{cd6}'), + ('ೳ', 'ೳ'), + ('ം', 'ഃ'), + ('\u{d3e}', 'ീ'), + ('െ', 'ൈ'), + ('ൊ', 'ൌ'), + ('\u{d57}', '\u{d57}'), + ('ං', 'ඃ'), + ('\u{dcf}', 'ෑ'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('༾', '༿'), + ('ཿ', 'ཿ'), + ('ါ', 'ာ'), + ('ေ', 'ေ'), + ('း', 'း'), + ('ျ', 'ြ'), + ('ၖ', 'ၗ'), + ('ၢ', 'ၤ'), + ('ၧ', 'ၭ'), + ('ႃ', 'ႄ'), + ('ႇ', 'ႌ'), + ('ႏ', 'ႏ'), + ('ႚ', 'ႜ'), + ('\u{1715}', '\u{1715}'), + ('\u{1734}', '\u{1734}'), + ('ា', 'ា'), + ('ើ', 'ៅ'), + ('ះ', 'ៈ'), + ('ᤣ', 'ᤦ'), + ('ᤩ', 'ᤫ'), + ('ᤰ', 'ᤱ'), + ('ᤳ', 'ᤸ'), + ('ᨙ', 'ᨚ'), + ('ᩕ', 'ᩕ'), + ('ᩗ', 'ᩗ'), + ('ᩡ', 'ᩡ'), + ('ᩣ', 'ᩤ'), + ('ᩭ', 'ᩲ'), + ('ᬄ', 'ᬄ'), + ('\u{1b35}', '\u{1b35}'), + ('\u{1b3b}', '\u{1b3b}'), + ('\u{1b3d}', 'ᭁ'), + ('\u{1b43}', '\u{1b44}'), + ('ᮂ', 'ᮂ'), + ('ᮡ', 'ᮡ'), + ('ᮦ', 'ᮧ'), + ('\u{1baa}', '\u{1baa}'), + ('ᯧ', 'ᯧ'), + ('ᯪ', 'ᯬ'), + ('ᯮ', 'ᯮ'), + ('\u{1bf2}', '\u{1bf3}'), + ('ᰤ', 'ᰫ'), + ('ᰴ', 'ᰵ'), + ('᳡', '᳡'), + ('᳷', '᳷'), + ('\u{302e}', 
'\u{302f}'), + ('ꠣ', 'ꠤ'), + ('ꠧ', 'ꠧ'), + ('ꢀ', 'ꢁ'), + ('ꢴ', 'ꣃ'), + ('ꥒ', '\u{a953}'), + ('ꦃ', 'ꦃ'), + ('ꦴ', 'ꦵ'), + ('ꦺ', 'ꦻ'), + ('ꦾ', '\u{a9c0}'), + ('ꨯ', 'ꨰ'), + ('ꨳ', 'ꨴ'), + ('ꩍ', 'ꩍ'), + ('ꩻ', 'ꩻ'), + ('ꩽ', 'ꩽ'), + ('ꫫ', 'ꫫ'), + ('ꫮ', 'ꫯ'), + ('ꫵ', 'ꫵ'), + ('ꯣ', 'ꯤ'), + ('ꯦ', 'ꯧ'), + ('ꯩ', 'ꯪ'), + ('꯬', '꯬'), + ('𑀀', '𑀀'), + ('𑀂', '𑀂'), + ('𑂂', '𑂂'), + ('𑂰', '𑂲'), + ('𑂷', '𑂸'), + ('𑄬', '𑄬'), + ('𑅅', '𑅆'), + ('𑆂', '𑆂'), + ('𑆳', '𑆵'), + ('𑆿', '\u{111c0}'), + ('𑇎', '𑇎'), + ('𑈬', '𑈮'), + ('𑈲', '𑈳'), + ('\u{11235}', '\u{11235}'), + ('𑋠', '𑋢'), + ('𑌂', '𑌃'), + ('\u{1133e}', '𑌿'), + ('𑍁', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('𑍢', '𑍣'), + ('\u{113b8}', '𑎺'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏍'), + ('\u{113cf}', '\u{113cf}'), + ('𑐵', '𑐷'), + ('𑑀', '𑑁'), + ('𑑅', '𑑅'), + ('\u{114b0}', '𑒲'), + ('𑒹', '𑒹'), + ('𑒻', '𑒾'), + ('𑓁', '𑓁'), + ('\u{115af}', '𑖱'), + ('𑖸', '𑖻'), + ('𑖾', '𑖾'), + ('𑘰', '𑘲'), + ('𑘻', '𑘼'), + ('𑘾', '𑘾'), + ('𑚬', '𑚬'), + ('𑚮', '𑚯'), + ('\u{116b6}', '\u{116b6}'), + ('𑜞', '𑜞'), + ('𑜠', '𑜡'), + ('𑜦', '𑜦'), + ('𑠬', '𑠮'), + ('𑠸', '𑠸'), + ('\u{11930}', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193d}', '\u{1193d}'), + ('𑥀', '𑥀'), + ('𑥂', '𑥂'), + ('𑧑', '𑧓'), + ('𑧜', '𑧟'), + ('𑧤', '𑧤'), + ('𑨹', '𑨹'), + ('𑩗', '𑩘'), + ('𑪗', '𑪗'), + ('𑰯', '𑰯'), + ('𑰾', '𑰾'), + ('𑲩', '𑲩'), + ('𑲱', '𑲱'), + ('𑲴', '𑲴'), + ('𑶊', '𑶎'), + ('𑶓', '𑶔'), + ('𑶖', '𑶖'), + ('𑻵', '𑻶'), + ('𑼃', '𑼃'), + ('𑼴', '𑼵'), + ('𑼾', '𑼿'), + ('\u{11f41}', '\u{11f41}'), + ('𖄪', '𖄬'), + ('𖽑', '𖾇'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1d165}', '\u{1d166}'), + ('\u{1d16d}', '\u{1d172}'), +]; + +pub const SYMBOL: &'static [(char, char)] = &[ + ('$', '$'), + ('+', '+'), + ('<', '>'), + ('^', '^'), + ('`', '`'), + ('|', '|'), + ('~', '~'), + ('¢', '¦'), + ('¨', '©'), + ('¬', '¬'), + ('®', '±'), + ('´', '´'), + ('¸', '¸'), + ('×', '×'), + ('÷', '÷'), + ('˂', '˅'), + ('˒', '˟'), + ('˥', '˫'), + ('˭', '˭'), + ('˯', '˿'), + ('͵', 
'͵'), + ('΄', '΅'), + ('϶', '϶'), + ('҂', '҂'), + ('֍', '֏'), + ('؆', '؈'), + ('؋', '؋'), + ('؎', '؏'), + ('۞', '۞'), + ('۩', '۩'), + ('۽', '۾'), + ('߶', '߶'), + ('߾', '߿'), + ('࢈', '࢈'), + ('৲', '৳'), + ('৺', '৻'), + ('૱', '૱'), + ('୰', '୰'), + ('௳', '௺'), + ('౿', '౿'), + ('൏', '൏'), + ('൹', '൹'), + ('฿', '฿'), + ('༁', '༃'), + ('༓', '༓'), + ('༕', '༗'), + ('༚', '༟'), + ('༴', '༴'), + ('༶', '༶'), + ('༸', '༸'), + ('྾', '࿅'), + ('࿇', '࿌'), + ('࿎', '࿏'), + ('࿕', '࿘'), + ('႞', '႟'), + ('᎐', '᎙'), + ('᙭', '᙭'), + ('៛', '៛'), + ('᥀', '᥀'), + ('᧞', '᧿'), + ('᭡', '᭪'), + ('᭴', '᭼'), + ('᾽', '᾽'), + ('᾿', '῁'), + ('῍', '῏'), + ('῝', '῟'), + ('῭', '`'), + ('´', '῾'), + ('⁄', '⁄'), + ('⁒', '⁒'), + ('⁺', '⁼'), + ('₊', '₌'), + ('₠', '⃀'), + ('℀', '℁'), + ('℃', '℆'), + ('℈', '℉'), + ('℔', '℔'), + ('№', '℘'), + ('℞', '℣'), + ('℥', '℥'), + ('℧', '℧'), + ('℩', '℩'), + ('℮', '℮'), + ('℺', '℻'), + ('⅀', '⅄'), + ('⅊', '⅍'), + ('⅏', '⅏'), + ('↊', '↋'), + ('←', '⌇'), + ('⌌', '⌨'), + ('⌫', '␩'), + ('⑀', '⑊'), + ('⒜', 'ⓩ'), + ('─', '❧'), + ('➔', '⟄'), + ('⟇', '⟥'), + ('⟰', '⦂'), + ('⦙', '⧗'), + ('⧜', '⧻'), + ('⧾', '⭳'), + ('⭶', '⮕'), + ('⮗', '⯿'), + ('⳥', '⳪'), + ('⹐', '⹑'), + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('⿰', '⿿'), + ('〄', '〄'), + ('〒', '〓'), + ('〠', '〠'), + ('〶', '〷'), + ('〾', '〿'), + ('゛', '゜'), + ('㆐', '㆑'), + ('㆖', '㆟'), + ('㇀', '㇥'), + ('㇯', '㇯'), + ('㈀', '㈞'), + ('㈪', '㉇'), + ('㉐', '㉐'), + ('㉠', '㉿'), + ('㊊', '㊰'), + ('㋀', '㏿'), + ('䷀', '䷿'), + ('꒐', '꓆'), + ('꜀', '꜖'), + ('꜠', '꜡'), + ('꞉', '꞊'), + ('꠨', '꠫'), + ('꠶', '꠹'), + ('꩷', '꩹'), + ('꭛', '꭛'), + ('꭪', '꭫'), + ('﬩', '﬩'), + ('﮲', '﯂'), + ('﵀', '﵏'), + ('﷏', '﷏'), + ('﷼', '﷿'), + ('﹢', '﹢'), + ('﹤', '﹦'), + ('﹩', '﹩'), + ('$', '$'), + ('+', '+'), + ('<', '>'), + ('^', '^'), + ('`', '`'), + ('|', '|'), + ('~', '~'), + ('¢', '₩'), + ('│', '○'), + ('', '�'), + ('𐄷', '𐄿'), + ('𐅹', '𐆉'), + ('𐆌', '𐆎'), + ('𐆐', '𐆜'), + ('𐆠', '𐆠'), + ('𐇐', '𐇼'), + ('𐡷', '𐡸'), + ('𐫈', '𐫈'), + ('𐶎', '𐶏'), + ('𑜿', '𑜿'), + ('𑿕', '𑿱'), + 
('𖬼', '𖬿'), + ('𖭅', '𖭅'), + ('𛲜', '𛲜'), + ('𜰀', '𜳯'), + ('𜴀', '𜺳'), + ('𜽐', '𜿃'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄩', '𝅘𝅥𝅲'), + ('𝅪', '𝅬'), + ('𝆃', '𝆄'), + ('𝆌', '𝆩'), + ('𝆮', '𝇪'), + ('𝈀', '𝉁'), + ('𝉅', '𝉅'), + ('𝌀', '𝍖'), + ('𝛁', '𝛁'), + ('𝛛', '𝛛'), + ('𝛻', '𝛻'), + ('𝜕', '𝜕'), + ('𝜵', '𝜵'), + ('𝝏', '𝝏'), + ('𝝯', '𝝯'), + ('𝞉', '𝞉'), + ('𝞩', '𝞩'), + ('𝟃', '𝟃'), + ('𝠀', '𝧿'), + ('𝨷', '𝨺'), + ('𝩭', '𝩴'), + ('𝩶', '𝪃'), + ('𝪅', '𝪆'), + ('𞅏', '𞅏'), + ('𞋿', '𞋿'), + ('𞲬', '𞲬'), + ('𞲰', '𞲰'), + ('𞴮', '𞴮'), + ('𞻰', '𞻱'), + ('🀀', '🀫'), + ('🀰', '🂓'), + ('🂠', '🂮'), + ('🂱', '🂿'), + ('🃁', '🃏'), + ('🃑', '🃵'), + ('🄍', '🆭'), + ('🇦', '🈂'), + ('🈐', '🈻'), + ('🉀', '🉈'), + ('🉐', '🉑'), + ('🉠', '🉥'), + ('🌀', '🛗'), + ('🛜', '🛬'), + ('🛰', '🛼'), + ('🜀', '🝶'), + ('🝻', '🟙'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), + ('🢰', '🢻'), + ('🣀', '🣁'), + ('🤀', '🩓'), + ('🩠', '🩭'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), + ('🬀', '🮒'), + ('🮔', '🯯'), +]; + +pub const TITLECASE_LETTER: &'static [(char, char)] = &[ + ('Dž', 'Dž'), + ('Lj', 'Lj'), + ('Nj', 'Nj'), + ('Dz', 'Dz'), + ('ᾈ', 'ᾏ'), + ('ᾘ', 'ᾟ'), + ('ᾨ', 'ᾯ'), + ('ᾼ', 'ᾼ'), + ('ῌ', 'ῌ'), + ('ῼ', 'ῼ'), +]; + +pub const UNASSIGNED: &'static [(char, char)] = &[ + ('\u{378}', '\u{379}'), + ('\u{380}', '\u{383}'), + ('\u{38b}', '\u{38b}'), + ('\u{38d}', '\u{38d}'), + ('\u{3a2}', '\u{3a2}'), + ('\u{530}', '\u{530}'), + ('\u{557}', '\u{558}'), + ('\u{58b}', '\u{58c}'), + ('\u{590}', '\u{590}'), + ('\u{5c8}', '\u{5cf}'), + ('\u{5eb}', '\u{5ee}'), + ('\u{5f5}', '\u{5ff}'), + ('\u{70e}', '\u{70e}'), + ('\u{74b}', '\u{74c}'), + ('\u{7b2}', '\u{7bf}'), + ('\u{7fb}', '\u{7fc}'), + ('\u{82e}', '\u{82f}'), + ('\u{83f}', '\u{83f}'), + ('\u{85c}', '\u{85d}'), + ('\u{85f}', '\u{85f}'), + ('\u{86b}', '\u{86f}'), + ('\u{88f}', '\u{88f}'), + ('\u{892}', '\u{896}'), + ('\u{984}', '\u{984}'), + ('\u{98d}', '\u{98e}'), + ('\u{991}', '\u{992}'), + ('\u{9a9}', '\u{9a9}'), + 
('\u{9b1}', '\u{9b1}'), + ('\u{9b3}', '\u{9b5}'), + ('\u{9ba}', '\u{9bb}'), + ('\u{9c5}', '\u{9c6}'), + ('\u{9c9}', '\u{9ca}'), + ('\u{9cf}', '\u{9d6}'), + ('\u{9d8}', '\u{9db}'), + ('\u{9de}', '\u{9de}'), + ('\u{9e4}', '\u{9e5}'), + ('\u{9ff}', '\u{a00}'), + ('\u{a04}', '\u{a04}'), + ('\u{a0b}', '\u{a0e}'), + ('\u{a11}', '\u{a12}'), + ('\u{a29}', '\u{a29}'), + ('\u{a31}', '\u{a31}'), + ('\u{a34}', '\u{a34}'), + ('\u{a37}', '\u{a37}'), + ('\u{a3a}', '\u{a3b}'), + ('\u{a3d}', '\u{a3d}'), + ('\u{a43}', '\u{a46}'), + ('\u{a49}', '\u{a4a}'), + ('\u{a4e}', '\u{a50}'), + ('\u{a52}', '\u{a58}'), + ('\u{a5d}', '\u{a5d}'), + ('\u{a5f}', '\u{a65}'), + ('\u{a77}', '\u{a80}'), + ('\u{a84}', '\u{a84}'), + ('\u{a8e}', '\u{a8e}'), + ('\u{a92}', '\u{a92}'), + ('\u{aa9}', '\u{aa9}'), + ('\u{ab1}', '\u{ab1}'), + ('\u{ab4}', '\u{ab4}'), + ('\u{aba}', '\u{abb}'), + ('\u{ac6}', '\u{ac6}'), + ('\u{aca}', '\u{aca}'), + ('\u{ace}', '\u{acf}'), + ('\u{ad1}', '\u{adf}'), + ('\u{ae4}', '\u{ae5}'), + ('\u{af2}', '\u{af8}'), + ('\u{b00}', '\u{b00}'), + ('\u{b04}', '\u{b04}'), + ('\u{b0d}', '\u{b0e}'), + ('\u{b11}', '\u{b12}'), + ('\u{b29}', '\u{b29}'), + ('\u{b31}', '\u{b31}'), + ('\u{b34}', '\u{b34}'), + ('\u{b3a}', '\u{b3b}'), + ('\u{b45}', '\u{b46}'), + ('\u{b49}', '\u{b4a}'), + ('\u{b4e}', '\u{b54}'), + ('\u{b58}', '\u{b5b}'), + ('\u{b5e}', '\u{b5e}'), + ('\u{b64}', '\u{b65}'), + ('\u{b78}', '\u{b81}'), + ('\u{b84}', '\u{b84}'), + ('\u{b8b}', '\u{b8d}'), + ('\u{b91}', '\u{b91}'), + ('\u{b96}', '\u{b98}'), + ('\u{b9b}', '\u{b9b}'), + ('\u{b9d}', '\u{b9d}'), + ('\u{ba0}', '\u{ba2}'), + ('\u{ba5}', '\u{ba7}'), + ('\u{bab}', '\u{bad}'), + ('\u{bba}', '\u{bbd}'), + ('\u{bc3}', '\u{bc5}'), + ('\u{bc9}', '\u{bc9}'), + ('\u{bce}', '\u{bcf}'), + ('\u{bd1}', '\u{bd6}'), + ('\u{bd8}', '\u{be5}'), + ('\u{bfb}', '\u{bff}'), + ('\u{c0d}', '\u{c0d}'), + ('\u{c11}', '\u{c11}'), + ('\u{c29}', '\u{c29}'), + ('\u{c3a}', '\u{c3b}'), + ('\u{c45}', '\u{c45}'), + ('\u{c49}', '\u{c49}'), + ('\u{c4e}', '\u{c54}'), 
+ ('\u{c57}', '\u{c57}'), + ('\u{c5b}', '\u{c5c}'), + ('\u{c5e}', '\u{c5f}'), + ('\u{c64}', '\u{c65}'), + ('\u{c70}', '\u{c76}'), + ('\u{c8d}', '\u{c8d}'), + ('\u{c91}', '\u{c91}'), + ('\u{ca9}', '\u{ca9}'), + ('\u{cb4}', '\u{cb4}'), + ('\u{cba}', '\u{cbb}'), + ('\u{cc5}', '\u{cc5}'), + ('\u{cc9}', '\u{cc9}'), + ('\u{cce}', '\u{cd4}'), + ('\u{cd7}', '\u{cdc}'), + ('\u{cdf}', '\u{cdf}'), + ('\u{ce4}', '\u{ce5}'), + ('\u{cf0}', '\u{cf0}'), + ('\u{cf4}', '\u{cff}'), + ('\u{d0d}', '\u{d0d}'), + ('\u{d11}', '\u{d11}'), + ('\u{d45}', '\u{d45}'), + ('\u{d49}', '\u{d49}'), + ('\u{d50}', '\u{d53}'), + ('\u{d64}', '\u{d65}'), + ('\u{d80}', '\u{d80}'), + ('\u{d84}', '\u{d84}'), + ('\u{d97}', '\u{d99}'), + ('\u{db2}', '\u{db2}'), + ('\u{dbc}', '\u{dbc}'), + ('\u{dbe}', '\u{dbf}'), + ('\u{dc7}', '\u{dc9}'), + ('\u{dcb}', '\u{dce}'), + ('\u{dd5}', '\u{dd5}'), + ('\u{dd7}', '\u{dd7}'), + ('\u{de0}', '\u{de5}'), + ('\u{df0}', '\u{df1}'), + ('\u{df5}', '\u{e00}'), + ('\u{e3b}', '\u{e3e}'), + ('\u{e5c}', '\u{e80}'), + ('\u{e83}', '\u{e83}'), + ('\u{e85}', '\u{e85}'), + ('\u{e8b}', '\u{e8b}'), + ('\u{ea4}', '\u{ea4}'), + ('\u{ea6}', '\u{ea6}'), + ('\u{ebe}', '\u{ebf}'), + ('\u{ec5}', '\u{ec5}'), + ('\u{ec7}', '\u{ec7}'), + ('\u{ecf}', '\u{ecf}'), + ('\u{eda}', '\u{edb}'), + ('\u{ee0}', '\u{eff}'), + ('\u{f48}', '\u{f48}'), + ('\u{f6d}', '\u{f70}'), + ('\u{f98}', '\u{f98}'), + ('\u{fbd}', '\u{fbd}'), + ('\u{fcd}', '\u{fcd}'), + ('\u{fdb}', '\u{fff}'), + ('\u{10c6}', '\u{10c6}'), + ('\u{10c8}', '\u{10cc}'), + ('\u{10ce}', '\u{10cf}'), + ('\u{1249}', '\u{1249}'), + ('\u{124e}', '\u{124f}'), + ('\u{1257}', '\u{1257}'), + ('\u{1259}', '\u{1259}'), + ('\u{125e}', '\u{125f}'), + ('\u{1289}', '\u{1289}'), + ('\u{128e}', '\u{128f}'), + ('\u{12b1}', '\u{12b1}'), + ('\u{12b6}', '\u{12b7}'), + ('\u{12bf}', '\u{12bf}'), + ('\u{12c1}', '\u{12c1}'), + ('\u{12c6}', '\u{12c7}'), + ('\u{12d7}', '\u{12d7}'), + ('\u{1311}', '\u{1311}'), + ('\u{1316}', '\u{1317}'), + ('\u{135b}', '\u{135c}'), + 
('\u{137d}', '\u{137f}'), + ('\u{139a}', '\u{139f}'), + ('\u{13f6}', '\u{13f7}'), + ('\u{13fe}', '\u{13ff}'), + ('\u{169d}', '\u{169f}'), + ('\u{16f9}', '\u{16ff}'), + ('\u{1716}', '\u{171e}'), + ('\u{1737}', '\u{173f}'), + ('\u{1754}', '\u{175f}'), + ('\u{176d}', '\u{176d}'), + ('\u{1771}', '\u{1771}'), + ('\u{1774}', '\u{177f}'), + ('\u{17de}', '\u{17df}'), + ('\u{17ea}', '\u{17ef}'), + ('\u{17fa}', '\u{17ff}'), + ('\u{181a}', '\u{181f}'), + ('\u{1879}', '\u{187f}'), + ('\u{18ab}', '\u{18af}'), + ('\u{18f6}', '\u{18ff}'), + ('\u{191f}', '\u{191f}'), + ('\u{192c}', '\u{192f}'), + ('\u{193c}', '\u{193f}'), + ('\u{1941}', '\u{1943}'), + ('\u{196e}', '\u{196f}'), + ('\u{1975}', '\u{197f}'), + ('\u{19ac}', '\u{19af}'), + ('\u{19ca}', '\u{19cf}'), + ('\u{19db}', '\u{19dd}'), + ('\u{1a1c}', '\u{1a1d}'), + ('\u{1a5f}', '\u{1a5f}'), + ('\u{1a7d}', '\u{1a7e}'), + ('\u{1a8a}', '\u{1a8f}'), + ('\u{1a9a}', '\u{1a9f}'), + ('\u{1aae}', '\u{1aaf}'), + ('\u{1acf}', '\u{1aff}'), + ('\u{1b4d}', '\u{1b4d}'), + ('\u{1bf4}', '\u{1bfb}'), + ('\u{1c38}', '\u{1c3a}'), + ('\u{1c4a}', '\u{1c4c}'), + ('\u{1c8b}', '\u{1c8f}'), + ('\u{1cbb}', '\u{1cbc}'), + ('\u{1cc8}', '\u{1ccf}'), + ('\u{1cfb}', '\u{1cff}'), + ('\u{1f16}', '\u{1f17}'), + ('\u{1f1e}', '\u{1f1f}'), + ('\u{1f46}', '\u{1f47}'), + ('\u{1f4e}', '\u{1f4f}'), + ('\u{1f58}', '\u{1f58}'), + ('\u{1f5a}', '\u{1f5a}'), + ('\u{1f5c}', '\u{1f5c}'), + ('\u{1f5e}', '\u{1f5e}'), + ('\u{1f7e}', '\u{1f7f}'), + ('\u{1fb5}', '\u{1fb5}'), + ('\u{1fc5}', '\u{1fc5}'), + ('\u{1fd4}', '\u{1fd5}'), + ('\u{1fdc}', '\u{1fdc}'), + ('\u{1ff0}', '\u{1ff1}'), + ('\u{1ff5}', '\u{1ff5}'), + ('\u{1fff}', '\u{1fff}'), + ('\u{2065}', '\u{2065}'), + ('\u{2072}', '\u{2073}'), + ('\u{208f}', '\u{208f}'), + ('\u{209d}', '\u{209f}'), + ('\u{20c1}', '\u{20cf}'), + ('\u{20f1}', '\u{20ff}'), + ('\u{218c}', '\u{218f}'), + ('\u{242a}', '\u{243f}'), + ('\u{244b}', '\u{245f}'), + ('\u{2b74}', '\u{2b75}'), + ('\u{2b96}', '\u{2b96}'), + ('\u{2cf4}', '\u{2cf8}'), + 
('\u{2d26}', '\u{2d26}'), + ('\u{2d28}', '\u{2d2c}'), + ('\u{2d2e}', '\u{2d2f}'), + ('\u{2d68}', '\u{2d6e}'), + ('\u{2d71}', '\u{2d7e}'), + ('\u{2d97}', '\u{2d9f}'), + ('\u{2da7}', '\u{2da7}'), + ('\u{2daf}', '\u{2daf}'), + ('\u{2db7}', '\u{2db7}'), + ('\u{2dbf}', '\u{2dbf}'), + ('\u{2dc7}', '\u{2dc7}'), + ('\u{2dcf}', '\u{2dcf}'), + ('\u{2dd7}', '\u{2dd7}'), + ('\u{2ddf}', '\u{2ddf}'), + ('\u{2e5e}', '\u{2e7f}'), + ('\u{2e9a}', '\u{2e9a}'), + ('\u{2ef4}', '\u{2eff}'), + ('\u{2fd6}', '\u{2fef}'), + ('\u{3040}', '\u{3040}'), + ('\u{3097}', '\u{3098}'), + ('\u{3100}', '\u{3104}'), + ('\u{3130}', '\u{3130}'), + ('\u{318f}', '\u{318f}'), + ('\u{31e6}', '\u{31ee}'), + ('\u{321f}', '\u{321f}'), + ('\u{a48d}', '\u{a48f}'), + ('\u{a4c7}', '\u{a4cf}'), + ('\u{a62c}', '\u{a63f}'), + ('\u{a6f8}', '\u{a6ff}'), + ('\u{a7ce}', '\u{a7cf}'), + ('\u{a7d2}', '\u{a7d2}'), + ('\u{a7d4}', '\u{a7d4}'), + ('\u{a7dd}', '\u{a7f1}'), + ('\u{a82d}', '\u{a82f}'), + ('\u{a83a}', '\u{a83f}'), + ('\u{a878}', '\u{a87f}'), + ('\u{a8c6}', '\u{a8cd}'), + ('\u{a8da}', '\u{a8df}'), + ('\u{a954}', '\u{a95e}'), + ('\u{a97d}', '\u{a97f}'), + ('\u{a9ce}', '\u{a9ce}'), + ('\u{a9da}', '\u{a9dd}'), + ('\u{a9ff}', '\u{a9ff}'), + ('\u{aa37}', '\u{aa3f}'), + ('\u{aa4e}', '\u{aa4f}'), + ('\u{aa5a}', '\u{aa5b}'), + ('\u{aac3}', '\u{aada}'), + ('\u{aaf7}', '\u{ab00}'), + ('\u{ab07}', '\u{ab08}'), + ('\u{ab0f}', '\u{ab10}'), + ('\u{ab17}', '\u{ab1f}'), + ('\u{ab27}', '\u{ab27}'), + ('\u{ab2f}', '\u{ab2f}'), + ('\u{ab6c}', '\u{ab6f}'), + ('\u{abee}', '\u{abef}'), + ('\u{abfa}', '\u{abff}'), + ('\u{d7a4}', '\u{d7af}'), + ('\u{d7c7}', '\u{d7ca}'), + ('\u{d7fc}', '\u{d7ff}'), + ('\u{fa6e}', '\u{fa6f}'), + ('\u{fada}', '\u{faff}'), + ('\u{fb07}', '\u{fb12}'), + ('\u{fb18}', '\u{fb1c}'), + ('\u{fb37}', '\u{fb37}'), + ('\u{fb3d}', '\u{fb3d}'), + ('\u{fb3f}', '\u{fb3f}'), + ('\u{fb42}', '\u{fb42}'), + ('\u{fb45}', '\u{fb45}'), + ('\u{fbc3}', '\u{fbd2}'), + ('\u{fd90}', '\u{fd91}'), + ('\u{fdc8}', '\u{fdce}'), + 
('\u{fdd0}', '\u{fdef}'), + ('\u{fe1a}', '\u{fe1f}'), + ('\u{fe53}', '\u{fe53}'), + ('\u{fe67}', '\u{fe67}'), + ('\u{fe6c}', '\u{fe6f}'), + ('\u{fe75}', '\u{fe75}'), + ('\u{fefd}', '\u{fefe}'), + ('\u{ff00}', '\u{ff00}'), + ('\u{ffbf}', '\u{ffc1}'), + ('\u{ffc8}', '\u{ffc9}'), + ('\u{ffd0}', '\u{ffd1}'), + ('\u{ffd8}', '\u{ffd9}'), + ('\u{ffdd}', '\u{ffdf}'), + ('\u{ffe7}', '\u{ffe7}'), + ('\u{ffef}', '\u{fff8}'), + ('\u{fffe}', '\u{ffff}'), + ('\u{1000c}', '\u{1000c}'), + ('\u{10027}', '\u{10027}'), + ('\u{1003b}', '\u{1003b}'), + ('\u{1003e}', '\u{1003e}'), + ('\u{1004e}', '\u{1004f}'), + ('\u{1005e}', '\u{1007f}'), + ('\u{100fb}', '\u{100ff}'), + ('\u{10103}', '\u{10106}'), + ('\u{10134}', '\u{10136}'), + ('\u{1018f}', '\u{1018f}'), + ('\u{1019d}', '\u{1019f}'), + ('\u{101a1}', '\u{101cf}'), + ('\u{101fe}', '\u{1027f}'), + ('\u{1029d}', '\u{1029f}'), + ('\u{102d1}', '\u{102df}'), + ('\u{102fc}', '\u{102ff}'), + ('\u{10324}', '\u{1032c}'), + ('\u{1034b}', '\u{1034f}'), + ('\u{1037b}', '\u{1037f}'), + ('\u{1039e}', '\u{1039e}'), + ('\u{103c4}', '\u{103c7}'), + ('\u{103d6}', '\u{103ff}'), + ('\u{1049e}', '\u{1049f}'), + ('\u{104aa}', '\u{104af}'), + ('\u{104d4}', '\u{104d7}'), + ('\u{104fc}', '\u{104ff}'), + ('\u{10528}', '\u{1052f}'), + ('\u{10564}', '\u{1056e}'), + ('\u{1057b}', '\u{1057b}'), + ('\u{1058b}', '\u{1058b}'), + ('\u{10593}', '\u{10593}'), + ('\u{10596}', '\u{10596}'), + ('\u{105a2}', '\u{105a2}'), + ('\u{105b2}', '\u{105b2}'), + ('\u{105ba}', '\u{105ba}'), + ('\u{105bd}', '\u{105bf}'), + ('\u{105f4}', '\u{105ff}'), + ('\u{10737}', '\u{1073f}'), + ('\u{10756}', '\u{1075f}'), + ('\u{10768}', '\u{1077f}'), + ('\u{10786}', '\u{10786}'), + ('\u{107b1}', '\u{107b1}'), + ('\u{107bb}', '\u{107ff}'), + ('\u{10806}', '\u{10807}'), + ('\u{10809}', '\u{10809}'), + ('\u{10836}', '\u{10836}'), + ('\u{10839}', '\u{1083b}'), + ('\u{1083d}', '\u{1083e}'), + ('\u{10856}', '\u{10856}'), + ('\u{1089f}', '\u{108a6}'), + ('\u{108b0}', '\u{108df}'), + ('\u{108f3}', 
'\u{108f3}'), + ('\u{108f6}', '\u{108fa}'), + ('\u{1091c}', '\u{1091e}'), + ('\u{1093a}', '\u{1093e}'), + ('\u{10940}', '\u{1097f}'), + ('\u{109b8}', '\u{109bb}'), + ('\u{109d0}', '\u{109d1}'), + ('\u{10a04}', '\u{10a04}'), + ('\u{10a07}', '\u{10a0b}'), + ('\u{10a14}', '\u{10a14}'), + ('\u{10a18}', '\u{10a18}'), + ('\u{10a36}', '\u{10a37}'), + ('\u{10a3b}', '\u{10a3e}'), + ('\u{10a49}', '\u{10a4f}'), + ('\u{10a59}', '\u{10a5f}'), + ('\u{10aa0}', '\u{10abf}'), + ('\u{10ae7}', '\u{10aea}'), + ('\u{10af7}', '\u{10aff}'), + ('\u{10b36}', '\u{10b38}'), + ('\u{10b56}', '\u{10b57}'), + ('\u{10b73}', '\u{10b77}'), + ('\u{10b92}', '\u{10b98}'), + ('\u{10b9d}', '\u{10ba8}'), + ('\u{10bb0}', '\u{10bff}'), + ('\u{10c49}', '\u{10c7f}'), + ('\u{10cb3}', '\u{10cbf}'), + ('\u{10cf3}', '\u{10cf9}'), + ('\u{10d28}', '\u{10d2f}'), + ('\u{10d3a}', '\u{10d3f}'), + ('\u{10d66}', '\u{10d68}'), + ('\u{10d86}', '\u{10d8d}'), + ('\u{10d90}', '\u{10e5f}'), + ('\u{10e7f}', '\u{10e7f}'), + ('\u{10eaa}', '\u{10eaa}'), + ('\u{10eae}', '\u{10eaf}'), + ('\u{10eb2}', '\u{10ec1}'), + ('\u{10ec5}', '\u{10efb}'), + ('\u{10f28}', '\u{10f2f}'), + ('\u{10f5a}', '\u{10f6f}'), + ('\u{10f8a}', '\u{10faf}'), + ('\u{10fcc}', '\u{10fdf}'), + ('\u{10ff7}', '\u{10fff}'), + ('\u{1104e}', '\u{11051}'), + ('\u{11076}', '\u{1107e}'), + ('\u{110c3}', '\u{110cc}'), + ('\u{110ce}', '\u{110cf}'), + ('\u{110e9}', '\u{110ef}'), + ('\u{110fa}', '\u{110ff}'), + ('\u{11135}', '\u{11135}'), + ('\u{11148}', '\u{1114f}'), + ('\u{11177}', '\u{1117f}'), + ('\u{111e0}', '\u{111e0}'), + ('\u{111f5}', '\u{111ff}'), + ('\u{11212}', '\u{11212}'), + ('\u{11242}', '\u{1127f}'), + ('\u{11287}', '\u{11287}'), + ('\u{11289}', '\u{11289}'), + ('\u{1128e}', '\u{1128e}'), + ('\u{1129e}', '\u{1129e}'), + ('\u{112aa}', '\u{112af}'), + ('\u{112eb}', '\u{112ef}'), + ('\u{112fa}', '\u{112ff}'), + ('\u{11304}', '\u{11304}'), + ('\u{1130d}', '\u{1130e}'), + ('\u{11311}', '\u{11312}'), + ('\u{11329}', '\u{11329}'), + ('\u{11331}', '\u{11331}'), + 
('\u{11334}', '\u{11334}'), + ('\u{1133a}', '\u{1133a}'), + ('\u{11345}', '\u{11346}'), + ('\u{11349}', '\u{1134a}'), + ('\u{1134e}', '\u{1134f}'), + ('\u{11351}', '\u{11356}'), + ('\u{11358}', '\u{1135c}'), + ('\u{11364}', '\u{11365}'), + ('\u{1136d}', '\u{1136f}'), + ('\u{11375}', '\u{1137f}'), + ('\u{1138a}', '\u{1138a}'), + ('\u{1138c}', '\u{1138d}'), + ('\u{1138f}', '\u{1138f}'), + ('\u{113b6}', '\u{113b6}'), + ('\u{113c1}', '\u{113c1}'), + ('\u{113c3}', '\u{113c4}'), + ('\u{113c6}', '\u{113c6}'), + ('\u{113cb}', '\u{113cb}'), + ('\u{113d6}', '\u{113d6}'), + ('\u{113d9}', '\u{113e0}'), + ('\u{113e3}', '\u{113ff}'), + ('\u{1145c}', '\u{1145c}'), + ('\u{11462}', '\u{1147f}'), + ('\u{114c8}', '\u{114cf}'), + ('\u{114da}', '\u{1157f}'), + ('\u{115b6}', '\u{115b7}'), + ('\u{115de}', '\u{115ff}'), + ('\u{11645}', '\u{1164f}'), + ('\u{1165a}', '\u{1165f}'), + ('\u{1166d}', '\u{1167f}'), + ('\u{116ba}', '\u{116bf}'), + ('\u{116ca}', '\u{116cf}'), + ('\u{116e4}', '\u{116ff}'), + ('\u{1171b}', '\u{1171c}'), + ('\u{1172c}', '\u{1172f}'), + ('\u{11747}', '\u{117ff}'), + ('\u{1183c}', '\u{1189f}'), + ('\u{118f3}', '\u{118fe}'), + ('\u{11907}', '\u{11908}'), + ('\u{1190a}', '\u{1190b}'), + ('\u{11914}', '\u{11914}'), + ('\u{11917}', '\u{11917}'), + ('\u{11936}', '\u{11936}'), + ('\u{11939}', '\u{1193a}'), + ('\u{11947}', '\u{1194f}'), + ('\u{1195a}', '\u{1199f}'), + ('\u{119a8}', '\u{119a9}'), + ('\u{119d8}', '\u{119d9}'), + ('\u{119e5}', '\u{119ff}'), + ('\u{11a48}', '\u{11a4f}'), + ('\u{11aa3}', '\u{11aaf}'), + ('\u{11af9}', '\u{11aff}'), + ('\u{11b0a}', '\u{11bbf}'), + ('\u{11be2}', '\u{11bef}'), + ('\u{11bfa}', '\u{11bff}'), + ('\u{11c09}', '\u{11c09}'), + ('\u{11c37}', '\u{11c37}'), + ('\u{11c46}', '\u{11c4f}'), + ('\u{11c6d}', '\u{11c6f}'), + ('\u{11c90}', '\u{11c91}'), + ('\u{11ca8}', '\u{11ca8}'), + ('\u{11cb7}', '\u{11cff}'), + ('\u{11d07}', '\u{11d07}'), + ('\u{11d0a}', '\u{11d0a}'), + ('\u{11d37}', '\u{11d39}'), + ('\u{11d3b}', '\u{11d3b}'), + ('\u{11d3e}', 
'\u{11d3e}'), + ('\u{11d48}', '\u{11d4f}'), + ('\u{11d5a}', '\u{11d5f}'), + ('\u{11d66}', '\u{11d66}'), + ('\u{11d69}', '\u{11d69}'), + ('\u{11d8f}', '\u{11d8f}'), + ('\u{11d92}', '\u{11d92}'), + ('\u{11d99}', '\u{11d9f}'), + ('\u{11daa}', '\u{11edf}'), + ('\u{11ef9}', '\u{11eff}'), + ('\u{11f11}', '\u{11f11}'), + ('\u{11f3b}', '\u{11f3d}'), + ('\u{11f5b}', '\u{11faf}'), + ('\u{11fb1}', '\u{11fbf}'), + ('\u{11ff2}', '\u{11ffe}'), + ('\u{1239a}', '\u{123ff}'), + ('\u{1246f}', '\u{1246f}'), + ('\u{12475}', '\u{1247f}'), + ('\u{12544}', '\u{12f8f}'), + ('\u{12ff3}', '\u{12fff}'), + ('\u{13456}', '\u{1345f}'), + ('\u{143fb}', '\u{143ff}'), + ('\u{14647}', '\u{160ff}'), + ('\u{1613a}', '\u{167ff}'), + ('\u{16a39}', '\u{16a3f}'), + ('\u{16a5f}', '\u{16a5f}'), + ('\u{16a6a}', '\u{16a6d}'), + ('\u{16abf}', '\u{16abf}'), + ('\u{16aca}', '\u{16acf}'), + ('\u{16aee}', '\u{16aef}'), + ('\u{16af6}', '\u{16aff}'), + ('\u{16b46}', '\u{16b4f}'), + ('\u{16b5a}', '\u{16b5a}'), + ('\u{16b62}', '\u{16b62}'), + ('\u{16b78}', '\u{16b7c}'), + ('\u{16b90}', '\u{16d3f}'), + ('\u{16d7a}', '\u{16e3f}'), + ('\u{16e9b}', '\u{16eff}'), + ('\u{16f4b}', '\u{16f4e}'), + ('\u{16f88}', '\u{16f8e}'), + ('\u{16fa0}', '\u{16fdf}'), + ('\u{16fe5}', '\u{16fef}'), + ('\u{16ff2}', '\u{16fff}'), + ('\u{187f8}', '\u{187ff}'), + ('\u{18cd6}', '\u{18cfe}'), + ('\u{18d09}', '\u{1afef}'), + ('\u{1aff4}', '\u{1aff4}'), + ('\u{1affc}', '\u{1affc}'), + ('\u{1afff}', '\u{1afff}'), + ('\u{1b123}', '\u{1b131}'), + ('\u{1b133}', '\u{1b14f}'), + ('\u{1b153}', '\u{1b154}'), + ('\u{1b156}', '\u{1b163}'), + ('\u{1b168}', '\u{1b16f}'), + ('\u{1b2fc}', '\u{1bbff}'), + ('\u{1bc6b}', '\u{1bc6f}'), + ('\u{1bc7d}', '\u{1bc7f}'), + ('\u{1bc89}', '\u{1bc8f}'), + ('\u{1bc9a}', '\u{1bc9b}'), + ('\u{1bca4}', '\u{1cbff}'), + ('\u{1ccfa}', '\u{1ccff}'), + ('\u{1ceb4}', '\u{1ceff}'), + ('\u{1cf2e}', '\u{1cf2f}'), + ('\u{1cf47}', '\u{1cf4f}'), + ('\u{1cfc4}', '\u{1cfff}'), + ('\u{1d0f6}', '\u{1d0ff}'), + ('\u{1d127}', '\u{1d128}'), + 
('\u{1d1eb}', '\u{1d1ff}'), + ('\u{1d246}', '\u{1d2bf}'), + ('\u{1d2d4}', '\u{1d2df}'), + ('\u{1d2f4}', '\u{1d2ff}'), + ('\u{1d357}', '\u{1d35f}'), + ('\u{1d379}', '\u{1d3ff}'), + ('\u{1d455}', '\u{1d455}'), + ('\u{1d49d}', '\u{1d49d}'), + ('\u{1d4a0}', '\u{1d4a1}'), + ('\u{1d4a3}', '\u{1d4a4}'), + ('\u{1d4a7}', '\u{1d4a8}'), + ('\u{1d4ad}', '\u{1d4ad}'), + ('\u{1d4ba}', '\u{1d4ba}'), + ('\u{1d4bc}', '\u{1d4bc}'), + ('\u{1d4c4}', '\u{1d4c4}'), + ('\u{1d506}', '\u{1d506}'), + ('\u{1d50b}', '\u{1d50c}'), + ('\u{1d515}', '\u{1d515}'), + ('\u{1d51d}', '\u{1d51d}'), + ('\u{1d53a}', '\u{1d53a}'), + ('\u{1d53f}', '\u{1d53f}'), + ('\u{1d545}', '\u{1d545}'), + ('\u{1d547}', '\u{1d549}'), + ('\u{1d551}', '\u{1d551}'), + ('\u{1d6a6}', '\u{1d6a7}'), + ('\u{1d7cc}', '\u{1d7cd}'), + ('\u{1da8c}', '\u{1da9a}'), + ('\u{1daa0}', '\u{1daa0}'), + ('\u{1dab0}', '\u{1deff}'), + ('\u{1df1f}', '\u{1df24}'), + ('\u{1df2b}', '\u{1dfff}'), + ('\u{1e007}', '\u{1e007}'), + ('\u{1e019}', '\u{1e01a}'), + ('\u{1e022}', '\u{1e022}'), + ('\u{1e025}', '\u{1e025}'), + ('\u{1e02b}', '\u{1e02f}'), + ('\u{1e06e}', '\u{1e08e}'), + ('\u{1e090}', '\u{1e0ff}'), + ('\u{1e12d}', '\u{1e12f}'), + ('\u{1e13e}', '\u{1e13f}'), + ('\u{1e14a}', '\u{1e14d}'), + ('\u{1e150}', '\u{1e28f}'), + ('\u{1e2af}', '\u{1e2bf}'), + ('\u{1e2fa}', '\u{1e2fe}'), + ('\u{1e300}', '\u{1e4cf}'), + ('\u{1e4fa}', '\u{1e5cf}'), + ('\u{1e5fb}', '\u{1e5fe}'), + ('\u{1e600}', '\u{1e7df}'), + ('\u{1e7e7}', '\u{1e7e7}'), + ('\u{1e7ec}', '\u{1e7ec}'), + ('\u{1e7ef}', '\u{1e7ef}'), + ('\u{1e7ff}', '\u{1e7ff}'), + ('\u{1e8c5}', '\u{1e8c6}'), + ('\u{1e8d7}', '\u{1e8ff}'), + ('\u{1e94c}', '\u{1e94f}'), + ('\u{1e95a}', '\u{1e95d}'), + ('\u{1e960}', '\u{1ec70}'), + ('\u{1ecb5}', '\u{1ed00}'), + ('\u{1ed3e}', '\u{1edff}'), + ('\u{1ee04}', '\u{1ee04}'), + ('\u{1ee20}', '\u{1ee20}'), + ('\u{1ee23}', '\u{1ee23}'), + ('\u{1ee25}', '\u{1ee26}'), + ('\u{1ee28}', '\u{1ee28}'), + ('\u{1ee33}', '\u{1ee33}'), + ('\u{1ee38}', '\u{1ee38}'), + ('\u{1ee3a}', 
'\u{1ee3a}'), + ('\u{1ee3c}', '\u{1ee41}'), + ('\u{1ee43}', '\u{1ee46}'), + ('\u{1ee48}', '\u{1ee48}'), + ('\u{1ee4a}', '\u{1ee4a}'), + ('\u{1ee4c}', '\u{1ee4c}'), + ('\u{1ee50}', '\u{1ee50}'), + ('\u{1ee53}', '\u{1ee53}'), + ('\u{1ee55}', '\u{1ee56}'), + ('\u{1ee58}', '\u{1ee58}'), + ('\u{1ee5a}', '\u{1ee5a}'), + ('\u{1ee5c}', '\u{1ee5c}'), + ('\u{1ee5e}', '\u{1ee5e}'), + ('\u{1ee60}', '\u{1ee60}'), + ('\u{1ee63}', '\u{1ee63}'), + ('\u{1ee65}', '\u{1ee66}'), + ('\u{1ee6b}', '\u{1ee6b}'), + ('\u{1ee73}', '\u{1ee73}'), + ('\u{1ee78}', '\u{1ee78}'), + ('\u{1ee7d}', '\u{1ee7d}'), + ('\u{1ee7f}', '\u{1ee7f}'), + ('\u{1ee8a}', '\u{1ee8a}'), + ('\u{1ee9c}', '\u{1eea0}'), + ('\u{1eea4}', '\u{1eea4}'), + ('\u{1eeaa}', '\u{1eeaa}'), + ('\u{1eebc}', '\u{1eeef}'), + ('\u{1eef2}', '\u{1efff}'), + ('\u{1f02c}', '\u{1f02f}'), + ('\u{1f094}', '\u{1f09f}'), + ('\u{1f0af}', '\u{1f0b0}'), + ('\u{1f0c0}', '\u{1f0c0}'), + ('\u{1f0d0}', '\u{1f0d0}'), + ('\u{1f0f6}', '\u{1f0ff}'), + ('\u{1f1ae}', '\u{1f1e5}'), + ('\u{1f203}', '\u{1f20f}'), + ('\u{1f23c}', '\u{1f23f}'), + ('\u{1f249}', '\u{1f24f}'), + ('\u{1f252}', '\u{1f25f}'), + ('\u{1f266}', '\u{1f2ff}'), + ('\u{1f6d8}', '\u{1f6db}'), + ('\u{1f6ed}', '\u{1f6ef}'), + ('\u{1f6fd}', '\u{1f6ff}'), + ('\u{1f777}', '\u{1f77a}'), + ('\u{1f7da}', '\u{1f7df}'), + ('\u{1f7ec}', '\u{1f7ef}'), + ('\u{1f7f1}', '\u{1f7ff}'), + ('\u{1f80c}', '\u{1f80f}'), + ('\u{1f848}', '\u{1f84f}'), + ('\u{1f85a}', '\u{1f85f}'), + ('\u{1f888}', '\u{1f88f}'), + ('\u{1f8ae}', '\u{1f8af}'), + ('\u{1f8bc}', '\u{1f8bf}'), + ('\u{1f8c2}', '\u{1f8ff}'), + ('\u{1fa54}', '\u{1fa5f}'), + ('\u{1fa6e}', '\u{1fa6f}'), + ('\u{1fa7d}', '\u{1fa7f}'), + ('\u{1fa8a}', '\u{1fa8e}'), + ('\u{1fac7}', '\u{1facd}'), + ('\u{1fadd}', '\u{1fade}'), + ('\u{1faea}', '\u{1faef}'), + ('\u{1faf9}', '\u{1faff}'), + ('\u{1fb93}', '\u{1fb93}'), + ('\u{1fbfa}', '\u{1ffff}'), + ('\u{2a6e0}', '\u{2a6ff}'), + ('\u{2b73a}', '\u{2b73f}'), + ('\u{2b81e}', '\u{2b81f}'), + ('\u{2cea2}', '\u{2ceaf}'), + 
('\u{2ebe1}', '\u{2ebef}'), + ('\u{2ee5e}', '\u{2f7ff}'), + ('\u{2fa1e}', '\u{2ffff}'), + ('\u{3134b}', '\u{3134f}'), + ('\u{323b0}', '\u{e0000}'), + ('\u{e0002}', '\u{e001f}'), + ('\u{e0080}', '\u{e00ff}'), + ('\u{e01f0}', '\u{effff}'), + ('\u{ffffe}', '\u{fffff}'), + ('\u{10fffe}', '\u{10ffff}'), +]; + +pub const UPPERCASE_LETTER: &'static [(char, char)] = &[ + ('A', 'Z'), + ('À', 'Ö'), + ('Ø', 'Þ'), + ('Ā', 'Ā'), + ('Ă', 'Ă'), + ('Ą', 'Ą'), + ('Ć', 'Ć'), + ('Ĉ', 'Ĉ'), + ('Ċ', 'Ċ'), + ('Č', 'Č'), + ('Ď', 'Ď'), + ('Đ', 'Đ'), + ('Ē', 'Ē'), + ('Ĕ', 'Ĕ'), + ('Ė', 'Ė'), + ('Ę', 'Ę'), + ('Ě', 'Ě'), + ('Ĝ', 'Ĝ'), + ('Ğ', 'Ğ'), + ('Ġ', 'Ġ'), + ('Ģ', 'Ģ'), + ('Ĥ', 'Ĥ'), + ('Ħ', 'Ħ'), + ('Ĩ', 'Ĩ'), + ('Ī', 'Ī'), + ('Ĭ', 'Ĭ'), + ('Į', 'Į'), + ('İ', 'İ'), + ('IJ', 'IJ'), + ('Ĵ', 'Ĵ'), + ('Ķ', 'Ķ'), + ('Ĺ', 'Ĺ'), + ('Ļ', 'Ļ'), + ('Ľ', 'Ľ'), + ('Ŀ', 'Ŀ'), + ('Ł', 'Ł'), + ('Ń', 'Ń'), + ('Ņ', 'Ņ'), + ('Ň', 'Ň'), + ('Ŋ', 'Ŋ'), + ('Ō', 'Ō'), + ('Ŏ', 'Ŏ'), + ('Ő', 'Ő'), + ('Œ', 'Œ'), + ('Ŕ', 'Ŕ'), + ('Ŗ', 'Ŗ'), + ('Ř', 'Ř'), + ('Ś', 'Ś'), + ('Ŝ', 'Ŝ'), + ('Ş', 'Ş'), + ('Š', 'Š'), + ('Ţ', 'Ţ'), + ('Ť', 'Ť'), + ('Ŧ', 'Ŧ'), + ('Ũ', 'Ũ'), + ('Ū', 'Ū'), + ('Ŭ', 'Ŭ'), + ('Ů', 'Ů'), + ('Ű', 'Ű'), + ('Ų', 'Ų'), + ('Ŵ', 'Ŵ'), + ('Ŷ', 'Ŷ'), + ('Ÿ', 'Ź'), + ('Ż', 'Ż'), + ('Ž', 'Ž'), + ('Ɓ', 'Ƃ'), + ('Ƅ', 'Ƅ'), + ('Ɔ', 'Ƈ'), + ('Ɖ', 'Ƌ'), + ('Ǝ', 'Ƒ'), + ('Ɠ', 'Ɣ'), + ('Ɩ', 'Ƙ'), + ('Ɯ', 'Ɲ'), + ('Ɵ', 'Ơ'), + ('Ƣ', 'Ƣ'), + ('Ƥ', 'Ƥ'), + ('Ʀ', 'Ƨ'), + ('Ʃ', 'Ʃ'), + ('Ƭ', 'Ƭ'), + ('Ʈ', 'Ư'), + ('Ʊ', 'Ƴ'), + ('Ƶ', 'Ƶ'), + ('Ʒ', 'Ƹ'), + ('Ƽ', 'Ƽ'), + ('DŽ', 'DŽ'), + ('LJ', 'LJ'), + ('NJ', 'NJ'), + ('Ǎ', 'Ǎ'), + ('Ǐ', 'Ǐ'), + ('Ǒ', 'Ǒ'), + ('Ǔ', 'Ǔ'), + ('Ǖ', 'Ǖ'), + ('Ǘ', 'Ǘ'), + ('Ǚ', 'Ǚ'), + ('Ǜ', 'Ǜ'), + ('Ǟ', 'Ǟ'), + ('Ǡ', 'Ǡ'), + ('Ǣ', 'Ǣ'), + ('Ǥ', 'Ǥ'), + ('Ǧ', 'Ǧ'), + ('Ǩ', 'Ǩ'), + ('Ǫ', 'Ǫ'), + ('Ǭ', 'Ǭ'), + ('Ǯ', 'Ǯ'), + ('DZ', 'DZ'), + ('Ǵ', 'Ǵ'), + ('Ƕ', 'Ǹ'), + ('Ǻ', 'Ǻ'), + ('Ǽ', 'Ǽ'), + ('Ǿ', 'Ǿ'), + ('Ȁ', 'Ȁ'), + ('Ȃ', 'Ȃ'), + ('Ȅ', 'Ȅ'), + ('Ȇ', 'Ȇ'), + ('Ȉ', 'Ȉ'), + ('Ȋ', 'Ȋ'), + 
('Ȍ', 'Ȍ'), + ('Ȏ', 'Ȏ'), + ('Ȑ', 'Ȑ'), + ('Ȓ', 'Ȓ'), + ('Ȕ', 'Ȕ'), + ('Ȗ', 'Ȗ'), + ('Ș', 'Ș'), + ('Ț', 'Ț'), + ('Ȝ', 'Ȝ'), + ('Ȟ', 'Ȟ'), + ('Ƞ', 'Ƞ'), + ('Ȣ', 'Ȣ'), + ('Ȥ', 'Ȥ'), + ('Ȧ', 'Ȧ'), + ('Ȩ', 'Ȩ'), + ('Ȫ', 'Ȫ'), + ('Ȭ', 'Ȭ'), + ('Ȯ', 'Ȯ'), + ('Ȱ', 'Ȱ'), + ('Ȳ', 'Ȳ'), + ('Ⱥ', 'Ȼ'), + ('Ƚ', 'Ⱦ'), + ('Ɂ', 'Ɂ'), + ('Ƀ', 'Ɇ'), + ('Ɉ', 'Ɉ'), + ('Ɋ', 'Ɋ'), + ('Ɍ', 'Ɍ'), + ('Ɏ', 'Ɏ'), + ('Ͱ', 'Ͱ'), + ('Ͳ', 'Ͳ'), + ('Ͷ', 'Ͷ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ώ'), + ('Α', 'Ρ'), + ('Σ', 'Ϋ'), + ('Ϗ', 'Ϗ'), + ('ϒ', 'ϔ'), + ('Ϙ', 'Ϙ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'Ϣ'), + ('Ϥ', 'Ϥ'), + ('Ϧ', 'Ϧ'), + ('Ϩ', 'Ϩ'), + ('Ϫ', 'Ϫ'), + ('Ϭ', 'Ϭ'), + ('Ϯ', 'Ϯ'), + ('ϴ', 'ϴ'), + ('Ϸ', 'Ϸ'), + ('Ϲ', 'Ϻ'), + ('Ͻ', 'Я'), + ('Ѡ', 'Ѡ'), + ('Ѣ', 'Ѣ'), + ('Ѥ', 'Ѥ'), + ('Ѧ', 'Ѧ'), + ('Ѩ', 'Ѩ'), + ('Ѫ', 'Ѫ'), + ('Ѭ', 'Ѭ'), + ('Ѯ', 'Ѯ'), + ('Ѱ', 'Ѱ'), + ('Ѳ', 'Ѳ'), + ('Ѵ', 'Ѵ'), + ('Ѷ', 'Ѷ'), + ('Ѹ', 'Ѹ'), + ('Ѻ', 'Ѻ'), + ('Ѽ', 'Ѽ'), + ('Ѿ', 'Ѿ'), + ('Ҁ', 'Ҁ'), + ('Ҋ', 'Ҋ'), + ('Ҍ', 'Ҍ'), + ('Ҏ', 'Ҏ'), + ('Ґ', 'Ґ'), + ('Ғ', 'Ғ'), + ('Ҕ', 'Ҕ'), + ('Җ', 'Җ'), + ('Ҙ', 'Ҙ'), + ('Қ', 'Қ'), + ('Ҝ', 'Ҝ'), + ('Ҟ', 'Ҟ'), + ('Ҡ', 'Ҡ'), + ('Ң', 'Ң'), + ('Ҥ', 'Ҥ'), + ('Ҧ', 'Ҧ'), + ('Ҩ', 'Ҩ'), + ('Ҫ', 'Ҫ'), + ('Ҭ', 'Ҭ'), + ('Ү', 'Ү'), + ('Ұ', 'Ұ'), + ('Ҳ', 'Ҳ'), + ('Ҵ', 'Ҵ'), + ('Ҷ', 'Ҷ'), + ('Ҹ', 'Ҹ'), + ('Һ', 'Һ'), + ('Ҽ', 'Ҽ'), + ('Ҿ', 'Ҿ'), + ('Ӏ', 'Ӂ'), + ('Ӄ', 'Ӄ'), + ('Ӆ', 'Ӆ'), + ('Ӈ', 'Ӈ'), + ('Ӊ', 'Ӊ'), + ('Ӌ', 'Ӌ'), + ('Ӎ', 'Ӎ'), + ('Ӑ', 'Ӑ'), + ('Ӓ', 'Ӓ'), + ('Ӕ', 'Ӕ'), + ('Ӗ', 'Ӗ'), + ('Ә', 'Ә'), + ('Ӛ', 'Ӛ'), + ('Ӝ', 'Ӝ'), + ('Ӟ', 'Ӟ'), + ('Ӡ', 'Ӡ'), + ('Ӣ', 'Ӣ'), + ('Ӥ', 'Ӥ'), + ('Ӧ', 'Ӧ'), + ('Ө', 'Ө'), + ('Ӫ', 'Ӫ'), + ('Ӭ', 'Ӭ'), + ('Ӯ', 'Ӯ'), + ('Ӱ', 'Ӱ'), + ('Ӳ', 'Ӳ'), + ('Ӵ', 'Ӵ'), + ('Ӷ', 'Ӷ'), + ('Ӹ', 'Ӹ'), + ('Ӻ', 'Ӻ'), + ('Ӽ', 'Ӽ'), + ('Ӿ', 'Ӿ'), + ('Ԁ', 'Ԁ'), + ('Ԃ', 'Ԃ'), + ('Ԅ', 'Ԅ'), + ('Ԇ', 'Ԇ'), + ('Ԉ', 'Ԉ'), + ('Ԋ', 'Ԋ'), + ('Ԍ', 'Ԍ'), + ('Ԏ', 'Ԏ'), + ('Ԑ', 'Ԑ'), + ('Ԓ', 'Ԓ'), + ('Ԕ', 'Ԕ'), + ('Ԗ', 'Ԗ'), 
+ ('Ԙ', 'Ԙ'), + ('Ԛ', 'Ԛ'), + ('Ԝ', 'Ԝ'), + ('Ԟ', 'Ԟ'), + ('Ԡ', 'Ԡ'), + ('Ԣ', 'Ԣ'), + ('Ԥ', 'Ԥ'), + ('Ԧ', 'Ԧ'), + ('Ԩ', 'Ԩ'), + ('Ԫ', 'Ԫ'), + ('Ԭ', 'Ԭ'), + ('Ԯ', 'Ԯ'), + ('Ա', 'Ֆ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('Ꭰ', 'Ᏽ'), + ('Ᲊ', 'Ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('Ḁ', 'Ḁ'), + ('Ḃ', 'Ḃ'), + ('Ḅ', 'Ḅ'), + ('Ḇ', 'Ḇ'), + ('Ḉ', 'Ḉ'), + ('Ḋ', 'Ḋ'), + ('Ḍ', 'Ḍ'), + ('Ḏ', 'Ḏ'), + ('Ḑ', 'Ḑ'), + ('Ḓ', 'Ḓ'), + ('Ḕ', 'Ḕ'), + ('Ḗ', 'Ḗ'), + ('Ḙ', 'Ḙ'), + ('Ḛ', 'Ḛ'), + ('Ḝ', 'Ḝ'), + ('Ḟ', 'Ḟ'), + ('Ḡ', 'Ḡ'), + ('Ḣ', 'Ḣ'), + ('Ḥ', 'Ḥ'), + ('Ḧ', 'Ḧ'), + ('Ḩ', 'Ḩ'), + ('Ḫ', 'Ḫ'), + ('Ḭ', 'Ḭ'), + ('Ḯ', 'Ḯ'), + ('Ḱ', 'Ḱ'), + ('Ḳ', 'Ḳ'), + ('Ḵ', 'Ḵ'), + ('Ḷ', 'Ḷ'), + ('Ḹ', 'Ḹ'), + ('Ḻ', 'Ḻ'), + ('Ḽ', 'Ḽ'), + ('Ḿ', 'Ḿ'), + ('Ṁ', 'Ṁ'), + ('Ṃ', 'Ṃ'), + ('Ṅ', 'Ṅ'), + ('Ṇ', 'Ṇ'), + ('Ṉ', 'Ṉ'), + ('Ṋ', 'Ṋ'), + ('Ṍ', 'Ṍ'), + ('Ṏ', 'Ṏ'), + ('Ṑ', 'Ṑ'), + ('Ṓ', 'Ṓ'), + ('Ṕ', 'Ṕ'), + ('Ṗ', 'Ṗ'), + ('Ṙ', 'Ṙ'), + ('Ṛ', 'Ṛ'), + ('Ṝ', 'Ṝ'), + ('Ṟ', 'Ṟ'), + ('Ṡ', 'Ṡ'), + ('Ṣ', 'Ṣ'), + ('Ṥ', 'Ṥ'), + ('Ṧ', 'Ṧ'), + ('Ṩ', 'Ṩ'), + ('Ṫ', 'Ṫ'), + ('Ṭ', 'Ṭ'), + ('Ṯ', 'Ṯ'), + ('Ṱ', 'Ṱ'), + ('Ṳ', 'Ṳ'), + ('Ṵ', 'Ṵ'), + ('Ṷ', 'Ṷ'), + ('Ṹ', 'Ṹ'), + ('Ṻ', 'Ṻ'), + ('Ṽ', 'Ṽ'), + ('Ṿ', 'Ṿ'), + ('Ẁ', 'Ẁ'), + ('Ẃ', 'Ẃ'), + ('Ẅ', 'Ẅ'), + ('Ẇ', 'Ẇ'), + ('Ẉ', 'Ẉ'), + ('Ẋ', 'Ẋ'), + ('Ẍ', 'Ẍ'), + ('Ẏ', 'Ẏ'), + ('Ẑ', 'Ẑ'), + ('Ẓ', 'Ẓ'), + ('Ẕ', 'Ẕ'), + ('ẞ', 'ẞ'), + ('Ạ', 'Ạ'), + ('Ả', 'Ả'), + ('Ấ', 'Ấ'), + ('Ầ', 'Ầ'), + ('Ẩ', 'Ẩ'), + ('Ẫ', 'Ẫ'), + ('Ậ', 'Ậ'), + ('Ắ', 'Ắ'), + ('Ằ', 'Ằ'), + ('Ẳ', 'Ẳ'), + ('Ẵ', 'Ẵ'), + ('Ặ', 'Ặ'), + ('Ẹ', 'Ẹ'), + ('Ẻ', 'Ẻ'), + ('Ẽ', 'Ẽ'), + ('Ế', 'Ế'), + ('Ề', 'Ề'), + ('Ể', 'Ể'), + ('Ễ', 'Ễ'), + ('Ệ', 'Ệ'), + ('Ỉ', 'Ỉ'), + ('Ị', 'Ị'), + ('Ọ', 'Ọ'), + ('Ỏ', 'Ỏ'), + ('Ố', 'Ố'), + ('Ồ', 'Ồ'), + ('Ổ', 'Ổ'), + ('Ỗ', 'Ỗ'), + ('Ộ', 'Ộ'), + ('Ớ', 'Ớ'), + ('Ờ', 'Ờ'), + ('Ở', 'Ở'), + ('Ỡ', 'Ỡ'), + ('Ợ', 'Ợ'), + ('Ụ', 'Ụ'), + ('Ủ', 'Ủ'), + ('Ứ', 'Ứ'), + ('Ừ', 'Ừ'), + ('Ử', 'Ử'), + ('Ữ', 'Ữ'), + ('Ự', 'Ự'), + ('Ỳ', 'Ỳ'), + ('Ỵ', 'Ỵ'), + ('Ỷ', 'Ỷ'), + ('Ỹ', 'Ỹ'), + ('Ỻ', 'Ỻ'), + ('Ỽ', 
'Ỽ'), + ('Ỿ', 'Ỿ'), + ('Ἀ', 'Ἇ'), + ('Ἐ', 'Ἕ'), + ('Ἠ', 'Ἧ'), + ('Ἰ', 'Ἷ'), + ('Ὀ', 'Ὅ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'Ὗ'), + ('Ὠ', 'Ὧ'), + ('Ᾰ', 'Ά'), + ('Ὲ', 'Ή'), + ('Ῐ', 'Ί'), + ('Ῠ', 'Ῥ'), + ('Ὸ', 'Ώ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℋ', 'ℍ'), + ('ℐ', 'ℒ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℰ', 'ℳ'), + ('ℾ', 'ℿ'), + ('ⅅ', 'ⅅ'), + ('Ↄ', 'Ↄ'), + ('Ⰰ', 'Ⱟ'), + ('Ⱡ', 'Ⱡ'), + ('Ɫ', 'Ɽ'), + ('Ⱨ', 'Ⱨ'), + ('Ⱪ', 'Ⱪ'), + ('Ⱬ', 'Ⱬ'), + ('Ɑ', 'Ɒ'), + ('Ⱳ', 'Ⱳ'), + ('Ⱶ', 'Ⱶ'), + ('Ȿ', 'Ⲁ'), + ('Ⲃ', 'Ⲃ'), + ('Ⲅ', 'Ⲅ'), + ('Ⲇ', 'Ⲇ'), + ('Ⲉ', 'Ⲉ'), + ('Ⲋ', 'Ⲋ'), + ('Ⲍ', 'Ⲍ'), + ('Ⲏ', 'Ⲏ'), + ('Ⲑ', 'Ⲑ'), + ('Ⲓ', 'Ⲓ'), + ('Ⲕ', 'Ⲕ'), + ('Ⲗ', 'Ⲗ'), + ('Ⲙ', 'Ⲙ'), + ('Ⲛ', 'Ⲛ'), + ('Ⲝ', 'Ⲝ'), + ('Ⲟ', 'Ⲟ'), + ('Ⲡ', 'Ⲡ'), + ('Ⲣ', 'Ⲣ'), + ('Ⲥ', 'Ⲥ'), + ('Ⲧ', 'Ⲧ'), + ('Ⲩ', 'Ⲩ'), + ('Ⲫ', 'Ⲫ'), + ('Ⲭ', 'Ⲭ'), + ('Ⲯ', 'Ⲯ'), + ('Ⲱ', 'Ⲱ'), + ('Ⲳ', 'Ⲳ'), + ('Ⲵ', 'Ⲵ'), + ('Ⲷ', 'Ⲷ'), + ('Ⲹ', 'Ⲹ'), + ('Ⲻ', 'Ⲻ'), + ('Ⲽ', 'Ⲽ'), + ('Ⲿ', 'Ⲿ'), + ('Ⳁ', 'Ⳁ'), + ('Ⳃ', 'Ⳃ'), + ('Ⳅ', 'Ⳅ'), + ('Ⳇ', 'Ⳇ'), + ('Ⳉ', 'Ⳉ'), + ('Ⳋ', 'Ⳋ'), + ('Ⳍ', 'Ⳍ'), + ('Ⳏ', 'Ⳏ'), + ('Ⳑ', 'Ⳑ'), + ('Ⳓ', 'Ⳓ'), + ('Ⳕ', 'Ⳕ'), + ('Ⳗ', 'Ⳗ'), + ('Ⳙ', 'Ⳙ'), + ('Ⳛ', 'Ⳛ'), + ('Ⳝ', 'Ⳝ'), + ('Ⳟ', 'Ⳟ'), + ('Ⳡ', 'Ⳡ'), + ('Ⳣ', 'Ⳣ'), + ('Ⳬ', 'Ⳬ'), + ('Ⳮ', 'Ⳮ'), + ('Ⳳ', 'Ⳳ'), + ('Ꙁ', 'Ꙁ'), + ('Ꙃ', 'Ꙃ'), + ('Ꙅ', 'Ꙅ'), + ('Ꙇ', 'Ꙇ'), + ('Ꙉ', 'Ꙉ'), + ('Ꙋ', 'Ꙋ'), + ('Ꙍ', 'Ꙍ'), + ('Ꙏ', 'Ꙏ'), + ('Ꙑ', 'Ꙑ'), + ('Ꙓ', 'Ꙓ'), + ('Ꙕ', 'Ꙕ'), + ('Ꙗ', 'Ꙗ'), + ('Ꙙ', 'Ꙙ'), + ('Ꙛ', 'Ꙛ'), + ('Ꙝ', 'Ꙝ'), + ('Ꙟ', 'Ꙟ'), + ('Ꙡ', 'Ꙡ'), + ('Ꙣ', 'Ꙣ'), + ('Ꙥ', 'Ꙥ'), + ('Ꙧ', 'Ꙧ'), + ('Ꙩ', 'Ꙩ'), + ('Ꙫ', 'Ꙫ'), + ('Ꙭ', 'Ꙭ'), + ('Ꚁ', 'Ꚁ'), + ('Ꚃ', 'Ꚃ'), + ('Ꚅ', 'Ꚅ'), + ('Ꚇ', 'Ꚇ'), + ('Ꚉ', 'Ꚉ'), + ('Ꚋ', 'Ꚋ'), + ('Ꚍ', 'Ꚍ'), + ('Ꚏ', 'Ꚏ'), + ('Ꚑ', 'Ꚑ'), + ('Ꚓ', 'Ꚓ'), + ('Ꚕ', 'Ꚕ'), + ('Ꚗ', 'Ꚗ'), + ('Ꚙ', 'Ꚙ'), + ('Ꚛ', 'Ꚛ'), + ('Ꜣ', 'Ꜣ'), + ('Ꜥ', 'Ꜥ'), + ('Ꜧ', 'Ꜧ'), + ('Ꜩ', 'Ꜩ'), + ('Ꜫ', 'Ꜫ'), + ('Ꜭ', 'Ꜭ'), + ('Ꜯ', 'Ꜯ'), + ('Ꜳ', 'Ꜳ'), + ('Ꜵ', 'Ꜵ'), + ('Ꜷ', 'Ꜷ'), + ('Ꜹ', 'Ꜹ'), + ('Ꜻ', 'Ꜻ'), + ('Ꜽ', 'Ꜽ'), + 
('Ꜿ', 'Ꜿ'), + ('Ꝁ', 'Ꝁ'), + ('Ꝃ', 'Ꝃ'), + ('Ꝅ', 'Ꝅ'), + ('Ꝇ', 'Ꝇ'), + ('Ꝉ', 'Ꝉ'), + ('Ꝋ', 'Ꝋ'), + ('Ꝍ', 'Ꝍ'), + ('Ꝏ', 'Ꝏ'), + ('Ꝑ', 'Ꝑ'), + ('Ꝓ', 'Ꝓ'), + ('Ꝕ', 'Ꝕ'), + ('Ꝗ', 'Ꝗ'), + ('Ꝙ', 'Ꝙ'), + ('Ꝛ', 'Ꝛ'), + ('Ꝝ', 'Ꝝ'), + ('Ꝟ', 'Ꝟ'), + ('Ꝡ', 'Ꝡ'), + ('Ꝣ', 'Ꝣ'), + ('Ꝥ', 'Ꝥ'), + ('Ꝧ', 'Ꝧ'), + ('Ꝩ', 'Ꝩ'), + ('Ꝫ', 'Ꝫ'), + ('Ꝭ', 'Ꝭ'), + ('Ꝯ', 'Ꝯ'), + ('Ꝺ', 'Ꝺ'), + ('Ꝼ', 'Ꝼ'), + ('Ᵹ', 'Ꝿ'), + ('Ꞁ', 'Ꞁ'), + ('Ꞃ', 'Ꞃ'), + ('Ꞅ', 'Ꞅ'), + ('Ꞇ', 'Ꞇ'), + ('Ꞌ', 'Ꞌ'), + ('Ɥ', 'Ɥ'), + ('Ꞑ', 'Ꞑ'), + ('Ꞓ', 'Ꞓ'), + ('Ꞗ', 'Ꞗ'), + ('Ꞙ', 'Ꞙ'), + ('Ꞛ', 'Ꞛ'), + ('Ꞝ', 'Ꞝ'), + ('Ꞟ', 'Ꞟ'), + ('Ꞡ', 'Ꞡ'), + ('Ꞣ', 'Ꞣ'), + ('Ꞥ', 'Ꞥ'), + ('Ꞧ', 'Ꞧ'), + ('Ꞩ', 'Ꞩ'), + ('Ɦ', 'Ɪ'), + ('Ʞ', 'Ꞵ'), + ('Ꞷ', 'Ꞷ'), + ('Ꞹ', 'Ꞹ'), + ('Ꞻ', 'Ꞻ'), + ('Ꞽ', 'Ꞽ'), + ('Ꞿ', 'Ꞿ'), + ('Ꟁ', 'Ꟁ'), + ('Ꟃ', 'Ꟃ'), + ('Ꞔ', 'Ꟈ'), + ('Ꟊ', 'Ꟊ'), + ('Ɤ', 'Ꟍ'), + ('Ꟑ', 'Ꟑ'), + ('Ꟗ', 'Ꟗ'), + ('Ꟙ', 'Ꟙ'), + ('Ꟛ', 'Ꟛ'), + ('Ƛ', 'Ƛ'), + ('Ꟶ', 'Ꟶ'), + ('A', 'Z'), + ('𐐀', '𐐧'), + ('𐒰', '𐓓'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐲀', '𐲲'), + ('𐵐', '𐵥'), + ('𑢠', '𑢿'), + ('𖹀', '𖹟'), + ('𝐀', '𝐙'), + ('𝐴', '𝑍'), + ('𝑨', '𝒁'), + ('𝒜', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒵'), + ('𝓐', '𝓩'), + ('𝔄', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔸', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕬', '𝖅'), + ('𝖠', '𝖹'), + ('𝗔', '𝗭'), + ('𝘈', '𝘡'), + ('𝘼', '𝙕'), + ('𝙰', '𝚉'), + ('𝚨', '𝛀'), + ('𝛢', '𝛺'), + ('𝜜', '𝜴'), + ('𝝖', '𝝮'), + ('𝞐', '𝞨'), + ('𝟊', '𝟊'), + ('𞤀', '𞤡'), +]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/grapheme_cluster_break.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/grapheme_cluster_break.rs new file mode 100644 index 0000000000000000000000000000000000000000..6a6ec2af5f25fac99a74dd0ecb980f30a092f09a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/grapheme_cluster_break.rs @@ 
-0,0 +1,1420 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate grapheme-cluster-break ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("CR", CR), + ("Control", CONTROL), + ("Extend", EXTEND), + ("L", L), + ("LF", LF), + ("LV", LV), + ("LVT", LVT), + ("Prepend", PREPEND), + ("Regional_Indicator", REGIONAL_INDICATOR), + ("SpacingMark", SPACINGMARK), + ("T", T), + ("V", V), + ("ZWJ", ZWJ), +]; + +pub const CR: &'static [(char, char)] = &[('\r', '\r')]; + +pub const CONTROL: &'static [(char, char)] = &[ + ('\0', '\t'), + ('\u{b}', '\u{c}'), + ('\u{e}', '\u{1f}'), + ('\u{7f}', '\u{9f}'), + ('\u{ad}', '\u{ad}'), + ('\u{61c}', '\u{61c}'), + ('\u{180e}', '\u{180e}'), + ('\u{200b}', '\u{200b}'), + ('\u{200e}', '\u{200f}'), + ('\u{2028}', '\u{202e}'), + ('\u{2060}', '\u{206f}'), + ('\u{feff}', '\u{feff}'), + ('\u{fff0}', '\u{fffb}'), + ('\u{13430}', '\u{1343f}'), + ('\u{1bca0}', '\u{1bca3}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{e0000}', '\u{e001f}'), + ('\u{e0080}', '\u{e00ff}'), + ('\u{e01f0}', '\u{e0fff}'), +]; + +pub const EXTEND: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', '\u{902}'), + ('\u{93a}', '\u{93a}'), + ('\u{93c}', '\u{93c}'), + 
('\u{941}', '\u{948}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', '\u{981}'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9be}'), + ('\u{9c1}', '\u{9c4}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', '\u{a02}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a41}', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', '\u{a82}'), + ('\u{abc}', '\u{abc}'), + ('\u{ac1}', '\u{ac5}'), + ('\u{ac7}', '\u{ac8}'), + ('\u{acd}', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', '\u{b01}'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3e}', '\u{b3f}'), + ('\u{b41}', '\u{b44}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', '\u{bbe}'), + ('\u{bc0}', '\u{bc0}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c00}'), + ('\u{c04}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', '\u{c40}'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', '\u{c81}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{cbf}', '\u{cc0}'), + ('\u{cc2}', '\u{cc2}'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('\u{d00}', '\u{d01}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d3e}', '\u{d3e}'), + ('\u{d41}', '\u{d44}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', '\u{d81}'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dcf}'), + ('\u{dd2}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('\u{ddf}', '\u{ddf}'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), 
+ ('\u{f39}', '\u{f39}'), + ('\u{f71}', '\u{f7e}'), + ('\u{f80}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{102d}', '\u{1030}'), + ('\u{1032}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('\u{103d}', '\u{103e}'), + ('\u{1058}', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{1082}'), + ('\u{1085}', '\u{1086}'), + ('\u{108d}', '\u{108d}'), + ('\u{109d}', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17b5}'), + ('\u{17b7}', '\u{17bd}'), + ('\u{17c6}', '\u{17c6}'), + ('\u{17c9}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', '\u{1922}'), + ('\u{1927}', '\u{1928}'), + ('\u{1932}', '\u{1932}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a17}', '\u{1a18}'), + ('\u{1a1b}', '\u{1a1b}'), + ('\u{1a56}', '\u{1a56}'), + ('\u{1a58}', '\u{1a5e}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a62}', '\u{1a62}'), + ('\u{1a65}', '\u{1a6c}'), + ('\u{1a73}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', '\u{1b03}'), + ('\u{1b34}', '\u{1b3d}'), + ('\u{1b42}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1b81}'), + ('\u{1ba2}', '\u{1ba5}'), + ('\u{1ba8}', '\u{1bad}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1be8}', '\u{1be9}'), + ('\u{1bed}', '\u{1bed}'), + ('\u{1bef}', '\u{1bf3}'), + ('\u{1c2c}', '\u{1c33}'), + ('\u{1c36}', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200c}', '\u{200c}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', 
'\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('\u{a825}', '\u{a826}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a951}'), + ('\u{a953}', '\u{a953}'), + ('\u{a980}', '\u{a982}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9b6}', '\u{a9b9}'), + ('\u{a9bc}', '\u{a9bd}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa2e}'), + ('\u{aa31}', '\u{aa32}'), + ('\u{aa35}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', '\u{aa4c}'), + ('\u{aa7c}', '\u{aa7c}'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('\u{aaec}', '\u{aaed}'), + ('\u{aaf6}', '\u{aaf6}'), + ('\u{abe5}', '\u{abe5}'), + ('\u{abe8}', '\u{abe8}'), + ('\u{abed}', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11001}', '\u{11001}'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '\u{11081}'), + ('\u{110b3}', '\u{110b6}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{1112b}'), + ('\u{1112d}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), 
+ ('\u{11180}', '\u{11181}'), + ('\u{111b6}', '\u{111be}'), + ('\u{111c0}', '\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('\u{111cf}', '\u{111cf}'), + ('\u{1122f}', '\u{11231}'), + ('\u{11234}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112df}'), + ('\u{112e3}', '\u{112ea}'), + ('\u{11300}', '\u{11301}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '\u{1133e}'), + ('\u{11340}', '\u{11340}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', '\u{113b8}'), + ('\u{113bb}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '\u{113c9}'), + ('\u{113ce}', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11438}', '\u{1143f}'), + ('\u{11442}', '\u{11444}'), + ('\u{11446}', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114b0}'), + ('\u{114b3}', '\u{114b8}'), + ('\u{114ba}', '\u{114ba}'), + ('\u{114bd}', '\u{114bd}'), + ('\u{114bf}', '\u{114c0}'), + ('\u{114c2}', '\u{114c3}'), + ('\u{115af}', '\u{115af}'), + ('\u{115b2}', '\u{115b5}'), + ('\u{115bc}', '\u{115bd}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('\u{11633}', '\u{1163a}'), + ('\u{1163d}', '\u{1163d}'), + ('\u{1163f}', '\u{11640}'), + ('\u{116ab}', '\u{116ab}'), + ('\u{116ad}', '\u{116ad}'), + ('\u{116b0}', '\u{116b7}'), + ('\u{1171d}', '\u{1171d}'), + ('\u{1171f}', '\u{1171f}'), + ('\u{11722}', '\u{11725}'), + ('\u{11727}', '\u{1172b}'), + ('\u{1182f}', '\u{11837}'), + ('\u{11839}', '\u{1183a}'), + ('\u{11930}', '\u{11930}'), + ('\u{1193b}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119d4}', '\u{119d7}'), + ('\u{119da}', '\u{119db}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '\u{11a38}'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a56}'), + ('\u{11a59}', '\u{11a5b}'), + ('\u{11a8a}', 
'\u{11a96}'), + ('\u{11a98}', '\u{11a99}'), + ('\u{11c30}', '\u{11c36}'), + ('\u{11c38}', '\u{11c3d}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('\u{11caa}', '\u{11cb0}'), + ('\u{11cb2}', '\u{11cb3}'), + ('\u{11cb5}', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('\u{11d90}', '\u{11d91}'), + ('\u{11d95}', '\u{11d95}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11ef3}', '\u{11ef4}'), + ('\u{11f00}', '\u{11f01}'), + ('\u{11f36}', '\u{11f3a}'), + ('\u{11f40}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{16129}'), + ('\u{1612d}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('🏻', '🏿'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const L: &'static [(char, char)] = &[('ᄀ', 'ᅟ'), ('ꥠ', 'ꥼ')]; + +pub const LF: &'static [(char, char)] = &[('\n', '\n')]; + +pub 
const LV: &'static [(char, char)] = &[ + ('가', '가'), + ('개', '개'), + ('갸', '갸'), + ('걔', '걔'), + ('거', '거'), + ('게', '게'), + ('겨', '겨'), + ('계', '계'), + ('고', '고'), + ('과', '과'), + ('괘', '괘'), + ('괴', '괴'), + ('교', '교'), + ('구', '구'), + ('궈', '궈'), + ('궤', '궤'), + ('귀', '귀'), + ('규', '규'), + ('그', '그'), + ('긔', '긔'), + ('기', '기'), + ('까', '까'), + ('깨', '깨'), + ('꺄', '꺄'), + ('꺠', '꺠'), + ('꺼', '꺼'), + ('께', '께'), + ('껴', '껴'), + ('꼐', '꼐'), + ('꼬', '꼬'), + ('꽈', '꽈'), + ('꽤', '꽤'), + ('꾀', '꾀'), + ('꾜', '꾜'), + ('꾸', '꾸'), + ('꿔', '꿔'), + ('꿰', '꿰'), + ('뀌', '뀌'), + ('뀨', '뀨'), + ('끄', '끄'), + ('끠', '끠'), + ('끼', '끼'), + ('나', '나'), + ('내', '내'), + ('냐', '냐'), + ('냬', '냬'), + ('너', '너'), + ('네', '네'), + ('녀', '녀'), + ('녜', '녜'), + ('노', '노'), + ('놔', '놔'), + ('놰', '놰'), + ('뇌', '뇌'), + ('뇨', '뇨'), + ('누', '누'), + ('눠', '눠'), + ('눼', '눼'), + ('뉘', '뉘'), + ('뉴', '뉴'), + ('느', '느'), + ('늬', '늬'), + ('니', '니'), + ('다', '다'), + ('대', '대'), + ('댜', '댜'), + ('댸', '댸'), + ('더', '더'), + ('데', '데'), + ('뎌', '뎌'), + ('뎨', '뎨'), + ('도', '도'), + ('돠', '돠'), + ('돼', '돼'), + ('되', '되'), + ('됴', '됴'), + ('두', '두'), + ('둬', '둬'), + ('뒈', '뒈'), + ('뒤', '뒤'), + ('듀', '듀'), + ('드', '드'), + ('듸', '듸'), + ('디', '디'), + ('따', '따'), + ('때', '때'), + ('땨', '땨'), + ('떄', '떄'), + ('떠', '떠'), + ('떼', '떼'), + ('뗘', '뗘'), + ('뗴', '뗴'), + ('또', '또'), + ('똬', '똬'), + ('뙈', '뙈'), + ('뙤', '뙤'), + ('뚀', '뚀'), + ('뚜', '뚜'), + ('뚸', '뚸'), + ('뛔', '뛔'), + ('뛰', '뛰'), + ('뜌', '뜌'), + ('뜨', '뜨'), + ('띄', '띄'), + ('띠', '띠'), + ('라', '라'), + ('래', '래'), + ('랴', '랴'), + ('럐', '럐'), + ('러', '러'), + ('레', '레'), + ('려', '려'), + ('례', '례'), + ('로', '로'), + ('롸', '롸'), + ('뢔', '뢔'), + ('뢰', '뢰'), + ('료', '료'), + ('루', '루'), + ('뤄', '뤄'), + ('뤠', '뤠'), + ('뤼', '뤼'), + ('류', '류'), + ('르', '르'), + ('릐', '릐'), + ('리', '리'), + ('마', '마'), + ('매', '매'), + ('먀', '먀'), + ('먜', '먜'), + ('머', '머'), + ('메', '메'), + ('며', '며'), + ('몌', '몌'), + ('모', '모'), + ('뫄', '뫄'), + ('뫠', '뫠'), + ('뫼', '뫼'), + ('묘', '묘'), + ('무', '무'), 
+ ('뭐', '뭐'), + ('뭬', '뭬'), + ('뮈', '뮈'), + ('뮤', '뮤'), + ('므', '므'), + ('믜', '믜'), + ('미', '미'), + ('바', '바'), + ('배', '배'), + ('뱌', '뱌'), + ('뱨', '뱨'), + ('버', '버'), + ('베', '베'), + ('벼', '벼'), + ('볘', '볘'), + ('보', '보'), + ('봐', '봐'), + ('봬', '봬'), + ('뵈', '뵈'), + ('뵤', '뵤'), + ('부', '부'), + ('붜', '붜'), + ('붸', '붸'), + ('뷔', '뷔'), + ('뷰', '뷰'), + ('브', '브'), + ('븨', '븨'), + ('비', '비'), + ('빠', '빠'), + ('빼', '빼'), + ('뺘', '뺘'), + ('뺴', '뺴'), + ('뻐', '뻐'), + ('뻬', '뻬'), + ('뼈', '뼈'), + ('뼤', '뼤'), + ('뽀', '뽀'), + ('뽜', '뽜'), + ('뽸', '뽸'), + ('뾔', '뾔'), + ('뾰', '뾰'), + ('뿌', '뿌'), + ('뿨', '뿨'), + ('쀄', '쀄'), + ('쀠', '쀠'), + ('쀼', '쀼'), + ('쁘', '쁘'), + ('쁴', '쁴'), + ('삐', '삐'), + ('사', '사'), + ('새', '새'), + ('샤', '샤'), + ('섀', '섀'), + ('서', '서'), + ('세', '세'), + ('셔', '셔'), + ('셰', '셰'), + ('소', '소'), + ('솨', '솨'), + ('쇄', '쇄'), + ('쇠', '쇠'), + ('쇼', '쇼'), + ('수', '수'), + ('숴', '숴'), + ('쉐', '쉐'), + ('쉬', '쉬'), + ('슈', '슈'), + ('스', '스'), + ('싀', '싀'), + ('시', '시'), + ('싸', '싸'), + ('쌔', '쌔'), + ('쌰', '쌰'), + ('썌', '썌'), + ('써', '써'), + ('쎄', '쎄'), + ('쎠', '쎠'), + ('쎼', '쎼'), + ('쏘', '쏘'), + ('쏴', '쏴'), + ('쐐', '쐐'), + ('쐬', '쐬'), + ('쑈', '쑈'), + ('쑤', '쑤'), + ('쒀', '쒀'), + ('쒜', '쒜'), + ('쒸', '쒸'), + ('쓔', '쓔'), + ('쓰', '쓰'), + ('씌', '씌'), + ('씨', '씨'), + ('아', '아'), + ('애', '애'), + ('야', '야'), + ('얘', '얘'), + ('어', '어'), + ('에', '에'), + ('여', '여'), + ('예', '예'), + ('오', '오'), + ('와', '와'), + ('왜', '왜'), + ('외', '외'), + ('요', '요'), + ('우', '우'), + ('워', '워'), + ('웨', '웨'), + ('위', '위'), + ('유', '유'), + ('으', '으'), + ('의', '의'), + ('이', '이'), + ('자', '자'), + ('재', '재'), + ('쟈', '쟈'), + ('쟤', '쟤'), + ('저', '저'), + ('제', '제'), + ('져', '져'), + ('졔', '졔'), + ('조', '조'), + ('좌', '좌'), + ('좨', '좨'), + ('죄', '죄'), + ('죠', '죠'), + ('주', '주'), + ('줘', '줘'), + ('줴', '줴'), + ('쥐', '쥐'), + ('쥬', '쥬'), + ('즈', '즈'), + ('즤', '즤'), + ('지', '지'), + ('짜', '짜'), + ('째', '째'), + ('쨔', '쨔'), + ('쨰', '쨰'), + ('쩌', '쩌'), + ('쩨', '쩨'), + ('쪄', '쪄'), + ('쪠', '쪠'), + ('쪼', '쪼'), + ('쫘', 
'쫘'), + ('쫴', '쫴'), + ('쬐', '쬐'), + ('쬬', '쬬'), + ('쭈', '쭈'), + ('쭤', '쭤'), + ('쮀', '쮀'), + ('쮜', '쮜'), + ('쮸', '쮸'), + ('쯔', '쯔'), + ('쯰', '쯰'), + ('찌', '찌'), + ('차', '차'), + ('채', '채'), + ('챠', '챠'), + ('챼', '챼'), + ('처', '처'), + ('체', '체'), + ('쳐', '쳐'), + ('쳬', '쳬'), + ('초', '초'), + ('촤', '촤'), + ('쵀', '쵀'), + ('최', '최'), + ('쵸', '쵸'), + ('추', '추'), + ('춰', '춰'), + ('췌', '췌'), + ('취', '취'), + ('츄', '츄'), + ('츠', '츠'), + ('츼', '츼'), + ('치', '치'), + ('카', '카'), + ('캐', '캐'), + ('캬', '캬'), + ('컈', '컈'), + ('커', '커'), + ('케', '케'), + ('켜', '켜'), + ('켸', '켸'), + ('코', '코'), + ('콰', '콰'), + ('쾌', '쾌'), + ('쾨', '쾨'), + ('쿄', '쿄'), + ('쿠', '쿠'), + ('쿼', '쿼'), + ('퀘', '퀘'), + ('퀴', '퀴'), + ('큐', '큐'), + ('크', '크'), + ('킈', '킈'), + ('키', '키'), + ('타', '타'), + ('태', '태'), + ('탸', '탸'), + ('턔', '턔'), + ('터', '터'), + ('테', '테'), + ('텨', '텨'), + ('톄', '톄'), + ('토', '토'), + ('톼', '톼'), + ('퇘', '퇘'), + ('퇴', '퇴'), + ('툐', '툐'), + ('투', '투'), + ('퉈', '퉈'), + ('퉤', '퉤'), + ('튀', '튀'), + ('튜', '튜'), + ('트', '트'), + ('틔', '틔'), + ('티', '티'), + ('파', '파'), + ('패', '패'), + ('퍄', '퍄'), + ('퍠', '퍠'), + ('퍼', '퍼'), + ('페', '페'), + ('펴', '펴'), + ('폐', '폐'), + ('포', '포'), + ('퐈', '퐈'), + ('퐤', '퐤'), + ('푀', '푀'), + ('표', '표'), + ('푸', '푸'), + ('풔', '풔'), + ('풰', '풰'), + ('퓌', '퓌'), + ('퓨', '퓨'), + ('프', '프'), + ('픠', '픠'), + ('피', '피'), + ('하', '하'), + ('해', '해'), + ('햐', '햐'), + ('햬', '햬'), + ('허', '허'), + ('헤', '헤'), + ('혀', '혀'), + ('혜', '혜'), + ('호', '호'), + ('화', '화'), + ('홰', '홰'), + ('회', '회'), + ('효', '효'), + ('후', '후'), + ('훠', '훠'), + ('훼', '훼'), + ('휘', '휘'), + ('휴', '휴'), + ('흐', '흐'), + ('희', '희'), + ('히', '히'), +]; + +pub const LVT: &'static [(char, char)] = &[ + ('각', '갛'), + ('객', '갷'), + ('갹', '걓'), + ('걕', '걯'), + ('걱', '겋'), + ('겍', '겧'), + ('격', '곃'), + ('곅', '곟'), + ('곡', '곻'), + ('곽', '괗'), + ('괙', '괳'), + ('괵', '굏'), + ('굑', '굫'), + ('국', '궇'), + ('궉', '궣'), + ('궥', '궿'), + ('귁', '귛'), + ('귝', '귷'), + ('극', '긓'), + ('긕', '긯'), + ('긱', '깋'), + ('깍', '깧'), + ('깩', 
'꺃'), + ('꺅', '꺟'), + ('꺡', '꺻'), + ('꺽', '껗'), + ('껙', '껳'), + ('껵', '꼏'), + ('꼑', '꼫'), + ('꼭', '꽇'), + ('꽉', '꽣'), + ('꽥', '꽿'), + ('꾁', '꾛'), + ('꾝', '꾷'), + ('꾹', '꿓'), + ('꿕', '꿯'), + ('꿱', '뀋'), + ('뀍', '뀧'), + ('뀩', '끃'), + ('끅', '끟'), + ('끡', '끻'), + ('끽', '낗'), + ('낙', '낳'), + ('낵', '냏'), + ('냑', '냫'), + ('냭', '넇'), + ('넉', '넣'), + ('넥', '넿'), + ('녁', '녛'), + ('녝', '녷'), + ('녹', '놓'), + ('놕', '놯'), + ('놱', '뇋'), + ('뇍', '뇧'), + ('뇩', '눃'), + ('눅', '눟'), + ('눡', '눻'), + ('눽', '뉗'), + ('뉙', '뉳'), + ('뉵', '늏'), + ('늑', '늫'), + ('늭', '닇'), + ('닉', '닣'), + ('닥', '닿'), + ('댁', '댛'), + ('댝', '댷'), + ('댹', '덓'), + ('덕', '덯'), + ('덱', '뎋'), + ('뎍', '뎧'), + ('뎩', '돃'), + ('독', '돟'), + ('돡', '돻'), + ('돽', '됗'), + ('됙', '됳'), + ('됵', '둏'), + ('둑', '둫'), + ('둭', '뒇'), + ('뒉', '뒣'), + ('뒥', '뒿'), + ('듁', '듛'), + ('득', '듷'), + ('듹', '딓'), + ('딕', '딯'), + ('딱', '땋'), + ('땍', '땧'), + ('땩', '떃'), + ('떅', '떟'), + ('떡', '떻'), + ('떽', '뗗'), + ('뗙', '뗳'), + ('뗵', '똏'), + ('똑', '똫'), + ('똭', '뙇'), + ('뙉', '뙣'), + ('뙥', '뙿'), + ('뚁', '뚛'), + ('뚝', '뚷'), + ('뚹', '뛓'), + ('뛕', '뛯'), + ('뛱', '뜋'), + ('뜍', '뜧'), + ('뜩', '띃'), + ('띅', '띟'), + ('띡', '띻'), + ('락', '랗'), + ('랙', '랳'), + ('략', '럏'), + ('럑', '럫'), + ('럭', '렇'), + ('렉', '렣'), + ('력', '렿'), + ('롁', '롛'), + ('록', '롷'), + ('롹', '뢓'), + ('뢕', '뢯'), + ('뢱', '룋'), + ('룍', '룧'), + ('룩', '뤃'), + ('뤅', '뤟'), + ('뤡', '뤻'), + ('뤽', '륗'), + ('륙', '륳'), + ('륵', '릏'), + ('릑', '릫'), + ('릭', '맇'), + ('막', '맣'), + ('맥', '맿'), + ('먁', '먛'), + ('먝', '먷'), + ('먹', '멓'), + ('멕', '멯'), + ('멱', '몋'), + ('몍', '몧'), + ('목', '뫃'), + ('뫅', '뫟'), + ('뫡', '뫻'), + ('뫽', '묗'), + ('묙', '묳'), + ('묵', '뭏'), + ('뭑', '뭫'), + ('뭭', '뮇'), + ('뮉', '뮣'), + ('뮥', '뮿'), + ('믁', '믛'), + ('믝', '믷'), + ('믹', '밓'), + ('박', '밯'), + ('백', '뱋'), + ('뱍', '뱧'), + ('뱩', '벃'), + ('벅', '벟'), + ('벡', '벻'), + ('벽', '볗'), + ('볙', '볳'), + ('복', '봏'), + ('봑', '봫'), + ('봭', '뵇'), + ('뵉', '뵣'), + ('뵥', '뵿'), + ('북', '붛'), + ('붝', '붷'), + ('붹', '뷓'), + ('뷕', '뷯'), + ('뷱', '븋'), + 
('븍', '븧'), + ('븩', '빃'), + ('빅', '빟'), + ('빡', '빻'), + ('빽', '뺗'), + ('뺙', '뺳'), + ('뺵', '뻏'), + ('뻑', '뻫'), + ('뻭', '뼇'), + ('뼉', '뼣'), + ('뼥', '뼿'), + ('뽁', '뽛'), + ('뽝', '뽷'), + ('뽹', '뾓'), + ('뾕', '뾯'), + ('뾱', '뿋'), + ('뿍', '뿧'), + ('뿩', '쀃'), + ('쀅', '쀟'), + ('쀡', '쀻'), + ('쀽', '쁗'), + ('쁙', '쁳'), + ('쁵', '삏'), + ('삑', '삫'), + ('삭', '샇'), + ('색', '샣'), + ('샥', '샿'), + ('섁', '섛'), + ('석', '섷'), + ('섹', '셓'), + ('셕', '셯'), + ('셱', '솋'), + ('속', '솧'), + ('솩', '쇃'), + ('쇅', '쇟'), + ('쇡', '쇻'), + ('쇽', '숗'), + ('숙', '숳'), + ('숵', '쉏'), + ('쉑', '쉫'), + ('쉭', '슇'), + ('슉', '슣'), + ('슥', '슿'), + ('싁', '싛'), + ('식', '싷'), + ('싹', '쌓'), + ('쌕', '쌯'), + ('쌱', '썋'), + ('썍', '썧'), + ('썩', '쎃'), + ('쎅', '쎟'), + ('쎡', '쎻'), + ('쎽', '쏗'), + ('쏙', '쏳'), + ('쏵', '쐏'), + ('쐑', '쐫'), + ('쐭', '쑇'), + ('쑉', '쑣'), + ('쑥', '쑿'), + ('쒁', '쒛'), + ('쒝', '쒷'), + ('쒹', '쓓'), + ('쓕', '쓯'), + ('쓱', '씋'), + ('씍', '씧'), + ('씩', '앃'), + ('악', '앟'), + ('액', '앻'), + ('약', '얗'), + ('얙', '얳'), + ('억', '엏'), + ('엑', '엫'), + ('역', '옇'), + ('옉', '옣'), + ('옥', '옿'), + ('왁', '왛'), + ('왝', '왷'), + ('왹', '욓'), + ('욕', '욯'), + ('욱', '웋'), + ('웍', '웧'), + ('웩', '윃'), + ('윅', '윟'), + ('육', '윻'), + ('윽', '읗'), + ('읙', '읳'), + ('익', '잏'), + ('작', '잫'), + ('잭', '쟇'), + ('쟉', '쟣'), + ('쟥', '쟿'), + ('적', '젛'), + ('젝', '젷'), + ('젹', '졓'), + ('졕', '졯'), + ('족', '좋'), + ('좍', '좧'), + ('좩', '죃'), + ('죅', '죟'), + ('죡', '죻'), + ('죽', '줗'), + ('줙', '줳'), + ('줵', '쥏'), + ('쥑', '쥫'), + ('쥭', '즇'), + ('즉', '즣'), + ('즥', '즿'), + ('직', '짛'), + ('짝', '짷'), + ('짹', '쨓'), + ('쨕', '쨯'), + ('쨱', '쩋'), + ('쩍', '쩧'), + ('쩩', '쪃'), + ('쪅', '쪟'), + ('쪡', '쪻'), + ('쪽', '쫗'), + ('쫙', '쫳'), + ('쫵', '쬏'), + ('쬑', '쬫'), + ('쬭', '쭇'), + ('쭉', '쭣'), + ('쭥', '쭿'), + ('쮁', '쮛'), + ('쮝', '쮷'), + ('쮹', '쯓'), + ('쯕', '쯯'), + ('쯱', '찋'), + ('찍', '찧'), + ('착', '챃'), + ('책', '챟'), + ('챡', '챻'), + ('챽', '첗'), + ('척', '첳'), + ('첵', '쳏'), + ('쳑', '쳫'), + ('쳭', '촇'), + ('촉', '촣'), + ('촥', '촿'), + ('쵁', '쵛'), + ('쵝', '쵷'), + ('쵹', '춓'), + ('축', '춯'), 
+ ('춱', '췋'), + ('췍', '췧'), + ('췩', '츃'), + ('츅', '츟'), + ('측', '츻'), + ('츽', '칗'), + ('칙', '칳'), + ('칵', '캏'), + ('캑', '캫'), + ('캭', '컇'), + ('컉', '컣'), + ('컥', '컿'), + ('켁', '켛'), + ('켝', '켷'), + ('켹', '콓'), + ('콕', '콯'), + ('콱', '쾋'), + ('쾍', '쾧'), + ('쾩', '쿃'), + ('쿅', '쿟'), + ('쿡', '쿻'), + ('쿽', '퀗'), + ('퀙', '퀳'), + ('퀵', '큏'), + ('큑', '큫'), + ('큭', '킇'), + ('킉', '킣'), + ('킥', '킿'), + ('탁', '탛'), + ('택', '탷'), + ('탹', '턓'), + ('턕', '턯'), + ('턱', '텋'), + ('텍', '텧'), + ('텩', '톃'), + ('톅', '톟'), + ('톡', '톻'), + ('톽', '퇗'), + ('퇙', '퇳'), + ('퇵', '툏'), + ('툑', '툫'), + ('툭', '퉇'), + ('퉉', '퉣'), + ('퉥', '퉿'), + ('튁', '튛'), + ('튝', '튷'), + ('특', '틓'), + ('틕', '틯'), + ('틱', '팋'), + ('팍', '팧'), + ('팩', '퍃'), + ('퍅', '퍟'), + ('퍡', '퍻'), + ('퍽', '펗'), + ('펙', '펳'), + ('펵', '폏'), + ('폑', '폫'), + ('폭', '퐇'), + ('퐉', '퐣'), + ('퐥', '퐿'), + ('푁', '푛'), + ('푝', '푷'), + ('푹', '풓'), + ('풕', '풯'), + ('풱', '퓋'), + ('퓍', '퓧'), + ('퓩', '픃'), + ('픅', '픟'), + ('픡', '픻'), + ('픽', '핗'), + ('학', '핳'), + ('핵', '햏'), + ('햑', '햫'), + ('햭', '헇'), + ('헉', '헣'), + ('헥', '헿'), + ('혁', '혛'), + ('혝', '혷'), + ('혹', '홓'), + ('확', '홯'), + ('홱', '횋'), + ('획', '횧'), + ('횩', '훃'), + ('훅', '훟'), + ('훡', '훻'), + ('훽', '휗'), + ('휙', '휳'), + ('휵', '흏'), + ('흑', '흫'), + ('흭', '힇'), + ('힉', '힣'), +]; + +pub const PREPEND: &'static [(char, char)] = &[ + ('\u{600}', '\u{605}'), + ('\u{6dd}', '\u{6dd}'), + ('\u{70f}', '\u{70f}'), + ('\u{890}', '\u{891}'), + ('\u{8e2}', '\u{8e2}'), + ('ൎ', 'ൎ'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110cd}', '\u{110cd}'), + ('𑇂', '𑇃'), + ('𑏑', '𑏑'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑨺', '𑨺'), + ('𑪄', '𑪉'), + ('𑵆', '𑵆'), + ('𑼂', '𑼂'), +]; + +pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇦', '🇿')]; + +pub const SPACINGMARK: &'static [(char, char)] = &[ + ('ः', 'ः'), + ('ऻ', 'ऻ'), + ('ा', 'ी'), + ('ॉ', 'ौ'), + ('ॎ', 'ॏ'), + ('ং', 'ঃ'), + ('ি', 'ী'), + ('ে', 'ৈ'), + ('ো', 'ৌ'), + ('ਃ', 'ਃ'), + ('ਾ', 'ੀ'), + ('ઃ', 'ઃ'), + ('ા', 'ી'), + ('ૉ', 'ૉ'), + ('ો', 'ૌ'), + ('ଂ', 
'ଃ'), + ('ୀ', 'ୀ'), + ('େ', 'ୈ'), + ('ୋ', 'ୌ'), + ('ி', 'ி'), + ('ு', 'ூ'), + ('ெ', 'ை'), + ('ொ', 'ௌ'), + ('ఁ', 'ః'), + ('ు', 'ౄ'), + ('ಂ', 'ಃ'), + ('ಾ', 'ಾ'), + ('ು', 'ು'), + ('ೃ', 'ೄ'), + ('ೳ', 'ೳ'), + ('ം', 'ഃ'), + ('ി', 'ീ'), + ('െ', 'ൈ'), + ('ൊ', 'ൌ'), + ('ං', 'ඃ'), + ('ැ', 'ෑ'), + ('ෘ', 'ෞ'), + ('ෲ', 'ෳ'), + ('ำ', 'ำ'), + ('ຳ', 'ຳ'), + ('༾', '༿'), + ('ཿ', 'ཿ'), + ('ေ', 'ေ'), + ('ျ', 'ြ'), + ('ၖ', 'ၗ'), + ('ႄ', 'ႄ'), + ('ា', 'ា'), + ('ើ', 'ៅ'), + ('ះ', 'ៈ'), + ('ᤣ', 'ᤦ'), + ('ᤩ', 'ᤫ'), + ('ᤰ', 'ᤱ'), + ('ᤳ', 'ᤸ'), + ('ᨙ', 'ᨚ'), + ('ᩕ', 'ᩕ'), + ('ᩗ', 'ᩗ'), + ('ᩭ', 'ᩲ'), + ('ᬄ', 'ᬄ'), + ('ᬾ', 'ᭁ'), + ('ᮂ', 'ᮂ'), + ('ᮡ', 'ᮡ'), + ('ᮦ', 'ᮧ'), + ('ᯧ', 'ᯧ'), + ('ᯪ', 'ᯬ'), + ('ᯮ', 'ᯮ'), + ('ᰤ', 'ᰫ'), + ('ᰴ', 'ᰵ'), + ('᳡', '᳡'), + ('᳷', '᳷'), + ('ꠣ', 'ꠤ'), + ('ꠧ', 'ꠧ'), + ('ꢀ', 'ꢁ'), + ('ꢴ', 'ꣃ'), + ('ꥒ', 'ꥒ'), + ('ꦃ', 'ꦃ'), + ('ꦴ', 'ꦵ'), + ('ꦺ', 'ꦻ'), + ('ꦾ', 'ꦿ'), + ('ꨯ', 'ꨰ'), + ('ꨳ', 'ꨴ'), + ('ꩍ', 'ꩍ'), + ('ꫫ', 'ꫫ'), + ('ꫮ', 'ꫯ'), + ('ꫵ', 'ꫵ'), + ('ꯣ', 'ꯤ'), + ('ꯦ', 'ꯧ'), + ('ꯩ', 'ꯪ'), + ('꯬', '꯬'), + ('𑀀', '𑀀'), + ('𑀂', '𑀂'), + ('𑂂', '𑂂'), + ('𑂰', '𑂲'), + ('𑂷', '𑂸'), + ('𑄬', '𑄬'), + ('𑅅', '𑅆'), + ('𑆂', '𑆂'), + ('𑆳', '𑆵'), + ('𑆿', '𑆿'), + ('𑇎', '𑇎'), + ('𑈬', '𑈮'), + ('𑈲', '𑈳'), + ('𑋠', '𑋢'), + ('𑌂', '𑌃'), + ('𑌿', '𑌿'), + ('𑍁', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '𑍌'), + ('𑍢', '𑍣'), + ('𑎹', '𑎺'), + ('𑏊', '𑏊'), + ('𑏌', '𑏍'), + ('𑐵', '𑐷'), + ('𑑀', '𑑁'), + ('𑑅', '𑑅'), + ('𑒱', '𑒲'), + ('𑒹', '𑒹'), + ('𑒻', '𑒼'), + ('𑒾', '𑒾'), + ('𑓁', '𑓁'), + ('𑖰', '𑖱'), + ('𑖸', '𑖻'), + ('𑖾', '𑖾'), + ('𑘰', '𑘲'), + ('𑘻', '𑘼'), + ('𑘾', '𑘾'), + ('𑚬', '𑚬'), + ('𑚮', '𑚯'), + ('𑜞', '𑜞'), + ('𑜦', '𑜦'), + ('𑠬', '𑠮'), + ('𑠸', '𑠸'), + ('𑤱', '𑤵'), + ('𑤷', '𑤸'), + ('𑥀', '𑥀'), + ('𑥂', '𑥂'), + ('𑧑', '𑧓'), + ('𑧜', '𑧟'), + ('𑧤', '𑧤'), + ('𑨹', '𑨹'), + ('𑩗', '𑩘'), + ('𑪗', '𑪗'), + ('𑰯', '𑰯'), + ('𑰾', '𑰾'), + ('𑲩', '𑲩'), + ('𑲱', '𑲱'), + ('𑲴', '𑲴'), + ('𑶊', '𑶎'), + ('𑶓', '𑶔'), + ('𑶖', '𑶖'), + ('𑻵', '𑻶'), + ('𑼃', '𑼃'), + ('𑼴', '𑼵'), + ('𑼾', '𑼿'), + ('𖄪', '𖄬'), + ('𖽑', '𖾇'), +]; + +pub const T: &'static [(char, char)] = 
&[('ᆨ', 'ᇿ'), ('ퟋ', 'ퟻ')]; + +pub const V: &'static [(char, char)] = + &[('ᅠ', 'ᆧ'), ('ힰ', 'ퟆ'), ('𖵣', '𖵣'), ('𖵧', '𖵪')]; + +pub const ZWJ: &'static [(char, char)] = &[('\u{200d}', '\u{200d}')]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..20736c7ac813e4f40f726260232570c1216c2767 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/mod.rs @@ -0,0 +1,57 @@ +#[cfg(feature = "unicode-age")] +pub mod age; + +#[cfg(feature = "unicode-case")] +pub mod case_folding_simple; + +#[cfg(feature = "unicode-gencat")] +pub mod general_category; + +#[cfg(feature = "unicode-segment")] +pub mod grapheme_cluster_break; + +#[cfg(all(feature = "unicode-perl", not(feature = "unicode-gencat")))] +#[allow(dead_code)] +pub mod perl_decimal; + +#[cfg(all(feature = "unicode-perl", not(feature = "unicode-bool")))] +#[allow(dead_code)] +pub mod perl_space; + +#[cfg(feature = "unicode-perl")] +pub mod perl_word; + +#[cfg(feature = "unicode-bool")] +pub mod property_bool; + +#[cfg(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", +))] +pub mod property_names; + +#[cfg(any( + feature = "unicode-age", + feature = "unicode-bool", + feature = "unicode-gencat", + feature = "unicode-perl", + feature = "unicode-script", + feature = "unicode-segment", +))] +pub mod property_values; + +#[cfg(feature = "unicode-script")] +pub mod script; + +#[cfg(feature = "unicode-script")] +pub mod script_extension; + +#[cfg(feature = "unicode-segment")] +pub mod sentence_break; + +#[cfg(feature = "unicode-segment")] +pub mod word_break; diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/perl_decimal.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/perl_decimal.rs new file mode 100644 index 0000000000000000000000000000000000000000..18996c2bfcb0f42a105d29fdc6ddcc618135b4e0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/perl_decimal.rs @@ -0,0 +1,84 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate general-category ucd-16.0.0 --chars --include decimalnumber +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = + &[("Decimal_Number", DECIMAL_NUMBER)]; + +pub const DECIMAL_NUMBER: &'static [(char, char)] = &[ + ('0', '9'), + ('٠', '٩'), + ('۰', '۹'), + ('߀', '߉'), + ('०', '९'), + ('০', '৯'), + ('੦', '੯'), + ('૦', '૯'), + ('୦', '୯'), + ('௦', '௯'), + ('౦', '౯'), + ('೦', '೯'), + ('൦', '൯'), + ('෦', '෯'), + ('๐', '๙'), + ('໐', '໙'), + ('༠', '༩'), + ('၀', '၉'), + ('႐', '႙'), + ('០', '៩'), + ('᠐', '᠙'), + ('᥆', '᥏'), + ('᧐', '᧙'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᭐', '᭙'), + ('᮰', '᮹'), + ('᱀', '᱉'), + ('᱐', '᱙'), + ('꘠', '꘩'), + ('꣐', '꣙'), + ('꤀', '꤉'), + ('꧐', '꧙'), + ('꧰', '꧹'), + ('꩐', '꩙'), + ('꯰', '꯹'), + ('0', '9'), + ('𐒠', '𐒩'), + ('𐴰', '𐴹'), + ('𐵀', '𐵉'), + ('𑁦', '𑁯'), + ('𑃰', '𑃹'), + ('𑄶', '𑄿'), + ('𑇐', '𑇙'), + ('𑋰', '𑋹'), + ('𑑐', '𑑙'), + ('𑓐', '𑓙'), + ('𑙐', '𑙙'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜰', '𑜹'), + ('𑣠', '𑣩'), + ('𑥐', '𑥙'), + ('𑯰', '𑯹'), + ('𑱐', '𑱙'), + ('𑵐', '𑵙'), + ('𑶠', '𑶩'), + ('𑽐', '𑽙'), + ('𖄰', '𖄹'), + ('𖩠', '𖩩'), + ('𖫀', '𖫉'), + ('𖭐', '𖭙'), + ('𖵰', '𖵹'), + ('𜳰', '𜳹'), + ('𝟎', '𝟿'), + ('𞅀', '𞅉'), + ('𞋰', '𞋹'), + ('𞓰', '𞓹'), + ('𞗱', '𞗺'), + ('𞥐', '𞥙'), + ('🯰', '🯹'), +]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/perl_space.rs 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/perl_space.rs new file mode 100644 index 0000000000000000000000000000000000000000..c969e3733add9a96d7f0c6d0cb5ac54eb10af7e8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/perl_space.rs @@ -0,0 +1,23 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate property-bool ucd-16.0.0 --chars --include whitespace +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = + &[("White_Space", WHITE_SPACE)]; + +pub const WHITE_SPACE: &'static [(char, char)] = &[ + ('\t', '\r'), + (' ', ' '), + ('\u{85}', '\u{85}'), + ('\u{a0}', '\u{a0}'), + ('\u{1680}', '\u{1680}'), + ('\u{2000}', '\u{200a}'), + ('\u{2028}', '\u{2029}'), + ('\u{202f}', '\u{202f}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/perl_word.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/perl_word.rs new file mode 100644 index 0000000000000000000000000000000000000000..21c8c0f9c839c83eab4ec38fa0fe62b06b4336f2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/perl_word.rs @@ -0,0 +1,806 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate perl-word ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
+ +pub const PERL_WORD: &'static [(char, char)] = &[ + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('\u{300}', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('\u{483}', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('\u{610}', '\u{61a}'), + ('ؠ', '٩'), + ('ٮ', 'ۓ'), + ('ە', '\u{6dc}'), + ('\u{6df}', '\u{6e8}'), + ('\u{6ea}', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', '\u{74a}'), + ('ݍ', 'ޱ'), + ('߀', 'ߵ'), + ('ߺ', 'ߺ'), + ('\u{7fd}', '\u{7fd}'), + ('ࠀ', '\u{82d}'), + ('ࡀ', '\u{85b}'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('\u{897}', '\u{8e1}'), + ('\u{8e3}', '\u{963}'), + ('०', '९'), + ('ॱ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', 'ৱ'), + ('ৼ', 'ৼ'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('૦', '૯'), + ('ૹ', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + 
('ୟ', '\u{b63}'), + ('୦', '୯'), + ('ୱ', 'ୱ'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('௦', '௯'), + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('\u{c3c}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('౦', '౯'), + ('ಀ', 'ಃ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('\u{cbc}', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('೦', '೯'), + ('ೱ', 'ೳ'), + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', 'ൎ'), + ('ൔ', '\u{d57}'), + ('ൟ', '\u{d63}'), + ('൦', '൯'), + ('ൺ', 'ൿ'), + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('෦', '෯'), + ('ෲ', 'ෳ'), + ('ก', '\u{e3a}'), + ('เ', '\u{e4e}'), + ('๐', '๙'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('໐', '໙'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('\u{f18}', '\u{f19}'), + ('༠', '༩'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('က', '၉'), + ('ၐ', '\u{109d}'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('\u{135d}', '\u{135f}'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 
'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', '\u{1715}'), + ('ᜟ', '\u{1734}'), + ('ᝀ', '\u{1753}'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('\u{1772}', '\u{1773}'), + ('ក', '\u{17d3}'), + ('ៗ', 'ៗ'), + ('ៜ', '\u{17dd}'), + ('០', '៩'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '᠙'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥆', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('᧐', '᧙'), + ('ᨀ', '\u{1a1b}'), + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('ᪧ', 'ᪧ'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', 'ᭌ'), + ('᭐', '᭙'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1bf3}'), + ('ᰀ', '\u{1c37}'), + ('᱀', '᱉'), + ('ᱍ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', 'ᳺ'), + ('ᴀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('\u{200c}', '\u{200d}'), + ('‿', '⁀'), + ('⁔', '⁔'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('\u{20d0}', '\u{20f0}'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⓐ', 'ⓩ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('\u{2d7f}', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('\u{2de0}', '\u{2dff}'), + ('ⸯ', 'ⸯ'), + ('々', '〇'), + ('〡', '\u{302f}'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('\u{3099}', '\u{309a}'), + ('ゝ', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + 
('ꘐ', 'ꘫ'), + ('Ꙁ', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('ꙿ', '\u{a6f1}'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꡀ', 'ꡳ'), + ('ꢀ', '\u{a8c5}'), + ('꣐', '꣙'), + ('\u{a8e0}', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', '\u{a92d}'), + ('ꤰ', '\u{a953}'), + ('ꥠ', 'ꥼ'), + ('\u{a980}', '\u{a9c0}'), + ('ꧏ', '꧙'), + ('ꧠ', 'ꧾ'), + ('ꨀ', '\u{aa36}'), + ('ꩀ', 'ꩍ'), + ('꩐', '꩙'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫯ'), + ('ꫲ', '\u{aaf6}'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('꯰', '꯹'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('︳', '︴'), + ('﹍', '﹏'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('ヲ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('\u{101fd}', '\u{101fd}'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('\u{102e0}', '\u{102e0}'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '\u{1037a}'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒠', '𐒩'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '\u{10a03}'), 
+ ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '\u{10ae6}'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '\u{10d27}'), + ('𐴰', '𐴹'), + ('𐵀', '𐵥'), + ('\u{10d69}', '\u{10d6d}'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('\u{10eab}', '\u{10eac}'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('\u{10efc}', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '\u{10f50}'), + ('𐽰', '\u{10f85}'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀀', '\u{11046}'), + ('𑁦', '𑁵'), + ('\u{1107f}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('𑃐', '𑃨'), + ('𑃰', '𑃹'), + ('\u{11100}', '\u{11134}'), + ('𑄶', '𑄿'), + ('𑅄', '𑅇'), + ('𑅐', '\u{11173}'), + ('𑅶', '𑅶'), + ('\u{11180}', '𑇄'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '\u{11237}'), + ('\u{1123e}', '\u{11241}'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '\u{112ea}'), + ('𑋰', '𑋹'), + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133b}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏓'), + ('\u{113e1}', '\u{113e2}'), + ('𑐀', '𑑊'), + ('𑑐', '𑑙'), + ('\u{1145e}', '𑑡'), + ('𑒀', '𑓅'), + ('𑓇', '𑓇'), + ('𑓐', '𑓙'), + ('𑖀', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('𑗘', '\u{115dd}'), + ('𑘀', '\u{11640}'), + ('𑙄', '𑙄'), + ('𑙐', '𑙙'), + ('𑚀', '𑚸'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜀', '𑜚'), + ('\u{1171d}', '\u{1172b}'), + ('𑜰', '𑜹'), + ('𑝀', '𑝆'), + ('𑠀', '\u{1183a}'), + ('𑢠', '𑣩'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', 
'\u{11943}'), + ('𑥐', '𑥙'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧡'), + ('𑧣', '𑧤'), + ('𑨀', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('𑩐', '\u{11a99}'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑯰', '𑯹'), + ('𑰀', '𑰈'), + ('𑰊', '\u{11c36}'), + ('\u{11c38}', '𑱀'), + ('𑱐', '𑱙'), + ('𑱲', '𑲏'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), + ('𑻠', '𑻶'), + ('\u{11f00}', '𑼐'), + ('𑼒', '\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('𑽐', '\u{11f5a}'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('\u{13440}', '\u{13455}'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄹'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩠', '𖩩'), + ('𖩰', '𖪾'), + ('𖫀', '𖫉'), + ('𖫐', '𖫭'), + ('\u{16af0}', '\u{16af4}'), + ('𖬀', '\u{16b36}'), + ('𖭀', '𖭃'), + ('𖭐', '𖭙'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖵰', '𖵹'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('\u{16f4f}', '𖾇'), + ('\u{16f8f}', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('𜳰', '𜳹'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + 
('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝟎', '𝟿'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('𞄀', '𞄬'), + ('\u{1e130}', '𞄽'), + ('𞅀', '𞅉'), + ('𞅎', '𞅎'), + ('𞊐', '\u{1e2ae}'), + ('𞋀', '𞋹'), + ('𞓐', '𞓹'), + ('𞗐', '𞗺'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('𞤀', '𞥋'), + ('𞥐', '𞥙'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), + ('🯰', '🯹'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), + ('\u{e0100}', '\u{e01ef}'), +]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/property_bool.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/property_bool.rs new file mode 100644 index 0000000000000000000000000000000000000000..3d62edc42317baa4a7ca60fbb0c06dca14a8734c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/property_bool.rs @@ -0,0 
+1,12095 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate property-bool ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("ASCII_Hex_Digit", ASCII_HEX_DIGIT), + ("Alphabetic", ALPHABETIC), + ("Bidi_Control", BIDI_CONTROL), + ("Bidi_Mirrored", BIDI_MIRRORED), + ("Case_Ignorable", CASE_IGNORABLE), + ("Cased", CASED), + ("Changes_When_Casefolded", CHANGES_WHEN_CASEFOLDED), + ("Changes_When_Casemapped", CHANGES_WHEN_CASEMAPPED), + ("Changes_When_Lowercased", CHANGES_WHEN_LOWERCASED), + ("Changes_When_Titlecased", CHANGES_WHEN_TITLECASED), + ("Changes_When_Uppercased", CHANGES_WHEN_UPPERCASED), + ("Dash", DASH), + ("Default_Ignorable_Code_Point", DEFAULT_IGNORABLE_CODE_POINT), + ("Deprecated", DEPRECATED), + ("Diacritic", DIACRITIC), + ("Emoji", EMOJI), + ("Emoji_Component", EMOJI_COMPONENT), + ("Emoji_Modifier", EMOJI_MODIFIER), + ("Emoji_Modifier_Base", EMOJI_MODIFIER_BASE), + ("Emoji_Presentation", EMOJI_PRESENTATION), + ("Extended_Pictographic", EXTENDED_PICTOGRAPHIC), + ("Extender", EXTENDER), + ("Grapheme_Base", GRAPHEME_BASE), + ("Grapheme_Extend", GRAPHEME_EXTEND), + ("Grapheme_Link", GRAPHEME_LINK), + ("Hex_Digit", HEX_DIGIT), + ("Hyphen", HYPHEN), + ("IDS_Binary_Operator", IDS_BINARY_OPERATOR), + ("IDS_Trinary_Operator", IDS_TRINARY_OPERATOR), + ("IDS_Unary_Operator", IDS_UNARY_OPERATOR), + ("ID_Compat_Math_Continue", ID_COMPAT_MATH_CONTINUE), + ("ID_Compat_Math_Start", ID_COMPAT_MATH_START), + ("ID_Continue", ID_CONTINUE), + ("ID_Start", ID_START), + ("Ideographic", IDEOGRAPHIC), + ("InCB", INCB), + ("Join_Control", JOIN_CONTROL), + ("Logical_Order_Exception", LOGICAL_ORDER_EXCEPTION), + ("Lowercase", LOWERCASE), + ("Math", MATH), + ("Modifier_Combining_Mark", MODIFIER_COMBINING_MARK), + ("Noncharacter_Code_Point", NONCHARACTER_CODE_POINT), + ("Other_Alphabetic", OTHER_ALPHABETIC), + 
("Other_Default_Ignorable_Code_Point", OTHER_DEFAULT_IGNORABLE_CODE_POINT), + ("Other_Grapheme_Extend", OTHER_GRAPHEME_EXTEND), + ("Other_ID_Continue", OTHER_ID_CONTINUE), + ("Other_ID_Start", OTHER_ID_START), + ("Other_Lowercase", OTHER_LOWERCASE), + ("Other_Math", OTHER_MATH), + ("Other_Uppercase", OTHER_UPPERCASE), + ("Pattern_Syntax", PATTERN_SYNTAX), + ("Pattern_White_Space", PATTERN_WHITE_SPACE), + ("Prepended_Concatenation_Mark", PREPENDED_CONCATENATION_MARK), + ("Quotation_Mark", QUOTATION_MARK), + ("Radical", RADICAL), + ("Regional_Indicator", REGIONAL_INDICATOR), + ("Sentence_Terminal", SENTENCE_TERMINAL), + ("Soft_Dotted", SOFT_DOTTED), + ("Terminal_Punctuation", TERMINAL_PUNCTUATION), + ("Unified_Ideograph", UNIFIED_IDEOGRAPH), + ("Uppercase", UPPERCASE), + ("Variation_Selector", VARIATION_SELECTOR), + ("White_Space", WHITE_SPACE), + ("XID_Continue", XID_CONTINUE), + ("XID_Start", XID_START), +]; + +pub const ASCII_HEX_DIGIT: &'static [(char, char)] = + &[('0', '9'), ('A', 'F'), ('a', 'f')]; + +pub const ALPHABETIC: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('\u{345}', '\u{345}'), + ('\u{363}', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('\u{5b0}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('\u{610}', '\u{61a}'), + ('ؠ', '\u{657}'), + ('\u{659}', '\u{65f}'), + ('ٮ', 'ۓ'), + ('ە', '\u{6dc}'), + ('\u{6e1}', '\u{6e8}'), + ('\u{6ed}', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', '\u{73f}'), + ('ݍ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', '\u{817}'), + ('ࠚ', '\u{82c}'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('\u{897}', 
'\u{897}'), + ('ࢠ', 'ࣉ'), + ('\u{8d4}', '\u{8df}'), + ('\u{8e3}', '\u{8e9}'), + ('\u{8f0}', 'ऻ'), + ('ऽ', 'ौ'), + ('ॎ', 'ॐ'), + ('\u{955}', '\u{963}'), + ('ॱ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৌ'), + ('ৎ', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4c}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('\u{a70}', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', 'ૌ'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('ૹ', '\u{afc}'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', 'ୌ'), + ('\u{b56}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('ୱ', 'ୱ'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', 'ௌ'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4c}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('ಀ', 'ಃ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccc}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('ೱ', 'ೳ'), + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', 'ൌ'), + ('ൎ', 'ൎ'), + ('ൔ', '\u{d57}'), + ('ൟ', '\u{d63}'), + ('ൺ', 'ൿ'), + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 
'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('ก', '\u{e3a}'), + ('เ', 'ๆ'), + ('\u{e4d}', '\u{e4d}'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', '\u{eb9}'), + ('\u{ebb}', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ecd}', '\u{ecd}'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f83}'), + ('ྈ', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('က', '\u{1036}'), + ('း', 'း'), + ('ျ', 'ဿ'), + ('ၐ', 'ႏ'), + ('ႚ', '\u{109d}'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', '\u{1713}'), + ('ᜟ', '\u{1733}'), + ('ᝀ', '\u{1753}'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('\u{1772}', '\u{1773}'), + ('ក', 'ឳ'), + ('ា', 'ៈ'), + ('ៗ', 'ៗ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', 'ᤸ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', '\u{1a1b}'), + ('ᨠ', '\u{1a5e}'), + ('ᩡ', '\u{1a74}'), + ('ᪧ', 'ᪧ'), + ('\u{1abf}', '\u{1ac0}'), + ('\u{1acc}', '\u{1ace}'), + ('\u{1b00}', 'ᬳ'), + ('\u{1b35}', '\u{1b43}'), + ('ᭅ', 'ᭌ'), + ('\u{1b80}', '\u{1ba9}'), + ('\u{1bac}', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᯧ', '\u{1bf1}'), + ('ᰀ', '\u{1c36}'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('\u{1dd3}', '\u{1df4}'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 
'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⓐ', 'ⓩ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('\u{2de0}', '\u{2dff}'), + ('ⸯ', 'ⸯ'), + ('々', '〇'), + ('〡', '〩'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('ゝ', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('\u{a674}', '\u{a67b}'), + ('ꙿ', 'ꛯ'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠅ'), + ('ꠇ', 'ꠧ'), + ('ꡀ', 'ꡳ'), + ('ꢀ', 'ꣃ'), + ('\u{a8c5}', '\u{a8c5}'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', '\u{a8ff}'), + ('ꤊ', '\u{a92a}'), + ('ꤰ', 'ꥒ'), + ('ꥠ', 'ꥼ'), + ('\u{a980}', 'ꦲ'), + ('ꦴ', 'ꦿ'), + ('ꧏ', 'ꧏ'), + ('ꧠ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', '\u{aa36}'), + ('ꩀ', 'ꩍ'), + ('ꩠ', 'ꩶ'), + ('ꩺ', '\u{aabe}'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫯ'), + ('ꫲ', 'ꫵ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯪ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('A', 'Z'), + ('a', 'z'), + ('ヲ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', 
'𐍊'), + ('𐍐', '\u{1037a}'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '\u{10d27}'), + ('𐵊', '𐵥'), + ('\u{10d69}', '\u{10d69}'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('\u{10eab}', '\u{10eac}'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('\u{10efc}', '\u{10efc}'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀀', '\u{11045}'), + ('𑁱', '𑁵'), + ('\u{11080}', '𑂸'), + ('\u{110c2}', '\u{110c2}'), + ('𑃐', '𑃨'), + ('\u{11100}', '\u{11132}'), + ('𑅄', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('\u{11180}', '𑆿'), + ('𑇁', '𑇄'), + ('𑇎', '\u{111cf}'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '\u{11234}'), + ('\u{11237}', '\u{11237}'), + ('\u{1123e}', '\u{11241}'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '\u{112e8}'), + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '𑍌'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏍'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑑁'), + ('\u{11443}', '𑑅'), + ('𑑇', '𑑊'), + ('𑑟', 
'𑑡'), + ('𑒀', '𑓁'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '\u{115b5}'), + ('𑖸', '𑖾'), + ('𑗘', '\u{115dd}'), + ('𑘀', '𑘾'), + ('\u{11640}', '\u{11640}'), + ('𑙄', '𑙄'), + ('𑚀', '\u{116b5}'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('\u{1171d}', '\u{1172a}'), + ('𑝀', '𑝆'), + ('𑠀', '𑠸'), + ('𑢠', '𑣟'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{1193c}'), + ('𑤿', '𑥂'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧟'), + ('𑧡', '𑧡'), + ('𑧣', '𑧤'), + ('𑨀', '𑨲'), + ('\u{11a35}', '\u{11a3e}'), + ('𑩐', '𑪗'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '\u{11c36}'), + ('\u{11c38}', '𑰾'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d41}'), + ('\u{11d43}', '\u{11d43}'), + ('𑵆', '\u{11d47}'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶖'), + ('𑶘', '𑶘'), + ('𑻠', '𑻶'), + ('\u{11f00}', '𑼐'), + ('𑼒', '\u{11f3a}'), + ('𑼾', '\u{11f40}'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '\u{1612e}'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('\u{16f4f}', '𖾇'), + ('\u{16f8f}', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('\u{16ff0}', '\u{16ff1}'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('\u{1bc9e}', '\u{1bc9e}'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', 
'𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞤀', '𞥃'), + ('\u{1e947}', '\u{1e947}'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const BIDI_CONTROL: &'static [(char, char)] = &[ + ('\u{61c}', '\u{61c}'), + ('\u{200e}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2066}', '\u{2069}'), +]; + +pub const BIDI_MIRRORED: &'static [(char, char)] = &[ + ('(', ')'), + ('<', '<'), + ('>', '>'), + ('[', '['), + (']', ']'), + ('{', '{'), + ('}', '}'), + ('«', '«'), + ('»', '»'), + ('༺', '༽'), + ('᚛', '᚜'), + ('‹', '›'), + ('⁅', '⁆'), + ('⁽', '⁾'), + ('₍', '₎'), + ('⅀', '⅀'), + ('∁', '∄'), + ('∈', '∍'), + ('∑', '∑'), + ('∕', '∖'), + ('√', '∝'), + ('∟', '∢'), + ('∤', '∤'), + ('∦', '∦'), + ('∫', '∳'), + ('∹', '∹'), + ('∻', '≌'), + ('≒', '≕'), + ('≟', '≠'), + ('≢', '≢'), + ('≤', '≫'), + ('≭', '⊌'), + ('⊏', '⊒'), + ('⊘', '⊘'), + ('⊢', 
'⊣'), + ('⊦', '⊸'), + ('⊾', '⊿'), + ('⋉', '⋍'), + ('⋐', '⋑'), + ('⋖', '⋭'), + ('⋰', '⋿'), + ('⌈', '⌋'), + ('⌠', '⌡'), + ('〈', '〉'), + ('❨', '❵'), + ('⟀', '⟀'), + ('⟃', '⟆'), + ('⟈', '⟉'), + ('⟋', '⟍'), + ('⟓', '⟖'), + ('⟜', '⟞'), + ('⟢', '⟯'), + ('⦃', '⦘'), + ('⦛', '⦠'), + ('⦢', '⦯'), + ('⦸', '⦸'), + ('⧀', '⧅'), + ('⧉', '⧉'), + ('⧎', '⧒'), + ('⧔', '⧕'), + ('⧘', '⧜'), + ('⧡', '⧡'), + ('⧣', '⧥'), + ('⧨', '⧩'), + ('⧴', '⧹'), + ('⧼', '⧽'), + ('⨊', '⨜'), + ('⨞', '⨡'), + ('⨤', '⨤'), + ('⨦', '⨦'), + ('⨩', '⨩'), + ('⨫', '⨮'), + ('⨴', '⨵'), + ('⨼', '⨾'), + ('⩗', '⩘'), + ('⩤', '⩥'), + ('⩪', '⩭'), + ('⩯', '⩰'), + ('⩳', '⩴'), + ('⩹', '⪣'), + ('⪦', '⪭'), + ('⪯', '⫖'), + ('⫝̸', '⫝̸'), + ('⫞', '⫞'), + ('⫢', '⫦'), + ('⫬', '⫮'), + ('⫳', '⫳'), + ('⫷', '⫻'), + ('⫽', '⫽'), + ('⯾', '⯾'), + ('⸂', '⸅'), + ('⸉', '⸊'), + ('⸌', '⸍'), + ('⸜', '⸝'), + ('⸠', '⸩'), + ('⹕', '⹜'), + ('〈', '】'), + ('〔', '〛'), + ('﹙', '﹞'), + ('﹤', '﹥'), + ('(', ')'), + ('<', '<'), + ('>', '>'), + ('[', '['), + (']', ']'), + ('{', '{'), + ('}', '}'), + ('⦅', '⦆'), + ('「', '」'), + ('𝛛', '𝛛'), + ('𝜕', '𝜕'), + ('𝝏', '𝝏'), + ('𝞉', '𝞉'), + ('𝟃', '𝟃'), +]; + +pub const CASE_IGNORABLE: &'static [(char, char)] = &[ + ('\'', '\''), + ('.', '.'), + (':', ':'), + ('^', '^'), + ('`', '`'), + ('¨', '¨'), + ('\u{ad}', '\u{ad}'), + ('¯', '¯'), + ('´', '´'), + ('·', '¸'), + ('ʰ', '\u{36f}'), + ('ʹ', '͵'), + ('ͺ', 'ͺ'), + ('΄', '΅'), + ('·', '·'), + ('\u{483}', '\u{489}'), + ('ՙ', 'ՙ'), + ('՟', '՟'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('״', '״'), + ('\u{600}', '\u{605}'), + ('\u{610}', '\u{61a}'), + ('\u{61c}', '\u{61c}'), + ('ـ', 'ـ'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dd}'), + ('\u{6df}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{70f}', '\u{70f}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', 'ߵ'), + ('ߺ', 'ߺ'), + ('\u{7fd}', '\u{7fd}'), + 
('\u{816}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('࢈', '࢈'), + ('\u{890}', '\u{891}'), + ('\u{897}', '\u{89f}'), + ('ࣉ', '\u{902}'), + ('\u{93a}', '\u{93a}'), + ('\u{93c}', '\u{93c}'), + ('\u{941}', '\u{948}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('ॱ', 'ॱ'), + ('\u{981}', '\u{981}'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9c1}', '\u{9c4}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', '\u{a02}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a41}', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', '\u{a82}'), + ('\u{abc}', '\u{abc}'), + ('\u{ac1}', '\u{ac5}'), + ('\u{ac7}', '\u{ac8}'), + ('\u{acd}', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', '\u{b01}'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3f}', '\u{b3f}'), + ('\u{b41}', '\u{b44}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b56}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bc0}', '\u{bc0}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{c00}', '\u{c00}'), + ('\u{c04}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', '\u{c40}'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', '\u{c81}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{cbf}', '\u{cbf}'), + ('\u{cc6}', '\u{cc6}'), + ('\u{ccc}', '\u{ccd}'), + ('\u{ce2}', '\u{ce3}'), + ('\u{d00}', '\u{d01}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d41}', '\u{d44}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', '\u{d81}'), + ('\u{dca}', '\u{dca}'), + ('\u{dd2}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('ๆ', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('\u{f71}', 
'\u{f7e}'), + ('\u{f80}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{102d}', '\u{1030}'), + ('\u{1032}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('\u{103d}', '\u{103e}'), + ('\u{1058}', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{1082}'), + ('\u{1085}', '\u{1086}'), + ('\u{108d}', '\u{108d}'), + ('\u{109d}', '\u{109d}'), + ('ჼ', 'ჼ'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1714}'), + ('\u{1732}', '\u{1733}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17b5}'), + ('\u{17b7}', '\u{17bd}'), + ('\u{17c6}', '\u{17c6}'), + ('\u{17c9}', '\u{17d3}'), + ('ៗ', 'ៗ'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180f}'), + ('ᡃ', 'ᡃ'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', '\u{1922}'), + ('\u{1927}', '\u{1928}'), + ('\u{1932}', '\u{1932}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a17}', '\u{1a18}'), + ('\u{1a1b}', '\u{1a1b}'), + ('\u{1a56}', '\u{1a56}'), + ('\u{1a58}', '\u{1a5e}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a62}', '\u{1a62}'), + ('\u{1a65}', '\u{1a6c}'), + ('\u{1a73}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('ᪧ', 'ᪧ'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', '\u{1b03}'), + ('\u{1b34}', '\u{1b34}'), + ('\u{1b36}', '\u{1b3a}'), + ('\u{1b3c}', '\u{1b3c}'), + ('\u{1b42}', '\u{1b42}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1b81}'), + ('\u{1ba2}', '\u{1ba5}'), + ('\u{1ba8}', '\u{1ba9}'), + ('\u{1bab}', '\u{1bad}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1be8}', '\u{1be9}'), + ('\u{1bed}', '\u{1bed}'), + ('\u{1bef}', '\u{1bf1}'), + ('\u{1c2c}', '\u{1c33}'), + ('\u{1c36}', '\u{1c37}'), + ('ᱸ', 'ᱽ'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('ᴬ', 'ᵪ'), + ('ᵸ', 'ᵸ'), + ('ᶛ', '\u{1dff}'), + ('᾽', '᾽'), + ('᾿', '῁'), + ('῍', '῏'), + ('῝', '῟'), + 
('῭', '`'), + ('´', '῾'), + ('\u{200b}', '\u{200f}'), + ('‘', '’'), + ('․', '․'), + ('‧', '‧'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{2064}'), + ('\u{2066}', '\u{206f}'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('\u{20d0}', '\u{20f0}'), + ('ⱼ', 'ⱽ'), + ('\u{2cef}', '\u{2cf1}'), + ('ⵯ', 'ⵯ'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('ⸯ', 'ⸯ'), + ('々', '々'), + ('\u{302a}', '\u{302d}'), + ('〱', '〵'), + ('〻', '〻'), + ('\u{3099}', 'ゞ'), + ('ー', 'ヾ'), + ('ꀕ', 'ꀕ'), + ('ꓸ', 'ꓽ'), + ('ꘌ', 'ꘌ'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('ꙿ', 'ꙿ'), + ('ꚜ', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('꜀', '꜡'), + ('ꝰ', 'ꝰ'), + ('ꞈ', '꞊'), + ('ꟲ', 'ꟴ'), + ('ꟸ', 'ꟹ'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('\u{a825}', '\u{a826}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a951}'), + ('\u{a980}', '\u{a982}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9b6}', '\u{a9b9}'), + ('\u{a9bc}', '\u{a9bd}'), + ('ꧏ', 'ꧏ'), + ('\u{a9e5}', 'ꧦ'), + ('\u{aa29}', '\u{aa2e}'), + ('\u{aa31}', '\u{aa32}'), + ('\u{aa35}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', '\u{aa4c}'), + ('ꩰ', 'ꩰ'), + ('\u{aa7c}', '\u{aa7c}'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('ꫝ', 'ꫝ'), + ('\u{aaec}', '\u{aaed}'), + ('ꫳ', 'ꫴ'), + ('\u{aaf6}', '\u{aaf6}'), + ('꭛', 'ꭟ'), + ('ꭩ', '꭫'), + ('\u{abe5}', '\u{abe5}'), + ('\u{abe8}', '\u{abe8}'), + ('\u{abed}', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('﮲', '﯂'), + ('\u{fe00}', '\u{fe0f}'), + ('︓', '︓'), + ('\u{fe20}', '\u{fe2f}'), + ('﹒', '﹒'), + ('﹕', '﹕'), + ('\u{feff}', '\u{feff}'), + (''', '''), + ('.', '.'), + (':', ':'), + ('^', '^'), + ('`', '`'), + ('ー', 'ー'), + ('\u{ff9e}', '\u{ff9f}'), + (' ̄', ' ̄'), + ('\u{fff9}', '\u{fffb}'), + ('\u{101fd}', 
'\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('𐵎', '𐵎'), + ('\u{10d69}', '\u{10d6d}'), + ('𐵯', '𐵯'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11001}', '\u{11001}'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '\u{11081}'), + ('\u{110b3}', '\u{110b6}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{110cd}', '\u{110cd}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{1112b}'), + ('\u{1112d}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '\u{11181}'), + ('\u{111b6}', '\u{111be}'), + ('\u{111c9}', '\u{111cc}'), + ('\u{111cf}', '\u{111cf}'), + ('\u{1122f}', '\u{11231}'), + ('\u{11234}', '\u{11234}'), + ('\u{11236}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112df}'), + ('\u{112e3}', '\u{112ea}'), + ('\u{11300}', '\u{11301}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{11340}', '\u{11340}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113bb}', '\u{113c0}'), + ('\u{113ce}', '\u{113ce}'), + ('\u{113d0}', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11438}', '\u{1143f}'), + ('\u{11442}', '\u{11444}'), + ('\u{11446}', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b3}', '\u{114b8}'), + ('\u{114ba}', '\u{114ba}'), + ('\u{114bf}', '\u{114c0}'), + ('\u{114c2}', '\u{114c3}'), + ('\u{115b2}', '\u{115b5}'), + ('\u{115bc}', '\u{115bd}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('\u{11633}', '\u{1163a}'), + ('\u{1163d}', '\u{1163d}'), + ('\u{1163f}', 
'\u{11640}'), + ('\u{116ab}', '\u{116ab}'), + ('\u{116ad}', '\u{116ad}'), + ('\u{116b0}', '\u{116b5}'), + ('\u{116b7}', '\u{116b7}'), + ('\u{1171d}', '\u{1171d}'), + ('\u{1171f}', '\u{1171f}'), + ('\u{11722}', '\u{11725}'), + ('\u{11727}', '\u{1172b}'), + ('\u{1182f}', '\u{11837}'), + ('\u{11839}', '\u{1183a}'), + ('\u{1193b}', '\u{1193c}'), + ('\u{1193e}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119d4}', '\u{119d7}'), + ('\u{119da}', '\u{119db}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '\u{11a38}'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a56}'), + ('\u{11a59}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a96}'), + ('\u{11a98}', '\u{11a99}'), + ('\u{11c30}', '\u{11c36}'), + ('\u{11c38}', '\u{11c3d}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('\u{11caa}', '\u{11cb0}'), + ('\u{11cb2}', '\u{11cb3}'), + ('\u{11cb5}', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('\u{11d90}', '\u{11d91}'), + ('\u{11d95}', '\u{11d95}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11ef3}', '\u{11ef4}'), + ('\u{11f00}', '\u{11f01}'), + ('\u{11f36}', '\u{11f3a}'), + ('\u{11f40}', '\u{11f40}'), + ('\u{11f42}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13430}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{16129}'), + ('\u{1612d}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('𖭀', '𖭃'), + ('𖵀', '𖵂'), + ('𖵫', '𖵬'), + ('\u{16f4f}', '\u{16f4f}'), + ('\u{16f8f}', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '\u{16fe4}'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1bca0}', '\u{1bca3}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d167}', '\u{1d169}'), + ('\u{1d173}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + 
('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '𞄽'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('𞓫', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '𞥋'), + ('🏻', '🏿'), + ('\u{e0001}', '\u{e0001}'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const CASED: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ƺ'), + ('Ƽ', 'ƿ'), + ('DŽ', 'ʓ'), + ('ʕ', 'ʸ'), + ('ˀ', 'ˁ'), + ('ˠ', 'ˤ'), + ('\u{345}', '\u{345}'), + ('Ͱ', 'ͳ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՠ', 'ֈ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ჿ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℴ'), + ('ℹ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ⅿ'), + ('Ↄ', 'ↄ'), + ('Ⓐ', 'ⓩ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('Ꙁ', 'ꙭ'), + ('Ꚁ', 'ꚝ'), + ('Ꜣ', 'ꞇ'), + ('Ꞌ', 'ꞎ'), + ('Ꞑ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 
'Ƛ'), + ('ꟲ', 'ꟶ'), + ('ꟸ', 'ꟺ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('A', 'Z'), + ('a', 'z'), + ('𐐀', '𐑏'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐞀', '𐞀'), + ('𐞃', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐵐', '𐵥'), + ('𐵰', '𐶅'), + ('𑢠', '𑣟'), + ('𖹀', '𖹿'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼉'), + ('𝼋', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞤀', '𞥃'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), +]; + +pub const CHANGES_WHEN_CASEFOLDED: &'static [(char, char)] = &[ + ('A', 'Z'), + ('µ', 'µ'), + ('À', 'Ö'), + ('Ø', 'ß'), + ('Ā', 'Ā'), + ('Ă', 'Ă'), + ('Ą', 'Ą'), + ('Ć', 'Ć'), + ('Ĉ', 'Ĉ'), + ('Ċ', 'Ċ'), + ('Č', 'Č'), + ('Ď', 'Ď'), + ('Đ', 'Đ'), + ('Ē', 'Ē'), + ('Ĕ', 'Ĕ'), + ('Ė', 'Ė'), + ('Ę', 'Ę'), + ('Ě', 'Ě'), + ('Ĝ', 'Ĝ'), + ('Ğ', 'Ğ'), + ('Ġ', 'Ġ'), + ('Ģ', 'Ģ'), + ('Ĥ', 'Ĥ'), + ('Ħ', 'Ħ'), + ('Ĩ', 'Ĩ'), + ('Ī', 'Ī'), + ('Ĭ', 'Ĭ'), + ('Į', 'Į'), + ('İ', 'İ'), + ('IJ', 'IJ'), + ('Ĵ', 'Ĵ'), + ('Ķ', 'Ķ'), + ('Ĺ', 'Ĺ'), + ('Ļ', 'Ļ'), + ('Ľ', 'Ľ'), + ('Ŀ', 'Ŀ'), + ('Ł', 'Ł'), + ('Ń', 'Ń'), + ('Ņ', 'Ņ'), + ('Ň', 'Ň'), + ('ʼn', 'Ŋ'), + ('Ō', 'Ō'), + ('Ŏ', 'Ŏ'), + ('Ő', 'Ő'), + ('Œ', 'Œ'), + ('Ŕ', 'Ŕ'), + ('Ŗ', 'Ŗ'), + ('Ř', 'Ř'), + ('Ś', 'Ś'), + ('Ŝ', 'Ŝ'), + ('Ş', 'Ş'), + ('Š', 'Š'), + ('Ţ', 'Ţ'), + ('Ť', 'Ť'), + ('Ŧ', 'Ŧ'), + ('Ũ', 'Ũ'), + ('Ū', 'Ū'), + ('Ŭ', 'Ŭ'), + ('Ů', 'Ů'), + ('Ű', 'Ű'), + ('Ų', 'Ų'), + ('Ŵ', 'Ŵ'), + ('Ŷ', 'Ŷ'), + ('Ÿ', 'Ź'), + ('Ż', 'Ż'), + ('Ž', 'Ž'), + ('ſ', 'ſ'), + ('Ɓ', 'Ƃ'), + ('Ƅ', 'Ƅ'), 
+ ('Ɔ', 'Ƈ'), + ('Ɖ', 'Ƌ'), + ('Ǝ', 'Ƒ'), + ('Ɠ', 'Ɣ'), + ('Ɩ', 'Ƙ'), + ('Ɯ', 'Ɲ'), + ('Ɵ', 'Ơ'), + ('Ƣ', 'Ƣ'), + ('Ƥ', 'Ƥ'), + ('Ʀ', 'Ƨ'), + ('Ʃ', 'Ʃ'), + ('Ƭ', 'Ƭ'), + ('Ʈ', 'Ư'), + ('Ʊ', 'Ƴ'), + ('Ƶ', 'Ƶ'), + ('Ʒ', 'Ƹ'), + ('Ƽ', 'Ƽ'), + ('DŽ', 'Dž'), + ('LJ', 'Lj'), + ('NJ', 'Nj'), + ('Ǎ', 'Ǎ'), + ('Ǐ', 'Ǐ'), + ('Ǒ', 'Ǒ'), + ('Ǔ', 'Ǔ'), + ('Ǖ', 'Ǖ'), + ('Ǘ', 'Ǘ'), + ('Ǚ', 'Ǚ'), + ('Ǜ', 'Ǜ'), + ('Ǟ', 'Ǟ'), + ('Ǡ', 'Ǡ'), + ('Ǣ', 'Ǣ'), + ('Ǥ', 'Ǥ'), + ('Ǧ', 'Ǧ'), + ('Ǩ', 'Ǩ'), + ('Ǫ', 'Ǫ'), + ('Ǭ', 'Ǭ'), + ('Ǯ', 'Ǯ'), + ('DZ', 'Dz'), + ('Ǵ', 'Ǵ'), + ('Ƕ', 'Ǹ'), + ('Ǻ', 'Ǻ'), + ('Ǽ', 'Ǽ'), + ('Ǿ', 'Ǿ'), + ('Ȁ', 'Ȁ'), + ('Ȃ', 'Ȃ'), + ('Ȅ', 'Ȅ'), + ('Ȇ', 'Ȇ'), + ('Ȉ', 'Ȉ'), + ('Ȋ', 'Ȋ'), + ('Ȍ', 'Ȍ'), + ('Ȏ', 'Ȏ'), + ('Ȑ', 'Ȑ'), + ('Ȓ', 'Ȓ'), + ('Ȕ', 'Ȕ'), + ('Ȗ', 'Ȗ'), + ('Ș', 'Ș'), + ('Ț', 'Ț'), + ('Ȝ', 'Ȝ'), + ('Ȟ', 'Ȟ'), + ('Ƞ', 'Ƞ'), + ('Ȣ', 'Ȣ'), + ('Ȥ', 'Ȥ'), + ('Ȧ', 'Ȧ'), + ('Ȩ', 'Ȩ'), + ('Ȫ', 'Ȫ'), + ('Ȭ', 'Ȭ'), + ('Ȯ', 'Ȯ'), + ('Ȱ', 'Ȱ'), + ('Ȳ', 'Ȳ'), + ('Ⱥ', 'Ȼ'), + ('Ƚ', 'Ⱦ'), + ('Ɂ', 'Ɂ'), + ('Ƀ', 'Ɇ'), + ('Ɉ', 'Ɉ'), + ('Ɋ', 'Ɋ'), + ('Ɍ', 'Ɍ'), + ('Ɏ', 'Ɏ'), + ('\u{345}', '\u{345}'), + ('Ͱ', 'Ͱ'), + ('Ͳ', 'Ͳ'), + ('Ͷ', 'Ͷ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ώ'), + ('Α', 'Ρ'), + ('Σ', 'Ϋ'), + ('ς', 'ς'), + ('Ϗ', 'ϑ'), + ('ϕ', 'ϖ'), + ('Ϙ', 'Ϙ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'Ϣ'), + ('Ϥ', 'Ϥ'), + ('Ϧ', 'Ϧ'), + ('Ϩ', 'Ϩ'), + ('Ϫ', 'Ϫ'), + ('Ϭ', 'Ϭ'), + ('Ϯ', 'Ϯ'), + ('ϰ', 'ϱ'), + ('ϴ', 'ϵ'), + ('Ϸ', 'Ϸ'), + ('Ϲ', 'Ϻ'), + ('Ͻ', 'Я'), + ('Ѡ', 'Ѡ'), + ('Ѣ', 'Ѣ'), + ('Ѥ', 'Ѥ'), + ('Ѧ', 'Ѧ'), + ('Ѩ', 'Ѩ'), + ('Ѫ', 'Ѫ'), + ('Ѭ', 'Ѭ'), + ('Ѯ', 'Ѯ'), + ('Ѱ', 'Ѱ'), + ('Ѳ', 'Ѳ'), + ('Ѵ', 'Ѵ'), + ('Ѷ', 'Ѷ'), + ('Ѹ', 'Ѹ'), + ('Ѻ', 'Ѻ'), + ('Ѽ', 'Ѽ'), + ('Ѿ', 'Ѿ'), + ('Ҁ', 'Ҁ'), + ('Ҋ', 'Ҋ'), + ('Ҍ', 'Ҍ'), + ('Ҏ', 'Ҏ'), + ('Ґ', 'Ґ'), + ('Ғ', 'Ғ'), + ('Ҕ', 'Ҕ'), + ('Җ', 'Җ'), + ('Ҙ', 'Ҙ'), + ('Қ', 'Қ'), + ('Ҝ', 'Ҝ'), + ('Ҟ', 'Ҟ'), + ('Ҡ', 'Ҡ'), + ('Ң', 'Ң'), + ('Ҥ', 'Ҥ'), + ('Ҧ', 'Ҧ'), + ('Ҩ', 'Ҩ'), + 
('Ҫ', 'Ҫ'), + ('Ҭ', 'Ҭ'), + ('Ү', 'Ү'), + ('Ұ', 'Ұ'), + ('Ҳ', 'Ҳ'), + ('Ҵ', 'Ҵ'), + ('Ҷ', 'Ҷ'), + ('Ҹ', 'Ҹ'), + ('Һ', 'Һ'), + ('Ҽ', 'Ҽ'), + ('Ҿ', 'Ҿ'), + ('Ӏ', 'Ӂ'), + ('Ӄ', 'Ӄ'), + ('Ӆ', 'Ӆ'), + ('Ӈ', 'Ӈ'), + ('Ӊ', 'Ӊ'), + ('Ӌ', 'Ӌ'), + ('Ӎ', 'Ӎ'), + ('Ӑ', 'Ӑ'), + ('Ӓ', 'Ӓ'), + ('Ӕ', 'Ӕ'), + ('Ӗ', 'Ӗ'), + ('Ә', 'Ә'), + ('Ӛ', 'Ӛ'), + ('Ӝ', 'Ӝ'), + ('Ӟ', 'Ӟ'), + ('Ӡ', 'Ӡ'), + ('Ӣ', 'Ӣ'), + ('Ӥ', 'Ӥ'), + ('Ӧ', 'Ӧ'), + ('Ө', 'Ө'), + ('Ӫ', 'Ӫ'), + ('Ӭ', 'Ӭ'), + ('Ӯ', 'Ӯ'), + ('Ӱ', 'Ӱ'), + ('Ӳ', 'Ӳ'), + ('Ӵ', 'Ӵ'), + ('Ӷ', 'Ӷ'), + ('Ӹ', 'Ӹ'), + ('Ӻ', 'Ӻ'), + ('Ӽ', 'Ӽ'), + ('Ӿ', 'Ӿ'), + ('Ԁ', 'Ԁ'), + ('Ԃ', 'Ԃ'), + ('Ԅ', 'Ԅ'), + ('Ԇ', 'Ԇ'), + ('Ԉ', 'Ԉ'), + ('Ԋ', 'Ԋ'), + ('Ԍ', 'Ԍ'), + ('Ԏ', 'Ԏ'), + ('Ԑ', 'Ԑ'), + ('Ԓ', 'Ԓ'), + ('Ԕ', 'Ԕ'), + ('Ԗ', 'Ԗ'), + ('Ԙ', 'Ԙ'), + ('Ԛ', 'Ԛ'), + ('Ԝ', 'Ԝ'), + ('Ԟ', 'Ԟ'), + ('Ԡ', 'Ԡ'), + ('Ԣ', 'Ԣ'), + ('Ԥ', 'Ԥ'), + ('Ԧ', 'Ԧ'), + ('Ԩ', 'Ԩ'), + ('Ԫ', 'Ԫ'), + ('Ԭ', 'Ԭ'), + ('Ԯ', 'Ԯ'), + ('Ա', 'Ֆ'), + ('և', 'և'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'Ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('Ḁ', 'Ḁ'), + ('Ḃ', 'Ḃ'), + ('Ḅ', 'Ḅ'), + ('Ḇ', 'Ḇ'), + ('Ḉ', 'Ḉ'), + ('Ḋ', 'Ḋ'), + ('Ḍ', 'Ḍ'), + ('Ḏ', 'Ḏ'), + ('Ḑ', 'Ḑ'), + ('Ḓ', 'Ḓ'), + ('Ḕ', 'Ḕ'), + ('Ḗ', 'Ḗ'), + ('Ḙ', 'Ḙ'), + ('Ḛ', 'Ḛ'), + ('Ḝ', 'Ḝ'), + ('Ḟ', 'Ḟ'), + ('Ḡ', 'Ḡ'), + ('Ḣ', 'Ḣ'), + ('Ḥ', 'Ḥ'), + ('Ḧ', 'Ḧ'), + ('Ḩ', 'Ḩ'), + ('Ḫ', 'Ḫ'), + ('Ḭ', 'Ḭ'), + ('Ḯ', 'Ḯ'), + ('Ḱ', 'Ḱ'), + ('Ḳ', 'Ḳ'), + ('Ḵ', 'Ḵ'), + ('Ḷ', 'Ḷ'), + ('Ḹ', 'Ḹ'), + ('Ḻ', 'Ḻ'), + ('Ḽ', 'Ḽ'), + ('Ḿ', 'Ḿ'), + ('Ṁ', 'Ṁ'), + ('Ṃ', 'Ṃ'), + ('Ṅ', 'Ṅ'), + ('Ṇ', 'Ṇ'), + ('Ṉ', 'Ṉ'), + ('Ṋ', 'Ṋ'), + ('Ṍ', 'Ṍ'), + ('Ṏ', 'Ṏ'), + ('Ṑ', 'Ṑ'), + ('Ṓ', 'Ṓ'), + ('Ṕ', 'Ṕ'), + ('Ṗ', 'Ṗ'), + ('Ṙ', 'Ṙ'), + ('Ṛ', 'Ṛ'), + ('Ṝ', 'Ṝ'), + ('Ṟ', 'Ṟ'), + ('Ṡ', 'Ṡ'), + ('Ṣ', 'Ṣ'), + ('Ṥ', 'Ṥ'), + ('Ṧ', 'Ṧ'), + ('Ṩ', 'Ṩ'), + ('Ṫ', 'Ṫ'), + ('Ṭ', 'Ṭ'), + ('Ṯ', 'Ṯ'), + ('Ṱ', 'Ṱ'), + ('Ṳ', 'Ṳ'), + ('Ṵ', 'Ṵ'), + ('Ṷ', 'Ṷ'), + ('Ṹ', 'Ṹ'), + ('Ṻ', 'Ṻ'), + ('Ṽ', 'Ṽ'), + ('Ṿ', 'Ṿ'), + ('Ẁ', 'Ẁ'), + ('Ẃ', 'Ẃ'), + ('Ẅ', 'Ẅ'), + ('Ẇ', 'Ẇ'), 
+ ('Ẉ', 'Ẉ'), + ('Ẋ', 'Ẋ'), + ('Ẍ', 'Ẍ'), + ('Ẏ', 'Ẏ'), + ('Ẑ', 'Ẑ'), + ('Ẓ', 'Ẓ'), + ('Ẕ', 'Ẕ'), + ('ẚ', 'ẛ'), + ('ẞ', 'ẞ'), + ('Ạ', 'Ạ'), + ('Ả', 'Ả'), + ('Ấ', 'Ấ'), + ('Ầ', 'Ầ'), + ('Ẩ', 'Ẩ'), + ('Ẫ', 'Ẫ'), + ('Ậ', 'Ậ'), + ('Ắ', 'Ắ'), + ('Ằ', 'Ằ'), + ('Ẳ', 'Ẳ'), + ('Ẵ', 'Ẵ'), + ('Ặ', 'Ặ'), + ('Ẹ', 'Ẹ'), + ('Ẻ', 'Ẻ'), + ('Ẽ', 'Ẽ'), + ('Ế', 'Ế'), + ('Ề', 'Ề'), + ('Ể', 'Ể'), + ('Ễ', 'Ễ'), + ('Ệ', 'Ệ'), + ('Ỉ', 'Ỉ'), + ('Ị', 'Ị'), + ('Ọ', 'Ọ'), + ('Ỏ', 'Ỏ'), + ('Ố', 'Ố'), + ('Ồ', 'Ồ'), + ('Ổ', 'Ổ'), + ('Ỗ', 'Ỗ'), + ('Ộ', 'Ộ'), + ('Ớ', 'Ớ'), + ('Ờ', 'Ờ'), + ('Ở', 'Ở'), + ('Ỡ', 'Ỡ'), + ('Ợ', 'Ợ'), + ('Ụ', 'Ụ'), + ('Ủ', 'Ủ'), + ('Ứ', 'Ứ'), + ('Ừ', 'Ừ'), + ('Ử', 'Ử'), + ('Ữ', 'Ữ'), + ('Ự', 'Ự'), + ('Ỳ', 'Ỳ'), + ('Ỵ', 'Ỵ'), + ('Ỷ', 'Ỷ'), + ('Ỹ', 'Ỹ'), + ('Ỻ', 'Ỻ'), + ('Ỽ', 'Ỽ'), + ('Ỿ', 'Ỿ'), + ('Ἀ', 'Ἇ'), + ('Ἐ', 'Ἕ'), + ('Ἠ', 'Ἧ'), + ('Ἰ', 'Ἷ'), + ('Ὀ', 'Ὅ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'Ὗ'), + ('Ὠ', 'Ὧ'), + ('ᾀ', 'ᾯ'), + ('ᾲ', 'ᾴ'), + ('ᾷ', 'ᾼ'), + ('ῂ', 'ῄ'), + ('ῇ', 'ῌ'), + ('Ῐ', 'Ί'), + ('Ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῷ', 'ῼ'), + ('Ω', 'Ω'), + ('K', 'Å'), + ('Ⅎ', 'Ⅎ'), + ('Ⅰ', 'Ⅿ'), + ('Ↄ', 'Ↄ'), + ('Ⓐ', 'Ⓩ'), + ('Ⰰ', 'Ⱟ'), + ('Ⱡ', 'Ⱡ'), + ('Ɫ', 'Ɽ'), + ('Ⱨ', 'Ⱨ'), + ('Ⱪ', 'Ⱪ'), + ('Ⱬ', 'Ⱬ'), + ('Ɑ', 'Ɒ'), + ('Ⱳ', 'Ⱳ'), + ('Ⱶ', 'Ⱶ'), + ('Ȿ', 'Ⲁ'), + ('Ⲃ', 'Ⲃ'), + ('Ⲅ', 'Ⲅ'), + ('Ⲇ', 'Ⲇ'), + ('Ⲉ', 'Ⲉ'), + ('Ⲋ', 'Ⲋ'), + ('Ⲍ', 'Ⲍ'), + ('Ⲏ', 'Ⲏ'), + ('Ⲑ', 'Ⲑ'), + ('Ⲓ', 'Ⲓ'), + ('Ⲕ', 'Ⲕ'), + ('Ⲗ', 'Ⲗ'), + ('Ⲙ', 'Ⲙ'), + ('Ⲛ', 'Ⲛ'), + ('Ⲝ', 'Ⲝ'), + ('Ⲟ', 'Ⲟ'), + ('Ⲡ', 'Ⲡ'), + ('Ⲣ', 'Ⲣ'), + ('Ⲥ', 'Ⲥ'), + ('Ⲧ', 'Ⲧ'), + ('Ⲩ', 'Ⲩ'), + ('Ⲫ', 'Ⲫ'), + ('Ⲭ', 'Ⲭ'), + ('Ⲯ', 'Ⲯ'), + ('Ⲱ', 'Ⲱ'), + ('Ⲳ', 'Ⲳ'), + ('Ⲵ', 'Ⲵ'), + ('Ⲷ', 'Ⲷ'), + ('Ⲹ', 'Ⲹ'), + ('Ⲻ', 'Ⲻ'), + ('Ⲽ', 'Ⲽ'), + ('Ⲿ', 'Ⲿ'), + ('Ⳁ', 'Ⳁ'), + ('Ⳃ', 'Ⳃ'), + ('Ⳅ', 'Ⳅ'), + ('Ⳇ', 'Ⳇ'), + ('Ⳉ', 'Ⳉ'), + ('Ⳋ', 'Ⳋ'), + ('Ⳍ', 'Ⳍ'), + ('Ⳏ', 'Ⳏ'), + ('Ⳑ', 'Ⳑ'), + ('Ⳓ', 'Ⳓ'), + ('Ⳕ', 'Ⳕ'), + ('Ⳗ', 'Ⳗ'), + ('Ⳙ', 'Ⳙ'), + ('Ⳛ', 'Ⳛ'), + ('Ⳝ', 'Ⳝ'), + ('Ⳟ', 'Ⳟ'), + ('Ⳡ', 'Ⳡ'), + ('Ⳣ', 'Ⳣ'), + ('Ⳬ', 'Ⳬ'), + ('Ⳮ', 
'Ⳮ'), + ('Ⳳ', 'Ⳳ'), + ('Ꙁ', 'Ꙁ'), + ('Ꙃ', 'Ꙃ'), + ('Ꙅ', 'Ꙅ'), + ('Ꙇ', 'Ꙇ'), + ('Ꙉ', 'Ꙉ'), + ('Ꙋ', 'Ꙋ'), + ('Ꙍ', 'Ꙍ'), + ('Ꙏ', 'Ꙏ'), + ('Ꙑ', 'Ꙑ'), + ('Ꙓ', 'Ꙓ'), + ('Ꙕ', 'Ꙕ'), + ('Ꙗ', 'Ꙗ'), + ('Ꙙ', 'Ꙙ'), + ('Ꙛ', 'Ꙛ'), + ('Ꙝ', 'Ꙝ'), + ('Ꙟ', 'Ꙟ'), + ('Ꙡ', 'Ꙡ'), + ('Ꙣ', 'Ꙣ'), + ('Ꙥ', 'Ꙥ'), + ('Ꙧ', 'Ꙧ'), + ('Ꙩ', 'Ꙩ'), + ('Ꙫ', 'Ꙫ'), + ('Ꙭ', 'Ꙭ'), + ('Ꚁ', 'Ꚁ'), + ('Ꚃ', 'Ꚃ'), + ('Ꚅ', 'Ꚅ'), + ('Ꚇ', 'Ꚇ'), + ('Ꚉ', 'Ꚉ'), + ('Ꚋ', 'Ꚋ'), + ('Ꚍ', 'Ꚍ'), + ('Ꚏ', 'Ꚏ'), + ('Ꚑ', 'Ꚑ'), + ('Ꚓ', 'Ꚓ'), + ('Ꚕ', 'Ꚕ'), + ('Ꚗ', 'Ꚗ'), + ('Ꚙ', 'Ꚙ'), + ('Ꚛ', 'Ꚛ'), + ('Ꜣ', 'Ꜣ'), + ('Ꜥ', 'Ꜥ'), + ('Ꜧ', 'Ꜧ'), + ('Ꜩ', 'Ꜩ'), + ('Ꜫ', 'Ꜫ'), + ('Ꜭ', 'Ꜭ'), + ('Ꜯ', 'Ꜯ'), + ('Ꜳ', 'Ꜳ'), + ('Ꜵ', 'Ꜵ'), + ('Ꜷ', 'Ꜷ'), + ('Ꜹ', 'Ꜹ'), + ('Ꜻ', 'Ꜻ'), + ('Ꜽ', 'Ꜽ'), + ('Ꜿ', 'Ꜿ'), + ('Ꝁ', 'Ꝁ'), + ('Ꝃ', 'Ꝃ'), + ('Ꝅ', 'Ꝅ'), + ('Ꝇ', 'Ꝇ'), + ('Ꝉ', 'Ꝉ'), + ('Ꝋ', 'Ꝋ'), + ('Ꝍ', 'Ꝍ'), + ('Ꝏ', 'Ꝏ'), + ('Ꝑ', 'Ꝑ'), + ('Ꝓ', 'Ꝓ'), + ('Ꝕ', 'Ꝕ'), + ('Ꝗ', 'Ꝗ'), + ('Ꝙ', 'Ꝙ'), + ('Ꝛ', 'Ꝛ'), + ('Ꝝ', 'Ꝝ'), + ('Ꝟ', 'Ꝟ'), + ('Ꝡ', 'Ꝡ'), + ('Ꝣ', 'Ꝣ'), + ('Ꝥ', 'Ꝥ'), + ('Ꝧ', 'Ꝧ'), + ('Ꝩ', 'Ꝩ'), + ('Ꝫ', 'Ꝫ'), + ('Ꝭ', 'Ꝭ'), + ('Ꝯ', 'Ꝯ'), + ('Ꝺ', 'Ꝺ'), + ('Ꝼ', 'Ꝼ'), + ('Ᵹ', 'Ꝿ'), + ('Ꞁ', 'Ꞁ'), + ('Ꞃ', 'Ꞃ'), + ('Ꞅ', 'Ꞅ'), + ('Ꞇ', 'Ꞇ'), + ('Ꞌ', 'Ꞌ'), + ('Ɥ', 'Ɥ'), + ('Ꞑ', 'Ꞑ'), + ('Ꞓ', 'Ꞓ'), + ('Ꞗ', 'Ꞗ'), + ('Ꞙ', 'Ꞙ'), + ('Ꞛ', 'Ꞛ'), + ('Ꞝ', 'Ꞝ'), + ('Ꞟ', 'Ꞟ'), + ('Ꞡ', 'Ꞡ'), + ('Ꞣ', 'Ꞣ'), + ('Ꞥ', 'Ꞥ'), + ('Ꞧ', 'Ꞧ'), + ('Ꞩ', 'Ꞩ'), + ('Ɦ', 'Ɪ'), + ('Ʞ', 'Ꞵ'), + ('Ꞷ', 'Ꞷ'), + ('Ꞹ', 'Ꞹ'), + ('Ꞻ', 'Ꞻ'), + ('Ꞽ', 'Ꞽ'), + ('Ꞿ', 'Ꞿ'), + ('Ꟁ', 'Ꟁ'), + ('Ꟃ', 'Ꟃ'), + ('Ꞔ', 'Ꟈ'), + ('Ꟊ', 'Ꟊ'), + ('Ɤ', 'Ꟍ'), + ('Ꟑ', 'Ꟑ'), + ('Ꟗ', 'Ꟗ'), + ('Ꟙ', 'Ꟙ'), + ('Ꟛ', 'Ꟛ'), + ('Ƛ', 'Ƛ'), + ('Ꟶ', 'Ꟶ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('A', 'Z'), + ('𐐀', '𐐧'), + ('𐒰', '𐓓'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐲀', '𐲲'), + ('𐵐', '𐵥'), + ('𑢠', '𑢿'), + ('𖹀', '𖹟'), + ('𞤀', '𞤡'), +]; + +pub const CHANGES_WHEN_CASEMAPPED: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('µ', 'µ'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ķ'), + ('Ĺ', 'ƌ'), + 
('Ǝ', 'Ʃ'), + ('Ƭ', 'ƹ'), + ('Ƽ', 'ƽ'), + ('ƿ', 'ƿ'), + ('DŽ', 'Ƞ'), + ('Ȣ', 'ȳ'), + ('Ⱥ', 'ɔ'), + ('ɖ', 'ɗ'), + ('ə', 'ə'), + ('ɛ', 'ɜ'), + ('ɠ', 'ɡ'), + ('ɣ', 'ɦ'), + ('ɨ', 'ɬ'), + ('ɯ', 'ɯ'), + ('ɱ', 'ɲ'), + ('ɵ', 'ɵ'), + ('ɽ', 'ɽ'), + ('ʀ', 'ʀ'), + ('ʂ', 'ʃ'), + ('ʇ', 'ʌ'), + ('ʒ', 'ʒ'), + ('ʝ', 'ʞ'), + ('\u{345}', '\u{345}'), + ('Ͱ', 'ͳ'), + ('Ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϑ'), + ('ϕ', 'ϵ'), + ('Ϸ', 'ϻ'), + ('Ͻ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ա', 'և'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჽ', 'ჿ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᵹ', 'ᵹ'), + ('ᵽ', 'ᵽ'), + ('ᶎ', 'ᶎ'), + ('Ḁ', 'ẛ'), + ('ẞ', 'ẞ'), + ('Ạ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('Ω', 'Ω'), + ('K', 'Å'), + ('Ⅎ', 'Ⅎ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ⅿ'), + ('Ↄ', 'ↄ'), + ('Ⓐ', 'ⓩ'), + ('Ⰰ', 'Ɒ'), + ('Ⱳ', 'ⱳ'), + ('Ⱶ', 'ⱶ'), + ('Ȿ', 'ⳣ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('Ꙁ', 'ꙭ'), + ('Ꚁ', 'ꚛ'), + ('Ꜣ', 'ꜯ'), + ('Ꜳ', 'ꝯ'), + ('Ꝺ', 'ꞇ'), + ('Ꞌ', 'Ɥ'), + ('Ꞑ', 'ꞔ'), + ('Ꞗ', 'Ɪ'), + ('Ʞ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('Ꟗ', 'Ƛ'), + ('Ꟶ', 'ꟶ'), + ('ꭓ', 'ꭓ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('A', 'Z'), + ('a', 'z'), + ('𐐀', '𐑏'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐵐', '𐵥'), + ('𐵰', '𐶅'), + ('𑢠', '𑣟'), + ('𖹀', '𖹿'), + ('𞤀', '𞥃'), +]; + +pub const CHANGES_WHEN_LOWERCASED: &'static [(char, char)] = &[ + ('A', 'Z'), + ('À', 'Ö'), + ('Ø', 'Þ'), + ('Ā', 'Ā'), + ('Ă', 'Ă'), + ('Ą', 'Ą'), + ('Ć', 'Ć'), + ('Ĉ', 'Ĉ'), + ('Ċ', 'Ċ'), + ('Č', 'Č'), + ('Ď', 'Ď'), + ('Đ', 'Đ'), + ('Ē', 
'Ē'), + ('Ĕ', 'Ĕ'), + ('Ė', 'Ė'), + ('Ę', 'Ę'), + ('Ě', 'Ě'), + ('Ĝ', 'Ĝ'), + ('Ğ', 'Ğ'), + ('Ġ', 'Ġ'), + ('Ģ', 'Ģ'), + ('Ĥ', 'Ĥ'), + ('Ħ', 'Ħ'), + ('Ĩ', 'Ĩ'), + ('Ī', 'Ī'), + ('Ĭ', 'Ĭ'), + ('Į', 'Į'), + ('İ', 'İ'), + ('IJ', 'IJ'), + ('Ĵ', 'Ĵ'), + ('Ķ', 'Ķ'), + ('Ĺ', 'Ĺ'), + ('Ļ', 'Ļ'), + ('Ľ', 'Ľ'), + ('Ŀ', 'Ŀ'), + ('Ł', 'Ł'), + ('Ń', 'Ń'), + ('Ņ', 'Ņ'), + ('Ň', 'Ň'), + ('Ŋ', 'Ŋ'), + ('Ō', 'Ō'), + ('Ŏ', 'Ŏ'), + ('Ő', 'Ő'), + ('Œ', 'Œ'), + ('Ŕ', 'Ŕ'), + ('Ŗ', 'Ŗ'), + ('Ř', 'Ř'), + ('Ś', 'Ś'), + ('Ŝ', 'Ŝ'), + ('Ş', 'Ş'), + ('Š', 'Š'), + ('Ţ', 'Ţ'), + ('Ť', 'Ť'), + ('Ŧ', 'Ŧ'), + ('Ũ', 'Ũ'), + ('Ū', 'Ū'), + ('Ŭ', 'Ŭ'), + ('Ů', 'Ů'), + ('Ű', 'Ű'), + ('Ų', 'Ų'), + ('Ŵ', 'Ŵ'), + ('Ŷ', 'Ŷ'), + ('Ÿ', 'Ź'), + ('Ż', 'Ż'), + ('Ž', 'Ž'), + ('Ɓ', 'Ƃ'), + ('Ƅ', 'Ƅ'), + ('Ɔ', 'Ƈ'), + ('Ɖ', 'Ƌ'), + ('Ǝ', 'Ƒ'), + ('Ɠ', 'Ɣ'), + ('Ɩ', 'Ƙ'), + ('Ɯ', 'Ɲ'), + ('Ɵ', 'Ơ'), + ('Ƣ', 'Ƣ'), + ('Ƥ', 'Ƥ'), + ('Ʀ', 'Ƨ'), + ('Ʃ', 'Ʃ'), + ('Ƭ', 'Ƭ'), + ('Ʈ', 'Ư'), + ('Ʊ', 'Ƴ'), + ('Ƶ', 'Ƶ'), + ('Ʒ', 'Ƹ'), + ('Ƽ', 'Ƽ'), + ('DŽ', 'Dž'), + ('LJ', 'Lj'), + ('NJ', 'Nj'), + ('Ǎ', 'Ǎ'), + ('Ǐ', 'Ǐ'), + ('Ǒ', 'Ǒ'), + ('Ǔ', 'Ǔ'), + ('Ǖ', 'Ǖ'), + ('Ǘ', 'Ǘ'), + ('Ǚ', 'Ǚ'), + ('Ǜ', 'Ǜ'), + ('Ǟ', 'Ǟ'), + ('Ǡ', 'Ǡ'), + ('Ǣ', 'Ǣ'), + ('Ǥ', 'Ǥ'), + ('Ǧ', 'Ǧ'), + ('Ǩ', 'Ǩ'), + ('Ǫ', 'Ǫ'), + ('Ǭ', 'Ǭ'), + ('Ǯ', 'Ǯ'), + ('DZ', 'Dz'), + ('Ǵ', 'Ǵ'), + ('Ƕ', 'Ǹ'), + ('Ǻ', 'Ǻ'), + ('Ǽ', 'Ǽ'), + ('Ǿ', 'Ǿ'), + ('Ȁ', 'Ȁ'), + ('Ȃ', 'Ȃ'), + ('Ȅ', 'Ȅ'), + ('Ȇ', 'Ȇ'), + ('Ȉ', 'Ȉ'), + ('Ȋ', 'Ȋ'), + ('Ȍ', 'Ȍ'), + ('Ȏ', 'Ȏ'), + ('Ȑ', 'Ȑ'), + ('Ȓ', 'Ȓ'), + ('Ȕ', 'Ȕ'), + ('Ȗ', 'Ȗ'), + ('Ș', 'Ș'), + ('Ț', 'Ț'), + ('Ȝ', 'Ȝ'), + ('Ȟ', 'Ȟ'), + ('Ƞ', 'Ƞ'), + ('Ȣ', 'Ȣ'), + ('Ȥ', 'Ȥ'), + ('Ȧ', 'Ȧ'), + ('Ȩ', 'Ȩ'), + ('Ȫ', 'Ȫ'), + ('Ȭ', 'Ȭ'), + ('Ȯ', 'Ȯ'), + ('Ȱ', 'Ȱ'), + ('Ȳ', 'Ȳ'), + ('Ⱥ', 'Ȼ'), + ('Ƚ', 'Ⱦ'), + ('Ɂ', 'Ɂ'), + ('Ƀ', 'Ɇ'), + ('Ɉ', 'Ɉ'), + ('Ɋ', 'Ɋ'), + ('Ɍ', 'Ɍ'), + ('Ɏ', 'Ɏ'), + ('Ͱ', 'Ͱ'), + ('Ͳ', 'Ͳ'), + ('Ͷ', 'Ͷ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ώ'), + ('Α', 'Ρ'), + ('Σ', 'Ϋ'), + ('Ϗ', 
'Ϗ'), + ('Ϙ', 'Ϙ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'Ϣ'), + ('Ϥ', 'Ϥ'), + ('Ϧ', 'Ϧ'), + ('Ϩ', 'Ϩ'), + ('Ϫ', 'Ϫ'), + ('Ϭ', 'Ϭ'), + ('Ϯ', 'Ϯ'), + ('ϴ', 'ϴ'), + ('Ϸ', 'Ϸ'), + ('Ϲ', 'Ϻ'), + ('Ͻ', 'Я'), + ('Ѡ', 'Ѡ'), + ('Ѣ', 'Ѣ'), + ('Ѥ', 'Ѥ'), + ('Ѧ', 'Ѧ'), + ('Ѩ', 'Ѩ'), + ('Ѫ', 'Ѫ'), + ('Ѭ', 'Ѭ'), + ('Ѯ', 'Ѯ'), + ('Ѱ', 'Ѱ'), + ('Ѳ', 'Ѳ'), + ('Ѵ', 'Ѵ'), + ('Ѷ', 'Ѷ'), + ('Ѹ', 'Ѹ'), + ('Ѻ', 'Ѻ'), + ('Ѽ', 'Ѽ'), + ('Ѿ', 'Ѿ'), + ('Ҁ', 'Ҁ'), + ('Ҋ', 'Ҋ'), + ('Ҍ', 'Ҍ'), + ('Ҏ', 'Ҏ'), + ('Ґ', 'Ґ'), + ('Ғ', 'Ғ'), + ('Ҕ', 'Ҕ'), + ('Җ', 'Җ'), + ('Ҙ', 'Ҙ'), + ('Қ', 'Қ'), + ('Ҝ', 'Ҝ'), + ('Ҟ', 'Ҟ'), + ('Ҡ', 'Ҡ'), + ('Ң', 'Ң'), + ('Ҥ', 'Ҥ'), + ('Ҧ', 'Ҧ'), + ('Ҩ', 'Ҩ'), + ('Ҫ', 'Ҫ'), + ('Ҭ', 'Ҭ'), + ('Ү', 'Ү'), + ('Ұ', 'Ұ'), + ('Ҳ', 'Ҳ'), + ('Ҵ', 'Ҵ'), + ('Ҷ', 'Ҷ'), + ('Ҹ', 'Ҹ'), + ('Һ', 'Һ'), + ('Ҽ', 'Ҽ'), + ('Ҿ', 'Ҿ'), + ('Ӏ', 'Ӂ'), + ('Ӄ', 'Ӄ'), + ('Ӆ', 'Ӆ'), + ('Ӈ', 'Ӈ'), + ('Ӊ', 'Ӊ'), + ('Ӌ', 'Ӌ'), + ('Ӎ', 'Ӎ'), + ('Ӑ', 'Ӑ'), + ('Ӓ', 'Ӓ'), + ('Ӕ', 'Ӕ'), + ('Ӗ', 'Ӗ'), + ('Ә', 'Ә'), + ('Ӛ', 'Ӛ'), + ('Ӝ', 'Ӝ'), + ('Ӟ', 'Ӟ'), + ('Ӡ', 'Ӡ'), + ('Ӣ', 'Ӣ'), + ('Ӥ', 'Ӥ'), + ('Ӧ', 'Ӧ'), + ('Ө', 'Ө'), + ('Ӫ', 'Ӫ'), + ('Ӭ', 'Ӭ'), + ('Ӯ', 'Ӯ'), + ('Ӱ', 'Ӱ'), + ('Ӳ', 'Ӳ'), + ('Ӵ', 'Ӵ'), + ('Ӷ', 'Ӷ'), + ('Ӹ', 'Ӹ'), + ('Ӻ', 'Ӻ'), + ('Ӽ', 'Ӽ'), + ('Ӿ', 'Ӿ'), + ('Ԁ', 'Ԁ'), + ('Ԃ', 'Ԃ'), + ('Ԅ', 'Ԅ'), + ('Ԇ', 'Ԇ'), + ('Ԉ', 'Ԉ'), + ('Ԋ', 'Ԋ'), + ('Ԍ', 'Ԍ'), + ('Ԏ', 'Ԏ'), + ('Ԑ', 'Ԑ'), + ('Ԓ', 'Ԓ'), + ('Ԕ', 'Ԕ'), + ('Ԗ', 'Ԗ'), + ('Ԙ', 'Ԙ'), + ('Ԛ', 'Ԛ'), + ('Ԝ', 'Ԝ'), + ('Ԟ', 'Ԟ'), + ('Ԡ', 'Ԡ'), + ('Ԣ', 'Ԣ'), + ('Ԥ', 'Ԥ'), + ('Ԧ', 'Ԧ'), + ('Ԩ', 'Ԩ'), + ('Ԫ', 'Ԫ'), + ('Ԭ', 'Ԭ'), + ('Ԯ', 'Ԯ'), + ('Ա', 'Ֆ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('Ꭰ', 'Ᏽ'), + ('Ᲊ', 'Ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('Ḁ', 'Ḁ'), + ('Ḃ', 'Ḃ'), + ('Ḅ', 'Ḅ'), + ('Ḇ', 'Ḇ'), + ('Ḉ', 'Ḉ'), + ('Ḋ', 'Ḋ'), + ('Ḍ', 'Ḍ'), + ('Ḏ', 'Ḏ'), + ('Ḑ', 'Ḑ'), + ('Ḓ', 'Ḓ'), + ('Ḕ', 'Ḕ'), + ('Ḗ', 'Ḗ'), + ('Ḙ', 'Ḙ'), + ('Ḛ', 'Ḛ'), + ('Ḝ', 'Ḝ'), + ('Ḟ', 'Ḟ'), + ('Ḡ', 'Ḡ'), + ('Ḣ', 'Ḣ'), + ('Ḥ', 'Ḥ'), + 
('Ḧ', 'Ḧ'), + ('Ḩ', 'Ḩ'), + ('Ḫ', 'Ḫ'), + ('Ḭ', 'Ḭ'), + ('Ḯ', 'Ḯ'), + ('Ḱ', 'Ḱ'), + ('Ḳ', 'Ḳ'), + ('Ḵ', 'Ḵ'), + ('Ḷ', 'Ḷ'), + ('Ḹ', 'Ḹ'), + ('Ḻ', 'Ḻ'), + ('Ḽ', 'Ḽ'), + ('Ḿ', 'Ḿ'), + ('Ṁ', 'Ṁ'), + ('Ṃ', 'Ṃ'), + ('Ṅ', 'Ṅ'), + ('Ṇ', 'Ṇ'), + ('Ṉ', 'Ṉ'), + ('Ṋ', 'Ṋ'), + ('Ṍ', 'Ṍ'), + ('Ṏ', 'Ṏ'), + ('Ṑ', 'Ṑ'), + ('Ṓ', 'Ṓ'), + ('Ṕ', 'Ṕ'), + ('Ṗ', 'Ṗ'), + ('Ṙ', 'Ṙ'), + ('Ṛ', 'Ṛ'), + ('Ṝ', 'Ṝ'), + ('Ṟ', 'Ṟ'), + ('Ṡ', 'Ṡ'), + ('Ṣ', 'Ṣ'), + ('Ṥ', 'Ṥ'), + ('Ṧ', 'Ṧ'), + ('Ṩ', 'Ṩ'), + ('Ṫ', 'Ṫ'), + ('Ṭ', 'Ṭ'), + ('Ṯ', 'Ṯ'), + ('Ṱ', 'Ṱ'), + ('Ṳ', 'Ṳ'), + ('Ṵ', 'Ṵ'), + ('Ṷ', 'Ṷ'), + ('Ṹ', 'Ṹ'), + ('Ṻ', 'Ṻ'), + ('Ṽ', 'Ṽ'), + ('Ṿ', 'Ṿ'), + ('Ẁ', 'Ẁ'), + ('Ẃ', 'Ẃ'), + ('Ẅ', 'Ẅ'), + ('Ẇ', 'Ẇ'), + ('Ẉ', 'Ẉ'), + ('Ẋ', 'Ẋ'), + ('Ẍ', 'Ẍ'), + ('Ẏ', 'Ẏ'), + ('Ẑ', 'Ẑ'), + ('Ẓ', 'Ẓ'), + ('Ẕ', 'Ẕ'), + ('ẞ', 'ẞ'), + ('Ạ', 'Ạ'), + ('Ả', 'Ả'), + ('Ấ', 'Ấ'), + ('Ầ', 'Ầ'), + ('Ẩ', 'Ẩ'), + ('Ẫ', 'Ẫ'), + ('Ậ', 'Ậ'), + ('Ắ', 'Ắ'), + ('Ằ', 'Ằ'), + ('Ẳ', 'Ẳ'), + ('Ẵ', 'Ẵ'), + ('Ặ', 'Ặ'), + ('Ẹ', 'Ẹ'), + ('Ẻ', 'Ẻ'), + ('Ẽ', 'Ẽ'), + ('Ế', 'Ế'), + ('Ề', 'Ề'), + ('Ể', 'Ể'), + ('Ễ', 'Ễ'), + ('Ệ', 'Ệ'), + ('Ỉ', 'Ỉ'), + ('Ị', 'Ị'), + ('Ọ', 'Ọ'), + ('Ỏ', 'Ỏ'), + ('Ố', 'Ố'), + ('Ồ', 'Ồ'), + ('Ổ', 'Ổ'), + ('Ỗ', 'Ỗ'), + ('Ộ', 'Ộ'), + ('Ớ', 'Ớ'), + ('Ờ', 'Ờ'), + ('Ở', 'Ở'), + ('Ỡ', 'Ỡ'), + ('Ợ', 'Ợ'), + ('Ụ', 'Ụ'), + ('Ủ', 'Ủ'), + ('Ứ', 'Ứ'), + ('Ừ', 'Ừ'), + ('Ử', 'Ử'), + ('Ữ', 'Ữ'), + ('Ự', 'Ự'), + ('Ỳ', 'Ỳ'), + ('Ỵ', 'Ỵ'), + ('Ỷ', 'Ỷ'), + ('Ỹ', 'Ỹ'), + ('Ỻ', 'Ỻ'), + ('Ỽ', 'Ỽ'), + ('Ỿ', 'Ỿ'), + ('Ἀ', 'Ἇ'), + ('Ἐ', 'Ἕ'), + ('Ἠ', 'Ἧ'), + ('Ἰ', 'Ἷ'), + ('Ὀ', 'Ὅ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'Ὗ'), + ('Ὠ', 'Ὧ'), + ('ᾈ', 'ᾏ'), + ('ᾘ', 'ᾟ'), + ('ᾨ', 'ᾯ'), + ('Ᾰ', 'ᾼ'), + ('Ὲ', 'ῌ'), + ('Ῐ', 'Ί'), + ('Ῠ', 'Ῥ'), + ('Ὸ', 'ῼ'), + ('Ω', 'Ω'), + ('K', 'Å'), + ('Ⅎ', 'Ⅎ'), + ('Ⅰ', 'Ⅿ'), + ('Ↄ', 'Ↄ'), + ('Ⓐ', 'Ⓩ'), + ('Ⰰ', 'Ⱟ'), + ('Ⱡ', 'Ⱡ'), + ('Ɫ', 'Ɽ'), + ('Ⱨ', 'Ⱨ'), + ('Ⱪ', 'Ⱪ'), + ('Ⱬ', 'Ⱬ'), + ('Ɑ', 'Ɒ'), + ('Ⱳ', 'Ⱳ'), + ('Ⱶ', 'Ⱶ'), + ('Ȿ', 'Ⲁ'), + ('Ⲃ', 'Ⲃ'), + ('Ⲅ', 'Ⲅ'), + ('Ⲇ', 'Ⲇ'), + ('Ⲉ', 'Ⲉ'), 
+ ('Ⲋ', 'Ⲋ'), + ('Ⲍ', 'Ⲍ'), + ('Ⲏ', 'Ⲏ'), + ('Ⲑ', 'Ⲑ'), + ('Ⲓ', 'Ⲓ'), + ('Ⲕ', 'Ⲕ'), + ('Ⲗ', 'Ⲗ'), + ('Ⲙ', 'Ⲙ'), + ('Ⲛ', 'Ⲛ'), + ('Ⲝ', 'Ⲝ'), + ('Ⲟ', 'Ⲟ'), + ('Ⲡ', 'Ⲡ'), + ('Ⲣ', 'Ⲣ'), + ('Ⲥ', 'Ⲥ'), + ('Ⲧ', 'Ⲧ'), + ('Ⲩ', 'Ⲩ'), + ('Ⲫ', 'Ⲫ'), + ('Ⲭ', 'Ⲭ'), + ('Ⲯ', 'Ⲯ'), + ('Ⲱ', 'Ⲱ'), + ('Ⲳ', 'Ⲳ'), + ('Ⲵ', 'Ⲵ'), + ('Ⲷ', 'Ⲷ'), + ('Ⲹ', 'Ⲹ'), + ('Ⲻ', 'Ⲻ'), + ('Ⲽ', 'Ⲽ'), + ('Ⲿ', 'Ⲿ'), + ('Ⳁ', 'Ⳁ'), + ('Ⳃ', 'Ⳃ'), + ('Ⳅ', 'Ⳅ'), + ('Ⳇ', 'Ⳇ'), + ('Ⳉ', 'Ⳉ'), + ('Ⳋ', 'Ⳋ'), + ('Ⳍ', 'Ⳍ'), + ('Ⳏ', 'Ⳏ'), + ('Ⳑ', 'Ⳑ'), + ('Ⳓ', 'Ⳓ'), + ('Ⳕ', 'Ⳕ'), + ('Ⳗ', 'Ⳗ'), + ('Ⳙ', 'Ⳙ'), + ('Ⳛ', 'Ⳛ'), + ('Ⳝ', 'Ⳝ'), + ('Ⳟ', 'Ⳟ'), + ('Ⳡ', 'Ⳡ'), + ('Ⳣ', 'Ⳣ'), + ('Ⳬ', 'Ⳬ'), + ('Ⳮ', 'Ⳮ'), + ('Ⳳ', 'Ⳳ'), + ('Ꙁ', 'Ꙁ'), + ('Ꙃ', 'Ꙃ'), + ('Ꙅ', 'Ꙅ'), + ('Ꙇ', 'Ꙇ'), + ('Ꙉ', 'Ꙉ'), + ('Ꙋ', 'Ꙋ'), + ('Ꙍ', 'Ꙍ'), + ('Ꙏ', 'Ꙏ'), + ('Ꙑ', 'Ꙑ'), + ('Ꙓ', 'Ꙓ'), + ('Ꙕ', 'Ꙕ'), + ('Ꙗ', 'Ꙗ'), + ('Ꙙ', 'Ꙙ'), + ('Ꙛ', 'Ꙛ'), + ('Ꙝ', 'Ꙝ'), + ('Ꙟ', 'Ꙟ'), + ('Ꙡ', 'Ꙡ'), + ('Ꙣ', 'Ꙣ'), + ('Ꙥ', 'Ꙥ'), + ('Ꙧ', 'Ꙧ'), + ('Ꙩ', 'Ꙩ'), + ('Ꙫ', 'Ꙫ'), + ('Ꙭ', 'Ꙭ'), + ('Ꚁ', 'Ꚁ'), + ('Ꚃ', 'Ꚃ'), + ('Ꚅ', 'Ꚅ'), + ('Ꚇ', 'Ꚇ'), + ('Ꚉ', 'Ꚉ'), + ('Ꚋ', 'Ꚋ'), + ('Ꚍ', 'Ꚍ'), + ('Ꚏ', 'Ꚏ'), + ('Ꚑ', 'Ꚑ'), + ('Ꚓ', 'Ꚓ'), + ('Ꚕ', 'Ꚕ'), + ('Ꚗ', 'Ꚗ'), + ('Ꚙ', 'Ꚙ'), + ('Ꚛ', 'Ꚛ'), + ('Ꜣ', 'Ꜣ'), + ('Ꜥ', 'Ꜥ'), + ('Ꜧ', 'Ꜧ'), + ('Ꜩ', 'Ꜩ'), + ('Ꜫ', 'Ꜫ'), + ('Ꜭ', 'Ꜭ'), + ('Ꜯ', 'Ꜯ'), + ('Ꜳ', 'Ꜳ'), + ('Ꜵ', 'Ꜵ'), + ('Ꜷ', 'Ꜷ'), + ('Ꜹ', 'Ꜹ'), + ('Ꜻ', 'Ꜻ'), + ('Ꜽ', 'Ꜽ'), + ('Ꜿ', 'Ꜿ'), + ('Ꝁ', 'Ꝁ'), + ('Ꝃ', 'Ꝃ'), + ('Ꝅ', 'Ꝅ'), + ('Ꝇ', 'Ꝇ'), + ('Ꝉ', 'Ꝉ'), + ('Ꝋ', 'Ꝋ'), + ('Ꝍ', 'Ꝍ'), + ('Ꝏ', 'Ꝏ'), + ('Ꝑ', 'Ꝑ'), + ('Ꝓ', 'Ꝓ'), + ('Ꝕ', 'Ꝕ'), + ('Ꝗ', 'Ꝗ'), + ('Ꝙ', 'Ꝙ'), + ('Ꝛ', 'Ꝛ'), + ('Ꝝ', 'Ꝝ'), + ('Ꝟ', 'Ꝟ'), + ('Ꝡ', 'Ꝡ'), + ('Ꝣ', 'Ꝣ'), + ('Ꝥ', 'Ꝥ'), + ('Ꝧ', 'Ꝧ'), + ('Ꝩ', 'Ꝩ'), + ('Ꝫ', 'Ꝫ'), + ('Ꝭ', 'Ꝭ'), + ('Ꝯ', 'Ꝯ'), + ('Ꝺ', 'Ꝺ'), + ('Ꝼ', 'Ꝼ'), + ('Ᵹ', 'Ꝿ'), + ('Ꞁ', 'Ꞁ'), + ('Ꞃ', 'Ꞃ'), + ('Ꞅ', 'Ꞅ'), + ('Ꞇ', 'Ꞇ'), + ('Ꞌ', 'Ꞌ'), + ('Ɥ', 'Ɥ'), + ('Ꞑ', 'Ꞑ'), + ('Ꞓ', 'Ꞓ'), + ('Ꞗ', 'Ꞗ'), + ('Ꞙ', 'Ꞙ'), + ('Ꞛ', 'Ꞛ'), + ('Ꞝ', 'Ꞝ'), + ('Ꞟ', 'Ꞟ'), + ('Ꞡ', 'Ꞡ'), + ('Ꞣ', 'Ꞣ'), + ('Ꞥ', 'Ꞥ'), + ('Ꞧ', 
'Ꞧ'), + ('Ꞩ', 'Ꞩ'), + ('Ɦ', 'Ɪ'), + ('Ʞ', 'Ꞵ'), + ('Ꞷ', 'Ꞷ'), + ('Ꞹ', 'Ꞹ'), + ('Ꞻ', 'Ꞻ'), + ('Ꞽ', 'Ꞽ'), + ('Ꞿ', 'Ꞿ'), + ('Ꟁ', 'Ꟁ'), + ('Ꟃ', 'Ꟃ'), + ('Ꞔ', 'Ꟈ'), + ('Ꟊ', 'Ꟊ'), + ('Ɤ', 'Ꟍ'), + ('Ꟑ', 'Ꟑ'), + ('Ꟗ', 'Ꟗ'), + ('Ꟙ', 'Ꟙ'), + ('Ꟛ', 'Ꟛ'), + ('Ƛ', 'Ƛ'), + ('Ꟶ', 'Ꟶ'), + ('A', 'Z'), + ('𐐀', '𐐧'), + ('𐒰', '𐓓'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐲀', '𐲲'), + ('𐵐', '𐵥'), + ('𑢠', '𑢿'), + ('𖹀', '𖹟'), + ('𞤀', '𞤡'), +]; + +pub const CHANGES_WHEN_TITLECASED: &'static [(char, char)] = &[ + ('a', 'z'), + ('µ', 'µ'), + ('ß', 'ö'), + ('ø', 'ÿ'), + ('ā', 'ā'), + ('ă', 'ă'), + ('ą', 'ą'), + ('ć', 'ć'), + ('ĉ', 'ĉ'), + ('ċ', 'ċ'), + ('č', 'č'), + ('ď', 'ď'), + ('đ', 'đ'), + ('ē', 'ē'), + ('ĕ', 'ĕ'), + ('ė', 'ė'), + ('ę', 'ę'), + ('ě', 'ě'), + ('ĝ', 'ĝ'), + ('ğ', 'ğ'), + ('ġ', 'ġ'), + ('ģ', 'ģ'), + ('ĥ', 'ĥ'), + ('ħ', 'ħ'), + ('ĩ', 'ĩ'), + ('ī', 'ī'), + ('ĭ', 'ĭ'), + ('į', 'į'), + ('ı', 'ı'), + ('ij', 'ij'), + ('ĵ', 'ĵ'), + ('ķ', 'ķ'), + ('ĺ', 'ĺ'), + ('ļ', 'ļ'), + ('ľ', 'ľ'), + ('ŀ', 'ŀ'), + ('ł', 'ł'), + ('ń', 'ń'), + ('ņ', 'ņ'), + ('ň', 'ʼn'), + ('ŋ', 'ŋ'), + ('ō', 'ō'), + ('ŏ', 'ŏ'), + ('ő', 'ő'), + ('œ', 'œ'), + ('ŕ', 'ŕ'), + ('ŗ', 'ŗ'), + ('ř', 'ř'), + ('ś', 'ś'), + ('ŝ', 'ŝ'), + ('ş', 'ş'), + ('š', 'š'), + ('ţ', 'ţ'), + ('ť', 'ť'), + ('ŧ', 'ŧ'), + ('ũ', 'ũ'), + ('ū', 'ū'), + ('ŭ', 'ŭ'), + ('ů', 'ů'), + ('ű', 'ű'), + ('ų', 'ų'), + ('ŵ', 'ŵ'), + ('ŷ', 'ŷ'), + ('ź', 'ź'), + ('ż', 'ż'), + ('ž', 'ƀ'), + ('ƃ', 'ƃ'), + ('ƅ', 'ƅ'), + ('ƈ', 'ƈ'), + ('ƌ', 'ƌ'), + ('ƒ', 'ƒ'), + ('ƕ', 'ƕ'), + ('ƙ', 'ƛ'), + ('ƞ', 'ƞ'), + ('ơ', 'ơ'), + ('ƣ', 'ƣ'), + ('ƥ', 'ƥ'), + ('ƨ', 'ƨ'), + ('ƭ', 'ƭ'), + ('ư', 'ư'), + ('ƴ', 'ƴ'), + ('ƶ', 'ƶ'), + ('ƹ', 'ƹ'), + ('ƽ', 'ƽ'), + ('ƿ', 'ƿ'), + ('DŽ', 'DŽ'), + ('dž', 'LJ'), + ('lj', 'NJ'), + ('nj', 'nj'), + ('ǎ', 'ǎ'), + ('ǐ', 'ǐ'), + ('ǒ', 'ǒ'), + ('ǔ', 'ǔ'), + ('ǖ', 'ǖ'), + ('ǘ', 'ǘ'), + ('ǚ', 'ǚ'), + ('ǜ', 'ǝ'), + ('ǟ', 'ǟ'), + ('ǡ', 'ǡ'), + ('ǣ', 'ǣ'), + ('ǥ', 'ǥ'), + ('ǧ', 'ǧ'), + ('ǩ', 'ǩ'), + ('ǫ', 'ǫ'), + ('ǭ', 'ǭ'), + ('ǯ', 
'DZ'), + ('dz', 'dz'), + ('ǵ', 'ǵ'), + ('ǹ', 'ǹ'), + ('ǻ', 'ǻ'), + ('ǽ', 'ǽ'), + ('ǿ', 'ǿ'), + ('ȁ', 'ȁ'), + ('ȃ', 'ȃ'), + ('ȅ', 'ȅ'), + ('ȇ', 'ȇ'), + ('ȉ', 'ȉ'), + ('ȋ', 'ȋ'), + ('ȍ', 'ȍ'), + ('ȏ', 'ȏ'), + ('ȑ', 'ȑ'), + ('ȓ', 'ȓ'), + ('ȕ', 'ȕ'), + ('ȗ', 'ȗ'), + ('ș', 'ș'), + ('ț', 'ț'), + ('ȝ', 'ȝ'), + ('ȟ', 'ȟ'), + ('ȣ', 'ȣ'), + ('ȥ', 'ȥ'), + ('ȧ', 'ȧ'), + ('ȩ', 'ȩ'), + ('ȫ', 'ȫ'), + ('ȭ', 'ȭ'), + ('ȯ', 'ȯ'), + ('ȱ', 'ȱ'), + ('ȳ', 'ȳ'), + ('ȼ', 'ȼ'), + ('ȿ', 'ɀ'), + ('ɂ', 'ɂ'), + ('ɇ', 'ɇ'), + ('ɉ', 'ɉ'), + ('ɋ', 'ɋ'), + ('ɍ', 'ɍ'), + ('ɏ', 'ɔ'), + ('ɖ', 'ɗ'), + ('ə', 'ə'), + ('ɛ', 'ɜ'), + ('ɠ', 'ɡ'), + ('ɣ', 'ɦ'), + ('ɨ', 'ɬ'), + ('ɯ', 'ɯ'), + ('ɱ', 'ɲ'), + ('ɵ', 'ɵ'), + ('ɽ', 'ɽ'), + ('ʀ', 'ʀ'), + ('ʂ', 'ʃ'), + ('ʇ', 'ʌ'), + ('ʒ', 'ʒ'), + ('ʝ', 'ʞ'), + ('\u{345}', '\u{345}'), + ('ͱ', 'ͱ'), + ('ͳ', 'ͳ'), + ('ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('ΐ', 'ΐ'), + ('ά', 'ώ'), + ('ϐ', 'ϑ'), + ('ϕ', 'ϗ'), + ('ϙ', 'ϙ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('ϣ', 'ϣ'), + ('ϥ', 'ϥ'), + ('ϧ', 'ϧ'), + ('ϩ', 'ϩ'), + ('ϫ', 'ϫ'), + ('ϭ', 'ϭ'), + ('ϯ', 'ϳ'), + ('ϵ', 'ϵ'), + ('ϸ', 'ϸ'), + ('ϻ', 'ϻ'), + ('а', 'џ'), + ('ѡ', 'ѡ'), + ('ѣ', 'ѣ'), + ('ѥ', 'ѥ'), + ('ѧ', 'ѧ'), + ('ѩ', 'ѩ'), + ('ѫ', 'ѫ'), + ('ѭ', 'ѭ'), + ('ѯ', 'ѯ'), + ('ѱ', 'ѱ'), + ('ѳ', 'ѳ'), + ('ѵ', 'ѵ'), + ('ѷ', 'ѷ'), + ('ѹ', 'ѹ'), + ('ѻ', 'ѻ'), + ('ѽ', 'ѽ'), + ('ѿ', 'ѿ'), + ('ҁ', 'ҁ'), + ('ҋ', 'ҋ'), + ('ҍ', 'ҍ'), + ('ҏ', 'ҏ'), + ('ґ', 'ґ'), + ('ғ', 'ғ'), + ('ҕ', 'ҕ'), + ('җ', 'җ'), + ('ҙ', 'ҙ'), + ('қ', 'қ'), + ('ҝ', 'ҝ'), + ('ҟ', 'ҟ'), + ('ҡ', 'ҡ'), + ('ң', 'ң'), + ('ҥ', 'ҥ'), + ('ҧ', 'ҧ'), + ('ҩ', 'ҩ'), + ('ҫ', 'ҫ'), + ('ҭ', 'ҭ'), + ('ү', 'ү'), + ('ұ', 'ұ'), + ('ҳ', 'ҳ'), + ('ҵ', 'ҵ'), + ('ҷ', 'ҷ'), + ('ҹ', 'ҹ'), + ('һ', 'һ'), + ('ҽ', 'ҽ'), + ('ҿ', 'ҿ'), + ('ӂ', 'ӂ'), + ('ӄ', 'ӄ'), + ('ӆ', 'ӆ'), + ('ӈ', 'ӈ'), + ('ӊ', 'ӊ'), + ('ӌ', 'ӌ'), + ('ӎ', 'ӏ'), + ('ӑ', 'ӑ'), + ('ӓ', 'ӓ'), + ('ӕ', 'ӕ'), + ('ӗ', 'ӗ'), + ('ә', 'ә'), + ('ӛ', 'ӛ'), + ('ӝ', 'ӝ'), + ('ӟ', 'ӟ'), + ('ӡ', 'ӡ'), + ('ӣ', 'ӣ'), + ('ӥ', 'ӥ'), + 
('ӧ', 'ӧ'), + ('ө', 'ө'), + ('ӫ', 'ӫ'), + ('ӭ', 'ӭ'), + ('ӯ', 'ӯ'), + ('ӱ', 'ӱ'), + ('ӳ', 'ӳ'), + ('ӵ', 'ӵ'), + ('ӷ', 'ӷ'), + ('ӹ', 'ӹ'), + ('ӻ', 'ӻ'), + ('ӽ', 'ӽ'), + ('ӿ', 'ӿ'), + ('ԁ', 'ԁ'), + ('ԃ', 'ԃ'), + ('ԅ', 'ԅ'), + ('ԇ', 'ԇ'), + ('ԉ', 'ԉ'), + ('ԋ', 'ԋ'), + ('ԍ', 'ԍ'), + ('ԏ', 'ԏ'), + ('ԑ', 'ԑ'), + ('ԓ', 'ԓ'), + ('ԕ', 'ԕ'), + ('ԗ', 'ԗ'), + ('ԙ', 'ԙ'), + ('ԛ', 'ԛ'), + ('ԝ', 'ԝ'), + ('ԟ', 'ԟ'), + ('ԡ', 'ԡ'), + ('ԣ', 'ԣ'), + ('ԥ', 'ԥ'), + ('ԧ', 'ԧ'), + ('ԩ', 'ԩ'), + ('ԫ', 'ԫ'), + ('ԭ', 'ԭ'), + ('ԯ', 'ԯ'), + ('ա', 'և'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲈ'), + ('ᲊ', 'ᲊ'), + ('ᵹ', 'ᵹ'), + ('ᵽ', 'ᵽ'), + ('ᶎ', 'ᶎ'), + ('ḁ', 'ḁ'), + ('ḃ', 'ḃ'), + ('ḅ', 'ḅ'), + ('ḇ', 'ḇ'), + ('ḉ', 'ḉ'), + ('ḋ', 'ḋ'), + ('ḍ', 'ḍ'), + ('ḏ', 'ḏ'), + ('ḑ', 'ḑ'), + ('ḓ', 'ḓ'), + ('ḕ', 'ḕ'), + ('ḗ', 'ḗ'), + ('ḙ', 'ḙ'), + ('ḛ', 'ḛ'), + ('ḝ', 'ḝ'), + ('ḟ', 'ḟ'), + ('ḡ', 'ḡ'), + ('ḣ', 'ḣ'), + ('ḥ', 'ḥ'), + ('ḧ', 'ḧ'), + ('ḩ', 'ḩ'), + ('ḫ', 'ḫ'), + ('ḭ', 'ḭ'), + ('ḯ', 'ḯ'), + ('ḱ', 'ḱ'), + ('ḳ', 'ḳ'), + ('ḵ', 'ḵ'), + ('ḷ', 'ḷ'), + ('ḹ', 'ḹ'), + ('ḻ', 'ḻ'), + ('ḽ', 'ḽ'), + ('ḿ', 'ḿ'), + ('ṁ', 'ṁ'), + ('ṃ', 'ṃ'), + ('ṅ', 'ṅ'), + ('ṇ', 'ṇ'), + ('ṉ', 'ṉ'), + ('ṋ', 'ṋ'), + ('ṍ', 'ṍ'), + ('ṏ', 'ṏ'), + ('ṑ', 'ṑ'), + ('ṓ', 'ṓ'), + ('ṕ', 'ṕ'), + ('ṗ', 'ṗ'), + ('ṙ', 'ṙ'), + ('ṛ', 'ṛ'), + ('ṝ', 'ṝ'), + ('ṟ', 'ṟ'), + ('ṡ', 'ṡ'), + ('ṣ', 'ṣ'), + ('ṥ', 'ṥ'), + ('ṧ', 'ṧ'), + ('ṩ', 'ṩ'), + ('ṫ', 'ṫ'), + ('ṭ', 'ṭ'), + ('ṯ', 'ṯ'), + ('ṱ', 'ṱ'), + ('ṳ', 'ṳ'), + ('ṵ', 'ṵ'), + ('ṷ', 'ṷ'), + ('ṹ', 'ṹ'), + ('ṻ', 'ṻ'), + ('ṽ', 'ṽ'), + ('ṿ', 'ṿ'), + ('ẁ', 'ẁ'), + ('ẃ', 'ẃ'), + ('ẅ', 'ẅ'), + ('ẇ', 'ẇ'), + ('ẉ', 'ẉ'), + ('ẋ', 'ẋ'), + ('ẍ', 'ẍ'), + ('ẏ', 'ẏ'), + ('ẑ', 'ẑ'), + ('ẓ', 'ẓ'), + ('ẕ', 'ẛ'), + ('ạ', 'ạ'), + ('ả', 'ả'), + ('ấ', 'ấ'), + ('ầ', 'ầ'), + ('ẩ', 'ẩ'), + ('ẫ', 'ẫ'), + ('ậ', 'ậ'), + ('ắ', 'ắ'), + ('ằ', 'ằ'), + ('ẳ', 'ẳ'), + ('ẵ', 'ẵ'), + ('ặ', 'ặ'), + ('ẹ', 'ẹ'), + ('ẻ', 'ẻ'), + ('ẽ', 'ẽ'), + ('ế', 'ế'), + ('ề', 'ề'), + ('ể', 'ể'), + ('ễ', 'ễ'), + ('ệ', 'ệ'), + ('ỉ', 'ỉ'), + ('ị', 'ị'), + ('ọ', 'ọ'), + ('ỏ', 'ỏ'), 
+ ('ố', 'ố'), + ('ồ', 'ồ'), + ('ổ', 'ổ'), + ('ỗ', 'ỗ'), + ('ộ', 'ộ'), + ('ớ', 'ớ'), + ('ờ', 'ờ'), + ('ở', 'ở'), + ('ỡ', 'ỡ'), + ('ợ', 'ợ'), + ('ụ', 'ụ'), + ('ủ', 'ủ'), + ('ứ', 'ứ'), + ('ừ', 'ừ'), + ('ử', 'ử'), + ('ữ', 'ữ'), + ('ự', 'ự'), + ('ỳ', 'ỳ'), + ('ỵ', 'ỵ'), + ('ỷ', 'ỷ'), + ('ỹ', 'ỹ'), + ('ỻ', 'ỻ'), + ('ỽ', 'ỽ'), + ('ỿ', 'ἇ'), + ('ἐ', 'ἕ'), + ('ἠ', 'ἧ'), + ('ἰ', 'ἷ'), + ('ὀ', 'ὅ'), + ('ὐ', 'ὗ'), + ('ὠ', 'ὧ'), + ('ὰ', 'ώ'), + ('ᾀ', 'ᾇ'), + ('ᾐ', 'ᾗ'), + ('ᾠ', 'ᾧ'), + ('ᾰ', 'ᾴ'), + ('ᾶ', 'ᾷ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῇ'), + ('ῐ', 'ΐ'), + ('ῖ', 'ῗ'), + ('ῠ', 'ῧ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῷ'), + ('ⅎ', 'ⅎ'), + ('ⅰ', 'ⅿ'), + ('ↄ', 'ↄ'), + ('ⓐ', 'ⓩ'), + ('ⰰ', 'ⱟ'), + ('ⱡ', 'ⱡ'), + ('ⱥ', 'ⱦ'), + ('ⱨ', 'ⱨ'), + ('ⱪ', 'ⱪ'), + ('ⱬ', 'ⱬ'), + ('ⱳ', 'ⱳ'), + ('ⱶ', 'ⱶ'), + ('ⲁ', 'ⲁ'), + ('ⲃ', 'ⲃ'), + ('ⲅ', 'ⲅ'), + ('ⲇ', 'ⲇ'), + ('ⲉ', 'ⲉ'), + ('ⲋ', 'ⲋ'), + ('ⲍ', 'ⲍ'), + ('ⲏ', 'ⲏ'), + ('ⲑ', 'ⲑ'), + ('ⲓ', 'ⲓ'), + ('ⲕ', 'ⲕ'), + ('ⲗ', 'ⲗ'), + ('ⲙ', 'ⲙ'), + ('ⲛ', 'ⲛ'), + ('ⲝ', 'ⲝ'), + ('ⲟ', 'ⲟ'), + ('ⲡ', 'ⲡ'), + ('ⲣ', 'ⲣ'), + ('ⲥ', 'ⲥ'), + ('ⲧ', 'ⲧ'), + ('ⲩ', 'ⲩ'), + ('ⲫ', 'ⲫ'), + ('ⲭ', 'ⲭ'), + ('ⲯ', 'ⲯ'), + ('ⲱ', 'ⲱ'), + ('ⲳ', 'ⲳ'), + ('ⲵ', 'ⲵ'), + ('ⲷ', 'ⲷ'), + ('ⲹ', 'ⲹ'), + ('ⲻ', 'ⲻ'), + ('ⲽ', 'ⲽ'), + ('ⲿ', 'ⲿ'), + ('ⳁ', 'ⳁ'), + ('ⳃ', 'ⳃ'), + ('ⳅ', 'ⳅ'), + ('ⳇ', 'ⳇ'), + ('ⳉ', 'ⳉ'), + ('ⳋ', 'ⳋ'), + ('ⳍ', 'ⳍ'), + ('ⳏ', 'ⳏ'), + ('ⳑ', 'ⳑ'), + ('ⳓ', 'ⳓ'), + ('ⳕ', 'ⳕ'), + ('ⳗ', 'ⳗ'), + ('ⳙ', 'ⳙ'), + ('ⳛ', 'ⳛ'), + ('ⳝ', 'ⳝ'), + ('ⳟ', 'ⳟ'), + ('ⳡ', 'ⳡ'), + ('ⳣ', 'ⳣ'), + ('ⳬ', 'ⳬ'), + ('ⳮ', 'ⳮ'), + ('ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ꙁ', 'ꙁ'), + ('ꙃ', 'ꙃ'), + ('ꙅ', 'ꙅ'), + ('ꙇ', 'ꙇ'), + ('ꙉ', 'ꙉ'), + ('ꙋ', 'ꙋ'), + ('ꙍ', 'ꙍ'), + ('ꙏ', 'ꙏ'), + ('ꙑ', 'ꙑ'), + ('ꙓ', 'ꙓ'), + ('ꙕ', 'ꙕ'), + ('ꙗ', 'ꙗ'), + ('ꙙ', 'ꙙ'), + ('ꙛ', 'ꙛ'), + ('ꙝ', 'ꙝ'), + ('ꙟ', 'ꙟ'), + ('ꙡ', 'ꙡ'), + ('ꙣ', 'ꙣ'), + ('ꙥ', 'ꙥ'), + ('ꙧ', 'ꙧ'), + ('ꙩ', 'ꙩ'), + ('ꙫ', 'ꙫ'), + ('ꙭ', 'ꙭ'), + ('ꚁ', 'ꚁ'), + ('ꚃ', 'ꚃ'), + ('ꚅ', 'ꚅ'), + ('ꚇ', 'ꚇ'), + ('ꚉ', 'ꚉ'), + ('ꚋ', 'ꚋ'), + ('ꚍ', 'ꚍ'), + ('ꚏ', 
'ꚏ'), + ('ꚑ', 'ꚑ'), + ('ꚓ', 'ꚓ'), + ('ꚕ', 'ꚕ'), + ('ꚗ', 'ꚗ'), + ('ꚙ', 'ꚙ'), + ('ꚛ', 'ꚛ'), + ('ꜣ', 'ꜣ'), + ('ꜥ', 'ꜥ'), + ('ꜧ', 'ꜧ'), + ('ꜩ', 'ꜩ'), + ('ꜫ', 'ꜫ'), + ('ꜭ', 'ꜭ'), + ('ꜯ', 'ꜯ'), + ('ꜳ', 'ꜳ'), + ('ꜵ', 'ꜵ'), + ('ꜷ', 'ꜷ'), + ('ꜹ', 'ꜹ'), + ('ꜻ', 'ꜻ'), + ('ꜽ', 'ꜽ'), + ('ꜿ', 'ꜿ'), + ('ꝁ', 'ꝁ'), + ('ꝃ', 'ꝃ'), + ('ꝅ', 'ꝅ'), + ('ꝇ', 'ꝇ'), + ('ꝉ', 'ꝉ'), + ('ꝋ', 'ꝋ'), + ('ꝍ', 'ꝍ'), + ('ꝏ', 'ꝏ'), + ('ꝑ', 'ꝑ'), + ('ꝓ', 'ꝓ'), + ('ꝕ', 'ꝕ'), + ('ꝗ', 'ꝗ'), + ('ꝙ', 'ꝙ'), + ('ꝛ', 'ꝛ'), + ('ꝝ', 'ꝝ'), + ('ꝟ', 'ꝟ'), + ('ꝡ', 'ꝡ'), + ('ꝣ', 'ꝣ'), + ('ꝥ', 'ꝥ'), + ('ꝧ', 'ꝧ'), + ('ꝩ', 'ꝩ'), + ('ꝫ', 'ꝫ'), + ('ꝭ', 'ꝭ'), + ('ꝯ', 'ꝯ'), + ('ꝺ', 'ꝺ'), + ('ꝼ', 'ꝼ'), + ('ꝿ', 'ꝿ'), + ('ꞁ', 'ꞁ'), + ('ꞃ', 'ꞃ'), + ('ꞅ', 'ꞅ'), + ('ꞇ', 'ꞇ'), + ('ꞌ', 'ꞌ'), + ('ꞑ', 'ꞑ'), + ('ꞓ', 'ꞔ'), + ('ꞗ', 'ꞗ'), + ('ꞙ', 'ꞙ'), + ('ꞛ', 'ꞛ'), + ('ꞝ', 'ꞝ'), + ('ꞟ', 'ꞟ'), + ('ꞡ', 'ꞡ'), + ('ꞣ', 'ꞣ'), + ('ꞥ', 'ꞥ'), + ('ꞧ', 'ꞧ'), + ('ꞩ', 'ꞩ'), + ('ꞵ', 'ꞵ'), + ('ꞷ', 'ꞷ'), + ('ꞹ', 'ꞹ'), + ('ꞻ', 'ꞻ'), + ('ꞽ', 'ꞽ'), + ('ꞿ', 'ꞿ'), + ('ꟁ', 'ꟁ'), + ('ꟃ', 'ꟃ'), + ('ꟈ', 'ꟈ'), + ('ꟊ', 'ꟊ'), + ('ꟍ', 'ꟍ'), + ('ꟑ', 'ꟑ'), + ('ꟗ', 'ꟗ'), + ('ꟙ', 'ꟙ'), + ('ꟛ', 'ꟛ'), + ('ꟶ', 'ꟶ'), + ('ꭓ', 'ꭓ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('a', 'z'), + ('𐐨', '𐑏'), + ('𐓘', '𐓻'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐳀', '𐳲'), + ('𐵰', '𐶅'), + ('𑣀', '𑣟'), + ('𖹠', '𖹿'), + ('𞤢', '𞥃'), +]; + +pub const CHANGES_WHEN_UPPERCASED: &'static [(char, char)] = &[ + ('a', 'z'), + ('µ', 'µ'), + ('ß', 'ö'), + ('ø', 'ÿ'), + ('ā', 'ā'), + ('ă', 'ă'), + ('ą', 'ą'), + ('ć', 'ć'), + ('ĉ', 'ĉ'), + ('ċ', 'ċ'), + ('č', 'č'), + ('ď', 'ď'), + ('đ', 'đ'), + ('ē', 'ē'), + ('ĕ', 'ĕ'), + ('ė', 'ė'), + ('ę', 'ę'), + ('ě', 'ě'), + ('ĝ', 'ĝ'), + ('ğ', 'ğ'), + ('ġ', 'ġ'), + ('ģ', 'ģ'), + ('ĥ', 'ĥ'), + ('ħ', 'ħ'), + ('ĩ', 'ĩ'), + ('ī', 'ī'), + ('ĭ', 'ĭ'), + ('į', 'į'), + ('ı', 'ı'), + ('ij', 'ij'), + ('ĵ', 'ĵ'), + ('ķ', 'ķ'), + ('ĺ', 'ĺ'), + ('ļ', 'ļ'), + ('ľ', 'ľ'), + ('ŀ', 'ŀ'), + ('ł', 'ł'), + ('ń', 'ń'), + ('ņ', 'ņ'), + ('ň', 'ʼn'), + ('ŋ', 'ŋ'), 
+ ('ō', 'ō'), + ('ŏ', 'ŏ'), + ('ő', 'ő'), + ('œ', 'œ'), + ('ŕ', 'ŕ'), + ('ŗ', 'ŗ'), + ('ř', 'ř'), + ('ś', 'ś'), + ('ŝ', 'ŝ'), + ('ş', 'ş'), + ('š', 'š'), + ('ţ', 'ţ'), + ('ť', 'ť'), + ('ŧ', 'ŧ'), + ('ũ', 'ũ'), + ('ū', 'ū'), + ('ŭ', 'ŭ'), + ('ů', 'ů'), + ('ű', 'ű'), + ('ų', 'ų'), + ('ŵ', 'ŵ'), + ('ŷ', 'ŷ'), + ('ź', 'ź'), + ('ż', 'ż'), + ('ž', 'ƀ'), + ('ƃ', 'ƃ'), + ('ƅ', 'ƅ'), + ('ƈ', 'ƈ'), + ('ƌ', 'ƌ'), + ('ƒ', 'ƒ'), + ('ƕ', 'ƕ'), + ('ƙ', 'ƛ'), + ('ƞ', 'ƞ'), + ('ơ', 'ơ'), + ('ƣ', 'ƣ'), + ('ƥ', 'ƥ'), + ('ƨ', 'ƨ'), + ('ƭ', 'ƭ'), + ('ư', 'ư'), + ('ƴ', 'ƴ'), + ('ƶ', 'ƶ'), + ('ƹ', 'ƹ'), + ('ƽ', 'ƽ'), + ('ƿ', 'ƿ'), + ('Dž', 'dž'), + ('Lj', 'lj'), + ('Nj', 'nj'), + ('ǎ', 'ǎ'), + ('ǐ', 'ǐ'), + ('ǒ', 'ǒ'), + ('ǔ', 'ǔ'), + ('ǖ', 'ǖ'), + ('ǘ', 'ǘ'), + ('ǚ', 'ǚ'), + ('ǜ', 'ǝ'), + ('ǟ', 'ǟ'), + ('ǡ', 'ǡ'), + ('ǣ', 'ǣ'), + ('ǥ', 'ǥ'), + ('ǧ', 'ǧ'), + ('ǩ', 'ǩ'), + ('ǫ', 'ǫ'), + ('ǭ', 'ǭ'), + ('ǯ', 'ǰ'), + ('Dz', 'dz'), + ('ǵ', 'ǵ'), + ('ǹ', 'ǹ'), + ('ǻ', 'ǻ'), + ('ǽ', 'ǽ'), + ('ǿ', 'ǿ'), + ('ȁ', 'ȁ'), + ('ȃ', 'ȃ'), + ('ȅ', 'ȅ'), + ('ȇ', 'ȇ'), + ('ȉ', 'ȉ'), + ('ȋ', 'ȋ'), + ('ȍ', 'ȍ'), + ('ȏ', 'ȏ'), + ('ȑ', 'ȑ'), + ('ȓ', 'ȓ'), + ('ȕ', 'ȕ'), + ('ȗ', 'ȗ'), + ('ș', 'ș'), + ('ț', 'ț'), + ('ȝ', 'ȝ'), + ('ȟ', 'ȟ'), + ('ȣ', 'ȣ'), + ('ȥ', 'ȥ'), + ('ȧ', 'ȧ'), + ('ȩ', 'ȩ'), + ('ȫ', 'ȫ'), + ('ȭ', 'ȭ'), + ('ȯ', 'ȯ'), + ('ȱ', 'ȱ'), + ('ȳ', 'ȳ'), + ('ȼ', 'ȼ'), + ('ȿ', 'ɀ'), + ('ɂ', 'ɂ'), + ('ɇ', 'ɇ'), + ('ɉ', 'ɉ'), + ('ɋ', 'ɋ'), + ('ɍ', 'ɍ'), + ('ɏ', 'ɔ'), + ('ɖ', 'ɗ'), + ('ə', 'ə'), + ('ɛ', 'ɜ'), + ('ɠ', 'ɡ'), + ('ɣ', 'ɦ'), + ('ɨ', 'ɬ'), + ('ɯ', 'ɯ'), + ('ɱ', 'ɲ'), + ('ɵ', 'ɵ'), + ('ɽ', 'ɽ'), + ('ʀ', 'ʀ'), + ('ʂ', 'ʃ'), + ('ʇ', 'ʌ'), + ('ʒ', 'ʒ'), + ('ʝ', 'ʞ'), + ('\u{345}', '\u{345}'), + ('ͱ', 'ͱ'), + ('ͳ', 'ͳ'), + ('ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('ΐ', 'ΐ'), + ('ά', 'ώ'), + ('ϐ', 'ϑ'), + ('ϕ', 'ϗ'), + ('ϙ', 'ϙ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('ϣ', 'ϣ'), + ('ϥ', 'ϥ'), + ('ϧ', 'ϧ'), + ('ϩ', 'ϩ'), + ('ϫ', 'ϫ'), + ('ϭ', 'ϭ'), + ('ϯ', 'ϳ'), + ('ϵ', 'ϵ'), + ('ϸ', 'ϸ'), + 
('ϻ', 'ϻ'), + ('а', 'џ'), + ('ѡ', 'ѡ'), + ('ѣ', 'ѣ'), + ('ѥ', 'ѥ'), + ('ѧ', 'ѧ'), + ('ѩ', 'ѩ'), + ('ѫ', 'ѫ'), + ('ѭ', 'ѭ'), + ('ѯ', 'ѯ'), + ('ѱ', 'ѱ'), + ('ѳ', 'ѳ'), + ('ѵ', 'ѵ'), + ('ѷ', 'ѷ'), + ('ѹ', 'ѹ'), + ('ѻ', 'ѻ'), + ('ѽ', 'ѽ'), + ('ѿ', 'ѿ'), + ('ҁ', 'ҁ'), + ('ҋ', 'ҋ'), + ('ҍ', 'ҍ'), + ('ҏ', 'ҏ'), + ('ґ', 'ґ'), + ('ғ', 'ғ'), + ('ҕ', 'ҕ'), + ('җ', 'җ'), + ('ҙ', 'ҙ'), + ('қ', 'қ'), + ('ҝ', 'ҝ'), + ('ҟ', 'ҟ'), + ('ҡ', 'ҡ'), + ('ң', 'ң'), + ('ҥ', 'ҥ'), + ('ҧ', 'ҧ'), + ('ҩ', 'ҩ'), + ('ҫ', 'ҫ'), + ('ҭ', 'ҭ'), + ('ү', 'ү'), + ('ұ', 'ұ'), + ('ҳ', 'ҳ'), + ('ҵ', 'ҵ'), + ('ҷ', 'ҷ'), + ('ҹ', 'ҹ'), + ('һ', 'һ'), + ('ҽ', 'ҽ'), + ('ҿ', 'ҿ'), + ('ӂ', 'ӂ'), + ('ӄ', 'ӄ'), + ('ӆ', 'ӆ'), + ('ӈ', 'ӈ'), + ('ӊ', 'ӊ'), + ('ӌ', 'ӌ'), + ('ӎ', 'ӏ'), + ('ӑ', 'ӑ'), + ('ӓ', 'ӓ'), + ('ӕ', 'ӕ'), + ('ӗ', 'ӗ'), + ('ә', 'ә'), + ('ӛ', 'ӛ'), + ('ӝ', 'ӝ'), + ('ӟ', 'ӟ'), + ('ӡ', 'ӡ'), + ('ӣ', 'ӣ'), + ('ӥ', 'ӥ'), + ('ӧ', 'ӧ'), + ('ө', 'ө'), + ('ӫ', 'ӫ'), + ('ӭ', 'ӭ'), + ('ӯ', 'ӯ'), + ('ӱ', 'ӱ'), + ('ӳ', 'ӳ'), + ('ӵ', 'ӵ'), + ('ӷ', 'ӷ'), + ('ӹ', 'ӹ'), + ('ӻ', 'ӻ'), + ('ӽ', 'ӽ'), + ('ӿ', 'ӿ'), + ('ԁ', 'ԁ'), + ('ԃ', 'ԃ'), + ('ԅ', 'ԅ'), + ('ԇ', 'ԇ'), + ('ԉ', 'ԉ'), + ('ԋ', 'ԋ'), + ('ԍ', 'ԍ'), + ('ԏ', 'ԏ'), + ('ԑ', 'ԑ'), + ('ԓ', 'ԓ'), + ('ԕ', 'ԕ'), + ('ԗ', 'ԗ'), + ('ԙ', 'ԙ'), + ('ԛ', 'ԛ'), + ('ԝ', 'ԝ'), + ('ԟ', 'ԟ'), + ('ԡ', 'ԡ'), + ('ԣ', 'ԣ'), + ('ԥ', 'ԥ'), + ('ԧ', 'ԧ'), + ('ԩ', 'ԩ'), + ('ԫ', 'ԫ'), + ('ԭ', 'ԭ'), + ('ԯ', 'ԯ'), + ('ա', 'և'), + ('ა', 'ჺ'), + ('ჽ', 'ჿ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲈ'), + ('ᲊ', 'ᲊ'), + ('ᵹ', 'ᵹ'), + ('ᵽ', 'ᵽ'), + ('ᶎ', 'ᶎ'), + ('ḁ', 'ḁ'), + ('ḃ', 'ḃ'), + ('ḅ', 'ḅ'), + ('ḇ', 'ḇ'), + ('ḉ', 'ḉ'), + ('ḋ', 'ḋ'), + ('ḍ', 'ḍ'), + ('ḏ', 'ḏ'), + ('ḑ', 'ḑ'), + ('ḓ', 'ḓ'), + ('ḕ', 'ḕ'), + ('ḗ', 'ḗ'), + ('ḙ', 'ḙ'), + ('ḛ', 'ḛ'), + ('ḝ', 'ḝ'), + ('ḟ', 'ḟ'), + ('ḡ', 'ḡ'), + ('ḣ', 'ḣ'), + ('ḥ', 'ḥ'), + ('ḧ', 'ḧ'), + ('ḩ', 'ḩ'), + ('ḫ', 'ḫ'), + ('ḭ', 'ḭ'), + ('ḯ', 'ḯ'), + ('ḱ', 'ḱ'), + ('ḳ', 'ḳ'), + ('ḵ', 'ḵ'), + ('ḷ', 'ḷ'), + ('ḹ', 'ḹ'), + ('ḻ', 'ḻ'), + ('ḽ', 'ḽ'), + ('ḿ', 'ḿ'), + ('ṁ', 'ṁ'), 
+ ('ṃ', 'ṃ'), + ('ṅ', 'ṅ'), + ('ṇ', 'ṇ'), + ('ṉ', 'ṉ'), + ('ṋ', 'ṋ'), + ('ṍ', 'ṍ'), + ('ṏ', 'ṏ'), + ('ṑ', 'ṑ'), + ('ṓ', 'ṓ'), + ('ṕ', 'ṕ'), + ('ṗ', 'ṗ'), + ('ṙ', 'ṙ'), + ('ṛ', 'ṛ'), + ('ṝ', 'ṝ'), + ('ṟ', 'ṟ'), + ('ṡ', 'ṡ'), + ('ṣ', 'ṣ'), + ('ṥ', 'ṥ'), + ('ṧ', 'ṧ'), + ('ṩ', 'ṩ'), + ('ṫ', 'ṫ'), + ('ṭ', 'ṭ'), + ('ṯ', 'ṯ'), + ('ṱ', 'ṱ'), + ('ṳ', 'ṳ'), + ('ṵ', 'ṵ'), + ('ṷ', 'ṷ'), + ('ṹ', 'ṹ'), + ('ṻ', 'ṻ'), + ('ṽ', 'ṽ'), + ('ṿ', 'ṿ'), + ('ẁ', 'ẁ'), + ('ẃ', 'ẃ'), + ('ẅ', 'ẅ'), + ('ẇ', 'ẇ'), + ('ẉ', 'ẉ'), + ('ẋ', 'ẋ'), + ('ẍ', 'ẍ'), + ('ẏ', 'ẏ'), + ('ẑ', 'ẑ'), + ('ẓ', 'ẓ'), + ('ẕ', 'ẛ'), + ('ạ', 'ạ'), + ('ả', 'ả'), + ('ấ', 'ấ'), + ('ầ', 'ầ'), + ('ẩ', 'ẩ'), + ('ẫ', 'ẫ'), + ('ậ', 'ậ'), + ('ắ', 'ắ'), + ('ằ', 'ằ'), + ('ẳ', 'ẳ'), + ('ẵ', 'ẵ'), + ('ặ', 'ặ'), + ('ẹ', 'ẹ'), + ('ẻ', 'ẻ'), + ('ẽ', 'ẽ'), + ('ế', 'ế'), + ('ề', 'ề'), + ('ể', 'ể'), + ('ễ', 'ễ'), + ('ệ', 'ệ'), + ('ỉ', 'ỉ'), + ('ị', 'ị'), + ('ọ', 'ọ'), + ('ỏ', 'ỏ'), + ('ố', 'ố'), + ('ồ', 'ồ'), + ('ổ', 'ổ'), + ('ỗ', 'ỗ'), + ('ộ', 'ộ'), + ('ớ', 'ớ'), + ('ờ', 'ờ'), + ('ở', 'ở'), + ('ỡ', 'ỡ'), + ('ợ', 'ợ'), + ('ụ', 'ụ'), + ('ủ', 'ủ'), + ('ứ', 'ứ'), + ('ừ', 'ừ'), + ('ử', 'ử'), + ('ữ', 'ữ'), + ('ự', 'ự'), + ('ỳ', 'ỳ'), + ('ỵ', 'ỵ'), + ('ỷ', 'ỷ'), + ('ỹ', 'ỹ'), + ('ỻ', 'ỻ'), + ('ỽ', 'ỽ'), + ('ỿ', 'ἇ'), + ('ἐ', 'ἕ'), + ('ἠ', 'ἧ'), + ('ἰ', 'ἷ'), + ('ὀ', 'ὅ'), + ('ὐ', 'ὗ'), + ('ὠ', 'ὧ'), + ('ὰ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾷ'), + ('ᾼ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῇ'), + ('ῌ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'ῗ'), + ('ῠ', 'ῧ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῷ'), + ('ῼ', 'ῼ'), + ('ⅎ', 'ⅎ'), + ('ⅰ', 'ⅿ'), + ('ↄ', 'ↄ'), + ('ⓐ', 'ⓩ'), + ('ⰰ', 'ⱟ'), + ('ⱡ', 'ⱡ'), + ('ⱥ', 'ⱦ'), + ('ⱨ', 'ⱨ'), + ('ⱪ', 'ⱪ'), + ('ⱬ', 'ⱬ'), + ('ⱳ', 'ⱳ'), + ('ⱶ', 'ⱶ'), + ('ⲁ', 'ⲁ'), + ('ⲃ', 'ⲃ'), + ('ⲅ', 'ⲅ'), + ('ⲇ', 'ⲇ'), + ('ⲉ', 'ⲉ'), + ('ⲋ', 'ⲋ'), + ('ⲍ', 'ⲍ'), + ('ⲏ', 'ⲏ'), + ('ⲑ', 'ⲑ'), + ('ⲓ', 'ⲓ'), + ('ⲕ', 'ⲕ'), + ('ⲗ', 'ⲗ'), + ('ⲙ', 'ⲙ'), + ('ⲛ', 'ⲛ'), + ('ⲝ', 'ⲝ'), + ('ⲟ', 'ⲟ'), + ('ⲡ', 'ⲡ'), + ('ⲣ', 'ⲣ'), + ('ⲥ', 'ⲥ'), + ('ⲧ', 'ⲧ'), + ('ⲩ', 
'ⲩ'), + ('ⲫ', 'ⲫ'), + ('ⲭ', 'ⲭ'), + ('ⲯ', 'ⲯ'), + ('ⲱ', 'ⲱ'), + ('ⲳ', 'ⲳ'), + ('ⲵ', 'ⲵ'), + ('ⲷ', 'ⲷ'), + ('ⲹ', 'ⲹ'), + ('ⲻ', 'ⲻ'), + ('ⲽ', 'ⲽ'), + ('ⲿ', 'ⲿ'), + ('ⳁ', 'ⳁ'), + ('ⳃ', 'ⳃ'), + ('ⳅ', 'ⳅ'), + ('ⳇ', 'ⳇ'), + ('ⳉ', 'ⳉ'), + ('ⳋ', 'ⳋ'), + ('ⳍ', 'ⳍ'), + ('ⳏ', 'ⳏ'), + ('ⳑ', 'ⳑ'), + ('ⳓ', 'ⳓ'), + ('ⳕ', 'ⳕ'), + ('ⳗ', 'ⳗ'), + ('ⳙ', 'ⳙ'), + ('ⳛ', 'ⳛ'), + ('ⳝ', 'ⳝ'), + ('ⳟ', 'ⳟ'), + ('ⳡ', 'ⳡ'), + ('ⳣ', 'ⳣ'), + ('ⳬ', 'ⳬ'), + ('ⳮ', 'ⳮ'), + ('ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ꙁ', 'ꙁ'), + ('ꙃ', 'ꙃ'), + ('ꙅ', 'ꙅ'), + ('ꙇ', 'ꙇ'), + ('ꙉ', 'ꙉ'), + ('ꙋ', 'ꙋ'), + ('ꙍ', 'ꙍ'), + ('ꙏ', 'ꙏ'), + ('ꙑ', 'ꙑ'), + ('ꙓ', 'ꙓ'), + ('ꙕ', 'ꙕ'), + ('ꙗ', 'ꙗ'), + ('ꙙ', 'ꙙ'), + ('ꙛ', 'ꙛ'), + ('ꙝ', 'ꙝ'), + ('ꙟ', 'ꙟ'), + ('ꙡ', 'ꙡ'), + ('ꙣ', 'ꙣ'), + ('ꙥ', 'ꙥ'), + ('ꙧ', 'ꙧ'), + ('ꙩ', 'ꙩ'), + ('ꙫ', 'ꙫ'), + ('ꙭ', 'ꙭ'), + ('ꚁ', 'ꚁ'), + ('ꚃ', 'ꚃ'), + ('ꚅ', 'ꚅ'), + ('ꚇ', 'ꚇ'), + ('ꚉ', 'ꚉ'), + ('ꚋ', 'ꚋ'), + ('ꚍ', 'ꚍ'), + ('ꚏ', 'ꚏ'), + ('ꚑ', 'ꚑ'), + ('ꚓ', 'ꚓ'), + ('ꚕ', 'ꚕ'), + ('ꚗ', 'ꚗ'), + ('ꚙ', 'ꚙ'), + ('ꚛ', 'ꚛ'), + ('ꜣ', 'ꜣ'), + ('ꜥ', 'ꜥ'), + ('ꜧ', 'ꜧ'), + ('ꜩ', 'ꜩ'), + ('ꜫ', 'ꜫ'), + ('ꜭ', 'ꜭ'), + ('ꜯ', 'ꜯ'), + ('ꜳ', 'ꜳ'), + ('ꜵ', 'ꜵ'), + ('ꜷ', 'ꜷ'), + ('ꜹ', 'ꜹ'), + ('ꜻ', 'ꜻ'), + ('ꜽ', 'ꜽ'), + ('ꜿ', 'ꜿ'), + ('ꝁ', 'ꝁ'), + ('ꝃ', 'ꝃ'), + ('ꝅ', 'ꝅ'), + ('ꝇ', 'ꝇ'), + ('ꝉ', 'ꝉ'), + ('ꝋ', 'ꝋ'), + ('ꝍ', 'ꝍ'), + ('ꝏ', 'ꝏ'), + ('ꝑ', 'ꝑ'), + ('ꝓ', 'ꝓ'), + ('ꝕ', 'ꝕ'), + ('ꝗ', 'ꝗ'), + ('ꝙ', 'ꝙ'), + ('ꝛ', 'ꝛ'), + ('ꝝ', 'ꝝ'), + ('ꝟ', 'ꝟ'), + ('ꝡ', 'ꝡ'), + ('ꝣ', 'ꝣ'), + ('ꝥ', 'ꝥ'), + ('ꝧ', 'ꝧ'), + ('ꝩ', 'ꝩ'), + ('ꝫ', 'ꝫ'), + ('ꝭ', 'ꝭ'), + ('ꝯ', 'ꝯ'), + ('ꝺ', 'ꝺ'), + ('ꝼ', 'ꝼ'), + ('ꝿ', 'ꝿ'), + ('ꞁ', 'ꞁ'), + ('ꞃ', 'ꞃ'), + ('ꞅ', 'ꞅ'), + ('ꞇ', 'ꞇ'), + ('ꞌ', 'ꞌ'), + ('ꞑ', 'ꞑ'), + ('ꞓ', 'ꞔ'), + ('ꞗ', 'ꞗ'), + ('ꞙ', 'ꞙ'), + ('ꞛ', 'ꞛ'), + ('ꞝ', 'ꞝ'), + ('ꞟ', 'ꞟ'), + ('ꞡ', 'ꞡ'), + ('ꞣ', 'ꞣ'), + ('ꞥ', 'ꞥ'), + ('ꞧ', 'ꞧ'), + ('ꞩ', 'ꞩ'), + ('ꞵ', 'ꞵ'), + ('ꞷ', 'ꞷ'), + ('ꞹ', 'ꞹ'), + ('ꞻ', 'ꞻ'), + ('ꞽ', 'ꞽ'), + ('ꞿ', 'ꞿ'), + ('ꟁ', 'ꟁ'), + ('ꟃ', 'ꟃ'), + ('ꟈ', 'ꟈ'), + ('ꟊ', 'ꟊ'), + ('ꟍ', 'ꟍ'), + ('ꟑ', 'ꟑ'), + 
('ꟗ', 'ꟗ'), + ('ꟙ', 'ꟙ'), + ('ꟛ', 'ꟛ'), + ('ꟶ', 'ꟶ'), + ('ꭓ', 'ꭓ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('a', 'z'), + ('𐐨', '𐑏'), + ('𐓘', '𐓻'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐳀', '𐳲'), + ('𐵰', '𐶅'), + ('𑣀', '𑣟'), + ('𖹠', '𖹿'), + ('𞤢', '𞥃'), +]; + +pub const DASH: &'static [(char, char)] = &[ + ('-', '-'), + ('֊', '֊'), + ('־', '־'), + ('᐀', '᐀'), + ('᠆', '᠆'), + ('‐', '―'), + ('⁓', '⁓'), + ('⁻', '⁻'), + ('₋', '₋'), + ('−', '−'), + ('⸗', '⸗'), + ('⸚', '⸚'), + ('⸺', '⸻'), + ('⹀', '⹀'), + ('⹝', '⹝'), + ('〜', '〜'), + ('〰', '〰'), + ('゠', '゠'), + ('︱', '︲'), + ('﹘', '﹘'), + ('﹣', '﹣'), + ('-', '-'), + ('𐵮', '𐵮'), + ('𐺭', '𐺭'), +]; + +pub const DEFAULT_IGNORABLE_CODE_POINT: &'static [(char, char)] = &[ + ('\u{ad}', '\u{ad}'), + ('\u{34f}', '\u{34f}'), + ('\u{61c}', '\u{61c}'), + ('ᅟ', 'ᅠ'), + ('\u{17b4}', '\u{17b5}'), + ('\u{180b}', '\u{180f}'), + ('\u{200b}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{206f}'), + ('ㅤ', 'ㅤ'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{feff}', '\u{feff}'), + ('ᅠ', 'ᅠ'), + ('\u{fff0}', '\u{fff8}'), + ('\u{1bca0}', '\u{1bca3}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{e0000}', '\u{e0fff}'), +]; + +pub const DEPRECATED: &'static [(char, char)] = &[ + ('ʼn', 'ʼn'), + ('ٳ', 'ٳ'), + ('\u{f77}', '\u{f77}'), + ('\u{f79}', '\u{f79}'), + ('ឣ', 'ឤ'), + ('\u{206a}', '\u{206f}'), + ('〈', '〉'), + ('\u{e0001}', '\u{e0001}'), +]; + +pub const DIACRITIC: &'static [(char, char)] = &[ + ('^', '^'), + ('`', '`'), + ('¨', '¨'), + ('¯', '¯'), + ('´', '´'), + ('·', '¸'), + ('ʰ', '\u{34e}'), + ('\u{350}', '\u{357}'), + ('\u{35d}', '\u{362}'), + ('ʹ', '͵'), + ('ͺ', 'ͺ'), + ('΄', '΅'), + ('\u{483}', '\u{487}'), + ('ՙ', 'ՙ'), + ('\u{591}', '\u{5a1}'), + ('\u{5a3}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c4}'), + ('\u{64b}', '\u{652}'), + ('\u{657}', '\u{658}'), + ('\u{6df}', '\u{6e0}'), + ('ۥ', 'ۦ'), + ('\u{6ea}', '\u{6ec}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + 
('\u{7eb}', 'ߵ'), + ('\u{818}', '\u{819}'), + ('\u{898}', '\u{89f}'), + ('ࣉ', '\u{8d2}'), + ('\u{8e3}', '\u{8fe}'), + ('\u{93c}', '\u{93c}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', '\u{954}'), + ('ॱ', 'ॱ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a4d}', '\u{a4d}'), + ('\u{abc}', '\u{abc}'), + ('\u{acd}', '\u{acd}'), + ('\u{afd}', '\u{aff}'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b55}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c4d}', '\u{c4d}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{ccd}', '\u{ccd}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{dca}', '\u{dca}'), + ('\u{e3a}', '\u{e3a}'), + ('\u{e47}', '\u{e4c}'), + ('\u{e4e}', '\u{e4e}'), + ('\u{eba}', '\u{eba}'), + ('\u{ec8}', '\u{ecc}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', '༿'), + ('\u{f82}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{1037}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('ၣ', 'ၤ'), + ('ၩ', 'ၭ'), + ('ႇ', '\u{108d}'), + ('ႏ', 'ႏ'), + ('ႚ', 'ႛ'), + ('\u{135d}', '\u{135f}'), + ('\u{1714}', '\u{1715}'), + ('\u{1734}', '\u{1734}'), + ('\u{17c9}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a75}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1abe}'), + ('\u{1ac1}', '\u{1acb}'), + ('\u{1b34}', '\u{1b34}'), + ('\u{1b44}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1baa}', '\u{1bab}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1bf2}', '\u{1bf3}'), + ('\u{1c36}', '\u{1c37}'), + ('ᱸ', 'ᱽ'), + ('\u{1cd0}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('᳷', '\u{1cf9}'), + ('ᴬ', 'ᵪ'), + ('\u{1dc4}', '\u{1dcf}'), + ('\u{1df5}', '\u{1dff}'), + ('᾽', '᾽'), + ('᾿', '῁'), + ('῍', '῏'), + ('῝', '῟'), + ('῭', '`'), + ('´', '῾'), + ('\u{2cef}', '\u{2cf1}'), + ('ⸯ', 'ⸯ'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', 
'゜'), + ('ー', 'ー'), + ('\u{a66f}', '\u{a66f}'), + ('\u{a67c}', '\u{a67d}'), + ('ꙿ', 'ꙿ'), + ('ꚜ', 'ꚝ'), + ('\u{a6f0}', '\u{a6f1}'), + ('꜀', '꜡'), + ('ꞈ', '꞊'), + ('ꟸ', 'ꟹ'), + ('\u{a806}', '\u{a806}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c4}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a92b}', '꤮'), + ('\u{a953}', '\u{a953}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('ꩻ', 'ꩽ'), + ('\u{aabf}', 'ꫂ'), + ('\u{aaf6}', '\u{aaf6}'), + ('꭛', 'ꭟ'), + ('ꭩ', '꭫'), + ('꯬', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe20}', '\u{fe2f}'), + ('^', '^'), + ('`', '`'), + ('ー', 'ー'), + ('\u{ff9e}', '\u{ff9f}'), + (' ̄', ' ̄'), + ('\u{102e0}', '\u{102e0}'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('𐴢', '\u{10d27}'), + ('𐵎', '𐵎'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10efd}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11046}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{11133}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), + ('\u{111c0}', '\u{111c0}'), + ('\u{111ca}', '\u{111cc}'), + ('\u{11235}', '\u{11236}'), + ('\u{112e9}', '\u{112ea}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113ce}', '\u{113d0}'), + ('\u{113d2}', '𑏓'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11442}', '\u{11442}'), + ('\u{11446}', '\u{11446}'), + ('\u{114c2}', '\u{114c3}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{1163f}', '\u{1163f}'), + ('\u{116b6}', '\u{116b7}'), + ('\u{1172b}', '\u{1172b}'), + ('\u{11839}', '\u{1183a}'), + ('\u{1193d}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a34}', '\u{11a34}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a99}', '\u{11a99}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11d42}', '\u{11d42}'), + ('\u{11d44}', '\u{11d45}'), + ('\u{11d97}', 
'\u{11d97}'), + ('\u{11f41}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13447}', '\u{13455}'), + ('\u{1612f}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('𖵫', '𖵬'), + ('\u{16f8f}', '𖾟'), + ('\u{16ff0}', '\u{16ff1}'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d167}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('𞀰', '𞁭'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e946}'), + ('\u{1e948}', '\u{1e94a}'), +]; + +pub const EMOJI: &'static [(char, char)] = &[ + ('#', '#'), + ('*', '*'), + ('0', '9'), + ('©', '©'), + ('®', '®'), + ('‼', '‼'), + ('⁉', '⁉'), + ('™', '™'), + ('ℹ', 'ℹ'), + ('↔', '↙'), + ('↩', '↪'), + ('⌚', '⌛'), + ('⌨', '⌨'), + ('⏏', '⏏'), + ('⏩', '⏳'), + ('⏸', '⏺'), + ('Ⓜ', 'Ⓜ'), + ('▪', '▫'), + ('▶', '▶'), + ('◀', '◀'), + ('◻', '◾'), + ('☀', '☄'), + ('☎', '☎'), + ('☑', '☑'), + ('☔', '☕'), + ('☘', '☘'), + ('☝', '☝'), + ('☠', '☠'), + ('☢', '☣'), + ('☦', '☦'), + ('☪', '☪'), + ('☮', '☯'), + ('☸', '☺'), + ('♀', '♀'), + ('♂', '♂'), + ('♈', '♓'), + ('♟', '♠'), + ('♣', '♣'), + ('♥', '♦'), + ('♨', '♨'), + ('♻', '♻'), + ('♾', '♿'), + ('⚒', '⚗'), + ('⚙', '⚙'), + ('⚛', '⚜'), + ('⚠', '⚡'), + ('⚧', '⚧'), + ('⚪', '⚫'), + ('⚰', '⚱'), + ('⚽', '⚾'), + ('⛄', '⛅'), + ('⛈', '⛈'), + ('⛎', '⛏'), + ('⛑', '⛑'), + ('⛓', '⛔'), + ('⛩', '⛪'), + ('⛰', '⛵'), + ('⛷', '⛺'), + ('⛽', '⛽'), + ('✂', '✂'), + ('✅', '✅'), + ('✈', '✍'), + ('✏', '✏'), + ('✒', '✒'), + ('✔', '✔'), + ('✖', '✖'), + ('✝', '✝'), + ('✡', '✡'), + ('✨', '✨'), + ('✳', '✴'), + ('❄', '❄'), + ('❇', '❇'), + ('❌', '❌'), + ('❎', '❎'), + ('❓', '❕'), + ('❗', '❗'), + ('❣', '❤'), + ('➕', '➗'), + ('➡', '➡'), + ('➰', '➰'), + ('➿', '➿'), + ('⤴', '⤵'), + ('⬅', '⬇'), + ('⬛', '⬜'), + ('⭐', '⭐'), + ('⭕', '⭕'), + 
('〰', '〰'), + ('〽', '〽'), + ('㊗', '㊗'), + ('㊙', '㊙'), + ('🀄', '🀄'), + ('🃏', '🃏'), + ('🅰', '🅱'), + ('🅾', '🅿'), + ('🆎', '🆎'), + ('🆑', '🆚'), + ('🇦', '🇿'), + ('🈁', '🈂'), + ('🈚', '🈚'), + ('🈯', '🈯'), + ('🈲', '🈺'), + ('🉐', '🉑'), + ('🌀', '🌡'), + ('🌤', '🎓'), + ('🎖', '🎗'), + ('🎙', '🎛'), + ('🎞', '🏰'), + ('🏳', '🏵'), + ('🏷', '📽'), + ('📿', '🔽'), + ('🕉', '🕎'), + ('🕐', '🕧'), + ('🕯', '🕰'), + ('🕳', '🕺'), + ('🖇', '🖇'), + ('🖊', '🖍'), + ('🖐', '🖐'), + ('🖕', '🖖'), + ('🖤', '🖥'), + ('🖨', '🖨'), + ('🖱', '🖲'), + ('🖼', '🖼'), + ('🗂', '🗄'), + ('🗑', '🗓'), + ('🗜', '🗞'), + ('🗡', '🗡'), + ('🗣', '🗣'), + ('🗨', '🗨'), + ('🗯', '🗯'), + ('🗳', '🗳'), + ('🗺', '🙏'), + ('🚀', '🛅'), + ('🛋', '🛒'), + ('🛕', '🛗'), + ('🛜', '🛥'), + ('🛩', '🛩'), + ('🛫', '🛬'), + ('🛰', '🛰'), + ('🛳', '🛼'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🤌', '🤺'), + ('🤼', '🥅'), + ('🥇', '🧿'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), +]; + +pub const EMOJI_COMPONENT: &'static [(char, char)] = &[ + ('#', '#'), + ('*', '*'), + ('0', '9'), + ('\u{200d}', '\u{200d}'), + ('\u{20e3}', '\u{20e3}'), + ('\u{fe0f}', '\u{fe0f}'), + ('🇦', '🇿'), + ('🏻', '🏿'), + ('🦰', '🦳'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const EMOJI_MODIFIER: &'static [(char, char)] = &[('🏻', '🏿')]; + +pub const EMOJI_MODIFIER_BASE: &'static [(char, char)] = &[ + ('☝', '☝'), + ('⛹', '⛹'), + ('✊', '✍'), + ('🎅', '🎅'), + ('🏂', '🏄'), + ('🏇', '🏇'), + ('🏊', '🏌'), + ('👂', '👃'), + ('👆', '👐'), + ('👦', '👸'), + ('👼', '👼'), + ('💁', '💃'), + ('💅', '💇'), + ('💏', '💏'), + ('💑', '💑'), + ('💪', '💪'), + ('🕴', '🕵'), + ('🕺', '🕺'), + ('🖐', '🖐'), + ('🖕', '🖖'), + ('🙅', '🙇'), + ('🙋', '🙏'), + ('🚣', '🚣'), + ('🚴', '🚶'), + ('🛀', '🛀'), + ('🛌', '🛌'), + ('🤌', '🤌'), + ('🤏', '🤏'), + ('🤘', '🤟'), + ('🤦', '🤦'), + ('🤰', '🤹'), + ('🤼', '🤾'), + ('🥷', '🥷'), + ('🦵', '🦶'), + ('🦸', '🦹'), + ('🦻', '🦻'), + ('🧍', '🧏'), + ('🧑', '🧝'), + ('🫃', '🫅'), + ('🫰', '🫸'), +]; + +pub const EMOJI_PRESENTATION: &'static [(char, char)] = &[ + ('⌚', '⌛'), + ('⏩', '⏬'), + ('⏰', '⏰'), + ('⏳', '⏳'), + ('◽', '◾'), + ('☔', 
'☕'), + ('♈', '♓'), + ('♿', '♿'), + ('⚓', '⚓'), + ('⚡', '⚡'), + ('⚪', '⚫'), + ('⚽', '⚾'), + ('⛄', '⛅'), + ('⛎', '⛎'), + ('⛔', '⛔'), + ('⛪', '⛪'), + ('⛲', '⛳'), + ('⛵', '⛵'), + ('⛺', '⛺'), + ('⛽', '⛽'), + ('✅', '✅'), + ('✊', '✋'), + ('✨', '✨'), + ('❌', '❌'), + ('❎', '❎'), + ('❓', '❕'), + ('❗', '❗'), + ('➕', '➗'), + ('➰', '➰'), + ('➿', '➿'), + ('⬛', '⬜'), + ('⭐', '⭐'), + ('⭕', '⭕'), + ('🀄', '🀄'), + ('🃏', '🃏'), + ('🆎', '🆎'), + ('🆑', '🆚'), + ('🇦', '🇿'), + ('🈁', '🈁'), + ('🈚', '🈚'), + ('🈯', '🈯'), + ('🈲', '🈶'), + ('🈸', '🈺'), + ('🉐', '🉑'), + ('🌀', '🌠'), + ('🌭', '🌵'), + ('🌷', '🍼'), + ('🍾', '🎓'), + ('🎠', '🏊'), + ('🏏', '🏓'), + ('🏠', '🏰'), + ('🏴', '🏴'), + ('🏸', '🐾'), + ('👀', '👀'), + ('👂', '📼'), + ('📿', '🔽'), + ('🕋', '🕎'), + ('🕐', '🕧'), + ('🕺', '🕺'), + ('🖕', '🖖'), + ('🖤', '🖤'), + ('🗻', '🙏'), + ('🚀', '🛅'), + ('🛌', '🛌'), + ('🛐', '🛒'), + ('🛕', '🛗'), + ('🛜', '🛟'), + ('🛫', '🛬'), + ('🛴', '🛼'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🤌', '🤺'), + ('🤼', '🥅'), + ('🥇', '🧿'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), +]; + +pub const EXTENDED_PICTOGRAPHIC: &'static [(char, char)] = &[ + ('©', '©'), + ('®', '®'), + ('‼', '‼'), + ('⁉', '⁉'), + ('™', '™'), + ('ℹ', 'ℹ'), + ('↔', '↙'), + ('↩', '↪'), + ('⌚', '⌛'), + ('⌨', '⌨'), + ('⎈', '⎈'), + ('⏏', '⏏'), + ('⏩', '⏳'), + ('⏸', '⏺'), + ('Ⓜ', 'Ⓜ'), + ('▪', '▫'), + ('▶', '▶'), + ('◀', '◀'), + ('◻', '◾'), + ('☀', '★'), + ('☇', '☒'), + ('☔', '⚅'), + ('⚐', '✅'), + ('✈', '✒'), + ('✔', '✔'), + ('✖', '✖'), + ('✝', '✝'), + ('✡', '✡'), + ('✨', '✨'), + ('✳', '✴'), + ('❄', '❄'), + ('❇', '❇'), + ('❌', '❌'), + ('❎', '❎'), + ('❓', '❕'), + ('❗', '❗'), + ('❣', '❧'), + ('➕', '➗'), + ('➡', '➡'), + ('➰', '➰'), + ('➿', '➿'), + ('⤴', '⤵'), + ('⬅', '⬇'), + ('⬛', '⬜'), + ('⭐', '⭐'), + ('⭕', '⭕'), + ('〰', '〰'), + ('〽', '〽'), + ('㊗', '㊗'), + ('㊙', '㊙'), + ('🀀', '\u{1f0ff}'), + ('🄍', '🄏'), + ('🄯', '🄯'), + ('🅬', '🅱'), + ('🅾', '🅿'), + ('🆎', '🆎'), + ('🆑', '🆚'), + ('🆭', '\u{1f1e5}'), + ('🈁', '\u{1f20f}'), + ('🈚', '🈚'), + ('🈯', '🈯'), + ('🈲', 
'🈺'), + ('\u{1f23c}', '\u{1f23f}'), + ('\u{1f249}', '🏺'), + ('🐀', '🔽'), + ('🕆', '🙏'), + ('🚀', '\u{1f6ff}'), + ('🝴', '🝿'), + ('🟕', '\u{1f7ff}'), + ('\u{1f80c}', '\u{1f80f}'), + ('\u{1f848}', '\u{1f84f}'), + ('\u{1f85a}', '\u{1f85f}'), + ('\u{1f888}', '\u{1f88f}'), + ('\u{1f8ae}', '\u{1f8ff}'), + ('🤌', '🤺'), + ('🤼', '🥅'), + ('🥇', '\u{1faff}'), + ('\u{1fc00}', '\u{1fffd}'), +]; + +pub const EXTENDER: &'static [(char, char)] = &[ + ('·', '·'), + ('ː', 'ˑ'), + ('ـ', 'ـ'), + ('ߺ', 'ߺ'), + ('\u{a71}', '\u{a71}'), + ('\u{afb}', '\u{afb}'), + ('\u{b55}', '\u{b55}'), + ('ๆ', 'ๆ'), + ('ໆ', 'ໆ'), + ('᠊', '᠊'), + ('ᡃ', 'ᡃ'), + ('ᪧ', 'ᪧ'), + ('\u{1c36}', '\u{1c36}'), + ('ᱻ', 'ᱻ'), + ('々', '々'), + ('〱', '〵'), + ('ゝ', 'ゞ'), + ('ー', 'ヾ'), + ('ꀕ', 'ꀕ'), + ('ꘌ', 'ꘌ'), + ('ꧏ', 'ꧏ'), + ('ꧦ', 'ꧦ'), + ('ꩰ', 'ꩰ'), + ('ꫝ', 'ꫝ'), + ('ꫳ', 'ꫴ'), + ('ー', 'ー'), + ('𐞁', '𐞂'), + ('𐵎', '𐵎'), + ('\u{10d6a}', '\u{10d6a}'), + ('𐵯', '𐵯'), + ('\u{11237}', '\u{11237}'), + ('𑍝', '𑍝'), + ('\u{113d2}', '𑏓'), + ('𑗆', '𑗈'), + ('\u{11a98}', '\u{11a98}'), + ('𖭂', '𖭃'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𞄼', '𞄽'), + ('\u{1e5ef}', '\u{1e5ef}'), + ('\u{1e944}', '\u{1e946}'), +]; + +pub const GRAPHEME_BASE: &'static [(char, char)] = &[ + (' ', '~'), + ('\u{a0}', '¬'), + ('®', '˿'), + ('Ͱ', 'ͷ'), + ('ͺ', 'Ϳ'), + ('΄', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', '҂'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', '֊'), + ('֍', '֏'), + ('־', '־'), + ('׀', '׀'), + ('׃', '׃'), + ('׆', '׆'), + ('א', 'ת'), + ('ׯ', '״'), + ('؆', '؏'), + ('؛', '؛'), + ('؝', 'ي'), + ('٠', 'ٯ'), + ('ٱ', 'ە'), + ('۞', '۞'), + ('ۥ', 'ۦ'), + ('۩', '۩'), + ('ۮ', '܍'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('߀', 'ߪ'), + ('ߴ', 'ߺ'), + ('߾', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('࠰', '࠾'), + ('ࡀ', 'ࡘ'), + ('࡞', '࡞'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ः', 'ह'), + ('ऻ', 'ऻ'), + ('ऽ', 'ी'), + ('ॉ', 'ौ'), + ('ॎ', 'ॐ'), + ('क़', 'ॡ'), + ('।', 'ঀ'), + ('ং', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + 
('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ি', 'ী'), + ('ে', 'ৈ'), + ('ো', 'ৌ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('০', '৽'), + ('ਃ', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਾ', 'ੀ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '੯'), + ('ੲ', 'ੴ'), + ('੶', '੶'), + ('ઃ', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ી'), + ('ૉ', 'ૉ'), + ('ો', 'ૌ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('૦', '૱'), + ('ૹ', 'ૹ'), + ('ଂ', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ୀ', 'ୀ'), + ('େ', 'ୈ'), + ('ୋ', 'ୌ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('୦', '୷'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ி', 'ி'), + ('ு', 'ூ'), + ('ெ', 'ை'), + ('ொ', 'ௌ'), + ('ௐ', 'ௐ'), + ('௦', '௺'), + ('ఁ', 'ః'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ు', 'ౄ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('౦', '౯'), + ('౷', 'ಀ'), + ('ಂ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಾ'), + ('ು', 'ು'), + ('ೃ', 'ೄ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('೦', '೯'), + ('ೱ', 'ೳ'), + ('ം', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ി', 'ീ'), + ('െ', 'ൈ'), + ('ൊ', 'ൌ'), + ('ൎ', '൏'), + ('ൔ', 'ൖ'), + ('൘', 'ൡ'), + ('൦', 'ൿ'), + ('ං', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ැ', 'ෑ'), + ('ෘ', 'ෞ'), + ('෦', '෯'), + ('ෲ', '෴'), + ('ก', 'ะ'), + ('า', 'ำ'), + ('฿', 'ๆ'), + ('๏', '๛'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ະ'), + ('າ', 'ຳ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('໐', '໙'), + ('ໜ', 'ໟ'), + ('ༀ', '༗'), + ('༚', '༴'), + ('༶', '༶'), + ('༸', '༸'), + ('༺', 'ཇ'), + ('ཉ', 'ཬ'), + ('ཿ', 'ཿ'), + ('྅', '྅'), + ('ྈ', 'ྌ'), + ('྾', '࿅'), + ('࿇', '࿌'), + ('࿎', '࿚'), + ('က', 'ာ'), + 
('ေ', 'ေ'), + ('း', 'း'), + ('ျ', 'ြ'), + ('ဿ', 'ၗ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႃ', 'ႄ'), + ('ႇ', 'ႌ'), + ('ႎ', 'ႜ'), + ('႞', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('፠', '፼'), + ('ᎀ', '᎙'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('᐀', '᚜'), + ('ᚠ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('᜵', '᜶'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ា', 'ា'), + ('ើ', 'ៅ'), + ('ះ', 'ៈ'), + ('។', 'ៜ'), + ('០', '៩'), + ('៰', '៹'), + ('᠀', '᠊'), + ('᠐', '᠙'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢄ'), + ('ᢇ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᤣ', 'ᤦ'), + ('ᤩ', 'ᤫ'), + ('ᤰ', 'ᤱ'), + ('ᤳ', 'ᤸ'), + ('᥀', '᥀'), + ('᥄', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('᧐', '᧚'), + ('᧞', 'ᨖ'), + ('ᨙ', 'ᨚ'), + ('᨞', 'ᩕ'), + ('ᩗ', 'ᩗ'), + ('ᩡ', 'ᩡ'), + ('ᩣ', 'ᩤ'), + ('ᩭ', 'ᩲ'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᪠', '᪭'), + ('ᬄ', 'ᬳ'), + ('ᬾ', 'ᭁ'), + ('ᭅ', 'ᭌ'), + ('᭎', '᭪'), + ('᭴', '᭿'), + ('ᮂ', 'ᮡ'), + ('ᮦ', 'ᮧ'), + ('ᮮ', 'ᯥ'), + ('ᯧ', 'ᯧ'), + ('ᯪ', 'ᯬ'), + ('ᯮ', 'ᯮ'), + ('᯼', 'ᰫ'), + ('ᰴ', 'ᰵ'), + ('᰻', '᱉'), + ('ᱍ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', '᳇'), + ('᳓', '᳓'), + ('᳡', '᳡'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', '᳷'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ῄ'), + ('ῆ', 'ΐ'), + ('ῖ', 'Ί'), + ('῝', '`'), + ('ῲ', 'ῴ'), + ('ῶ', '῾'), + ('\u{2000}', '\u{200a}'), + ('‐', '‧'), + ('\u{202f}', '\u{205f}'), + ('⁰', 'ⁱ'), + ('⁴', '₎'), + ('ₐ', 'ₜ'), + ('₠', '⃀'), + ('℀', '↋'), + ('←', '␩'), + ('⑀', '⑊'), + ('①', '⭳'), + ('⭶', '⮕'), + ('⮗', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('⳹', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', '⵰'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), 
+ ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('⸀', '⹝'), + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('⿰', '〩'), + ('〰', '〿'), + ('ぁ', 'ゖ'), + ('゛', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('㆐', '㇥'), + ('㇯', '㈞'), + ('㈠', 'ꒌ'), + ('꒐', '꓆'), + ('ꓐ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('꙳', '꙳'), + ('꙾', 'ꚝ'), + ('ꚠ', 'ꛯ'), + ('꛲', '꛷'), + ('꜀', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠤ'), + ('ꠧ', '꠫'), + ('꠰', '꠹'), + ('ꡀ', '꡷'), + ('ꢀ', 'ꣃ'), + ('꣎', '꣙'), + ('ꣲ', 'ꣾ'), + ('꤀', 'ꤥ'), + ('꤮', 'ꥆ'), + ('ꥒ', 'ꥒ'), + ('꥟', 'ꥼ'), + ('ꦃ', 'ꦲ'), + ('ꦴ', 'ꦵ'), + ('ꦺ', 'ꦻ'), + ('ꦾ', 'ꦿ'), + ('꧁', '꧍'), + ('ꧏ', '꧙'), + ('꧞', 'ꧤ'), + ('ꧦ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꨯ', 'ꨰ'), + ('ꨳ', 'ꨴ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩍ', 'ꩍ'), + ('꩐', '꩙'), + ('꩜', 'ꩻ'), + ('ꩽ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫫ'), + ('ꫮ', 'ꫵ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', '꭫'), + ('ꭰ', 'ꯤ'), + ('ꯦ', 'ꯧ'), + ('ꯩ', '꯬'), + ('꯰', '꯹'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'יִ'), + ('ײַ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', '﯂'), + ('ﯓ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('﷏', '﷏'), + ('ﷰ', '﷿'), + ('︐', '︙'), + ('︰', '﹒'), + ('﹔', '﹦'), + ('﹨', '﹫'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('!', 'ン'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('¢', '₩'), + ('│', '○'), + ('', '�'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐄀', '𐄂'), + ('𐄇', '𐄳'), + ('𐄷', '𐆎'), + ('𐆐', '𐆜'), + ('𐆠', '𐆠'), + ('𐇐', '𐇼'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐋡', '𐋻'), + ('𐌀', '𐌣'), + ('𐌭', '𐍊'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎟', '𐏃'), + ('𐏈', '𐏕'), + ('𐐀', '𐒝'), + ('𐒠', '𐒩'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕯', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', 
'𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡗', '𐢞'), + ('𐢧', '𐢯'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐣻', '𐤛'), + ('𐤟', '𐤹'), + ('𐤿', '𐤿'), + ('𐦀', '𐦷'), + ('𐦼', '𐧏'), + ('𐧒', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩀', '𐩈'), + ('𐩐', '𐩘'), + ('𐩠', '𐪟'), + ('𐫀', '𐫤'), + ('𐫫', '𐫶'), + ('𐬀', '𐬵'), + ('𐬹', '𐭕'), + ('𐭘', '𐭲'), + ('𐭸', '𐮑'), + ('𐮙', '𐮜'), + ('𐮩', '𐮯'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐳺', '𐴣'), + ('𐴰', '𐴹'), + ('𐵀', '𐵥'), + ('𐵮', '𐶅'), + ('𐶎', '𐶏'), + ('𐹠', '𐹾'), + ('𐺀', '𐺩'), + ('𐺭', '𐺭'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼧'), + ('𐼰', '𐽅'), + ('𐽑', '𐽙'), + ('𐽰', '𐾁'), + ('𐾆', '𐾉'), + ('𐾰', '𐿋'), + ('𐿠', '𐿶'), + ('𑀀', '𑀀'), + ('𑀂', '𑀷'), + ('𑁇', '𑁍'), + ('𑁒', '𑁯'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂂', '𑂲'), + ('𑂷', '𑂸'), + ('𑂻', '𑂼'), + ('𑂾', '𑃁'), + ('𑃐', '𑃨'), + ('𑃰', '𑃹'), + ('𑄃', '𑄦'), + ('𑄬', '𑄬'), + ('𑄶', '𑅇'), + ('𑅐', '𑅲'), + ('𑅴', '𑅶'), + ('𑆂', '𑆵'), + ('𑆿', '𑆿'), + ('𑇁', '𑇈'), + ('𑇍', '𑇎'), + ('𑇐', '𑇟'), + ('𑇡', '𑇴'), + ('𑈀', '𑈑'), + ('𑈓', '𑈮'), + ('𑈲', '𑈳'), + ('𑈸', '𑈽'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊩'), + ('𑊰', '𑋞'), + ('𑋠', '𑋢'), + ('𑋰', '𑋹'), + ('𑌂', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑌿', '𑌿'), + ('𑍁', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '𑍌'), + ('𑍐', '𑍐'), + ('𑍝', '𑍣'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑎹', '𑎺'), + ('𑏊', '𑏊'), + ('𑏌', '𑏍'), + ('𑏑', '𑏑'), + ('𑏓', '𑏕'), + ('𑏗', '𑏘'), + ('𑐀', '𑐷'), + ('𑑀', '𑑁'), + ('𑑅', '𑑅'), + ('𑑇', '𑑛'), + ('𑑝', '𑑝'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑒱', '𑒲'), + ('𑒹', '𑒹'), + ('𑒻', '𑒼'), + ('𑒾', '𑒾'), + ('𑓁', '𑓁'), + ('𑓄', '𑓇'), + ('𑓐', '𑓙'), + ('𑖀', '𑖮'), + ('𑖰', '𑖱'), + ('𑖸', '𑖻'), + ('𑖾', '𑖾'), + ('𑗁', '𑗛'), + ('𑘀', '𑘲'), + ('𑘻', '𑘼'), + ('𑘾', '𑘾'), + 
('𑙁', '𑙄'), + ('𑙐', '𑙙'), + ('𑙠', '𑙬'), + ('𑚀', '𑚪'), + ('𑚬', '𑚬'), + ('𑚮', '𑚯'), + ('𑚸', '𑚹'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜀', '𑜚'), + ('𑜞', '𑜞'), + ('𑜠', '𑜡'), + ('𑜦', '𑜦'), + ('𑜰', '𑝆'), + ('𑠀', '𑠮'), + ('𑠸', '𑠸'), + ('𑠻', '𑠻'), + ('𑢠', '𑣲'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤱', '𑤵'), + ('𑤷', '𑤸'), + ('𑤿', '𑥂'), + ('𑥄', '𑥆'), + ('𑥐', '𑥙'), + ('𑦠', '𑦧'), + ('𑦪', '𑧓'), + ('𑧜', '𑧟'), + ('𑧡', '𑧤'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨹', '𑨺'), + ('𑨿', '𑩆'), + ('𑩐', '𑩐'), + ('𑩗', '𑩘'), + ('𑩜', '𑪉'), + ('𑪗', '𑪗'), + ('𑪚', '𑪢'), + ('𑪰', '𑫸'), + ('𑬀', '𑬉'), + ('𑯀', '𑯡'), + ('𑯰', '𑯹'), + ('𑰀', '𑰈'), + ('𑰊', '𑰯'), + ('𑰾', '𑰾'), + ('𑱀', '𑱅'), + ('𑱐', '𑱬'), + ('𑱰', '𑲏'), + ('𑲩', '𑲩'), + ('𑲱', '𑲱'), + ('𑲴', '𑲴'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵐', '𑵙'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('𑶓', '𑶔'), + ('𑶖', '𑶖'), + ('𑶘', '𑶘'), + ('𑶠', '𑶩'), + ('𑻠', '𑻲'), + ('𑻵', '𑻸'), + ('𑼂', '𑼐'), + ('𑼒', '𑼵'), + ('𑼾', '𑼿'), + ('𑽃', '𑽙'), + ('𑾰', '𑾰'), + ('𑿀', '𑿱'), + ('𑿿', '𒎙'), + ('𒐀', '𒑮'), + ('𒑰', '𒑴'), + ('𒒀', '𒕃'), + ('𒾐', '𒿲'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖄪', '𖄬'), + ('𖄰', '𖄹'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩠', '𖩩'), + ('𖩮', '𖪾'), + ('𖫀', '𖫉'), + ('𖫐', '𖫭'), + ('𖫵', '𖫵'), + ('𖬀', '𖬯'), + ('𖬷', '𖭅'), + ('𖭐', '𖭙'), + ('𖭛', '𖭡'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵹'), + ('𖹀', '𖺚'), + ('𖼀', '𖽊'), + ('𖽐', '𖾇'), + ('𖾓', '𖾟'), + ('𖿠', '𖿣'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𛲜', '𛲜'), + ('𛲟', '𛲟'), + ('𜰀', '𜳹'), + ('𜴀', '𜺳'), + ('𜽐', '𜿃'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄩', '𝅘𝅥𝅲'), + ('𝅪', '𝅬'), + ('𝆃', '𝆄'), + ('𝆌', '𝆩'), + ('𝆮', '𝇪'), + ('𝈀', '𝉁'), + ('𝉅', '𝉅'), + ('𝋀', '𝋓'), + ('𝋠', '𝋳'), + ('𝌀', '𝍖'), + ('𝍠', '𝍸'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', 
'𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝟋'), + ('𝟎', '𝧿'), + ('𝨷', '𝨺'), + ('𝩭', '𝩴'), + ('𝩶', '𝪃'), + ('𝪅', '𝪋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅀', '𞅉'), + ('𞅎', '𞅏'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞋰', '𞋹'), + ('𞋿', '𞋿'), + ('𞓐', '𞓫'), + ('𞓰', '𞓹'), + ('𞗐', '𞗭'), + ('𞗰', '𞗺'), + ('𞗿', '𞗿'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞣇', '𞣏'), + ('𞤀', '𞥃'), + ('𞥋', '𞥋'), + ('𞥐', '𞥙'), + ('𞥞', '𞥟'), + ('𞱱', '𞲴'), + ('𞴁', '𞴽'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𞻰', '𞻱'), + ('🀀', '🀫'), + ('🀰', '🂓'), + ('🂠', '🂮'), + ('🂱', '🂿'), + ('🃁', '🃏'), + ('🃑', '🃵'), + ('🄀', '🆭'), + ('🇦', '🈂'), + ('🈐', '🈻'), + ('🉀', '🉈'), + ('🉐', '🉑'), + ('🉠', '🉥'), + ('🌀', '🛗'), + ('🛜', '🛬'), + ('🛰', '🛼'), + ('🜀', '🝶'), + ('🝻', '🟙'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), + ('🢰', '🢻'), + ('🣀', '🣁'), + ('🤀', '🩓'), + ('🩠', '🩭'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), + ('🬀', '🮒'), + ('🮔', '🯹'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const GRAPHEME_EXTEND: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + 
('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', '\u{902}'), + ('\u{93a}', '\u{93a}'), + ('\u{93c}', '\u{93c}'), + ('\u{941}', '\u{948}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', '\u{981}'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9be}'), + ('\u{9c1}', '\u{9c4}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', '\u{a02}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a41}', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', '\u{a82}'), + ('\u{abc}', '\u{abc}'), + ('\u{ac1}', '\u{ac5}'), + ('\u{ac7}', '\u{ac8}'), + ('\u{acd}', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', '\u{b01}'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3e}', '\u{b3f}'), + ('\u{b41}', '\u{b44}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', '\u{bbe}'), + ('\u{bc0}', '\u{bc0}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c00}'), + ('\u{c04}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', '\u{c40}'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', '\u{c81}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{cbf}', '\u{cc0}'), + ('\u{cc2}', '\u{cc2}'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), 
+ ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('\u{d00}', '\u{d01}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d3e}', '\u{d3e}'), + ('\u{d41}', '\u{d44}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', '\u{d81}'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dcf}'), + ('\u{dd2}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('\u{ddf}', '\u{ddf}'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('\u{f71}', '\u{f7e}'), + ('\u{f80}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{102d}', '\u{1030}'), + ('\u{1032}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('\u{103d}', '\u{103e}'), + ('\u{1058}', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{1082}'), + ('\u{1085}', '\u{1086}'), + ('\u{108d}', '\u{108d}'), + ('\u{109d}', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17b5}'), + ('\u{17b7}', '\u{17bd}'), + ('\u{17c6}', '\u{17c6}'), + ('\u{17c9}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', '\u{1922}'), + ('\u{1927}', '\u{1928}'), + ('\u{1932}', '\u{1932}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a17}', '\u{1a18}'), + ('\u{1a1b}', '\u{1a1b}'), + ('\u{1a56}', '\u{1a56}'), + ('\u{1a58}', '\u{1a5e}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a62}', '\u{1a62}'), + ('\u{1a65}', '\u{1a6c}'), + ('\u{1a73}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', '\u{1b03}'), + ('\u{1b34}', '\u{1b3d}'), + ('\u{1b42}', '\u{1b44}'), + ('\u{1b6b}', 
'\u{1b73}'), + ('\u{1b80}', '\u{1b81}'), + ('\u{1ba2}', '\u{1ba5}'), + ('\u{1ba8}', '\u{1bad}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1be8}', '\u{1be9}'), + ('\u{1bed}', '\u{1bed}'), + ('\u{1bef}', '\u{1bf3}'), + ('\u{1c2c}', '\u{1c33}'), + ('\u{1c36}', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200c}', '\u{200c}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('\u{a825}', '\u{a826}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a951}'), + ('\u{a953}', '\u{a953}'), + ('\u{a980}', '\u{a982}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9b6}', '\u{a9b9}'), + ('\u{a9bc}', '\u{a9bd}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa2e}'), + ('\u{aa31}', '\u{aa32}'), + ('\u{aa35}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', '\u{aa4c}'), + ('\u{aa7c}', '\u{aa7c}'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('\u{aaec}', '\u{aaed}'), + ('\u{aaf6}', '\u{aaf6}'), + ('\u{abe5}', '\u{abe5}'), + ('\u{abe8}', '\u{abe8}'), + ('\u{abed}', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + 
('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11001}', '\u{11001}'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '\u{11081}'), + ('\u{110b3}', '\u{110b6}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{1112b}'), + ('\u{1112d}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '\u{11181}'), + ('\u{111b6}', '\u{111be}'), + ('\u{111c0}', '\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('\u{111cf}', '\u{111cf}'), + ('\u{1122f}', '\u{11231}'), + ('\u{11234}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112df}'), + ('\u{112e3}', '\u{112ea}'), + ('\u{11300}', '\u{11301}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '\u{1133e}'), + ('\u{11340}', '\u{11340}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', '\u{113b8}'), + ('\u{113bb}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '\u{113c9}'), + ('\u{113ce}', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11438}', '\u{1143f}'), + ('\u{11442}', '\u{11444}'), + ('\u{11446}', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114b0}'), + ('\u{114b3}', '\u{114b8}'), + ('\u{114ba}', '\u{114ba}'), + ('\u{114bd}', '\u{114bd}'), + ('\u{114bf}', '\u{114c0}'), + ('\u{114c2}', '\u{114c3}'), + ('\u{115af}', '\u{115af}'), + ('\u{115b2}', '\u{115b5}'), + ('\u{115bc}', '\u{115bd}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('\u{11633}', '\u{1163a}'), + ('\u{1163d}', '\u{1163d}'), + ('\u{1163f}', '\u{11640}'), + ('\u{116ab}', 
'\u{116ab}'), + ('\u{116ad}', '\u{116ad}'), + ('\u{116b0}', '\u{116b7}'), + ('\u{1171d}', '\u{1171d}'), + ('\u{1171f}', '\u{1171f}'), + ('\u{11722}', '\u{11725}'), + ('\u{11727}', '\u{1172b}'), + ('\u{1182f}', '\u{11837}'), + ('\u{11839}', '\u{1183a}'), + ('\u{11930}', '\u{11930}'), + ('\u{1193b}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119d4}', '\u{119d7}'), + ('\u{119da}', '\u{119db}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '\u{11a38}'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a56}'), + ('\u{11a59}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a96}'), + ('\u{11a98}', '\u{11a99}'), + ('\u{11c30}', '\u{11c36}'), + ('\u{11c38}', '\u{11c3d}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('\u{11caa}', '\u{11cb0}'), + ('\u{11cb2}', '\u{11cb3}'), + ('\u{11cb5}', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('\u{11d90}', '\u{11d91}'), + ('\u{11d95}', '\u{11d95}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11ef3}', '\u{11ef4}'), + ('\u{11f00}', '\u{11f01}'), + ('\u{11f36}', '\u{11f3a}'), + ('\u{11f40}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{16129}'), + ('\u{1612d}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + 
('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const GRAPHEME_LINK: &'static [(char, char)] = &[ + ('\u{94d}', '\u{94d}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{a4d}', '\u{a4d}'), + ('\u{acd}', '\u{acd}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{c4d}', '\u{c4d}'), + ('\u{ccd}', '\u{ccd}'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{dca}', '\u{dca}'), + ('\u{e3a}', '\u{e3a}'), + ('\u{eba}', '\u{eba}'), + ('\u{f84}', '\u{f84}'), + ('\u{1039}', '\u{103a}'), + ('\u{1714}', '\u{1715}'), + ('\u{1734}', '\u{1734}'), + ('\u{17d2}', '\u{17d2}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1b44}', '\u{1b44}'), + ('\u{1baa}', '\u{1bab}'), + ('\u{1bf2}', '\u{1bf3}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{a806}', '\u{a806}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c4}'), + ('\u{a953}', '\u{a953}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{aaf6}', '\u{aaf6}'), + ('\u{abed}', '\u{abed}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{11046}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{1107f}', '\u{1107f}'), + ('\u{110b9}', '\u{110b9}'), + ('\u{11133}', '\u{11134}'), + ('\u{111c0}', '\u{111c0}'), + ('\u{11235}', '\u{11235}'), + ('\u{112ea}', '\u{112ea}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{113ce}', '\u{113d0}'), + ('\u{11442}', '\u{11442}'), + ('\u{114c2}', '\u{114c2}'), + ('\u{115bf}', '\u{115bf}'), + ('\u{1163f}', '\u{1163f}'), + ('\u{116b6}', '\u{116b6}'), + ('\u{1172b}', '\u{1172b}'), + ('\u{11839}', '\u{11839}'), + ('\u{1193d}', '\u{1193e}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a34}', '\u{11a34}'), + ('\u{11a47}', 
'\u{11a47}'), + ('\u{11a99}', '\u{11a99}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11d44}', '\u{11d45}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11f41}', '\u{11f42}'), + ('\u{1612f}', '\u{1612f}'), +]; + +pub const HEX_DIGIT: &'static [(char, char)] = &[ + ('0', '9'), + ('A', 'F'), + ('a', 'f'), + ('0', '9'), + ('A', 'F'), + ('a', 'f'), +]; + +pub const HYPHEN: &'static [(char, char)] = &[ + ('-', '-'), + ('\u{ad}', '\u{ad}'), + ('֊', '֊'), + ('᠆', '᠆'), + ('‐', '‑'), + ('⸗', '⸗'), + ('・', '・'), + ('﹣', '﹣'), + ('-', '-'), + ('・', '・'), +]; + +pub const IDS_BINARY_OPERATOR: &'static [(char, char)] = + &[('⿰', '⿱'), ('⿴', '⿽'), ('㇯', '㇯')]; + +pub const IDS_TRINARY_OPERATOR: &'static [(char, char)] = &[('⿲', '⿳')]; + +pub const IDS_UNARY_OPERATOR: &'static [(char, char)] = &[('⿾', '⿿')]; + +pub const ID_COMPAT_MATH_CONTINUE: &'static [(char, char)] = &[ + ('²', '³'), + ('¹', '¹'), + ('⁰', '⁰'), + ('⁴', '⁾'), + ('₀', '₎'), + ('∂', '∂'), + ('∇', '∇'), + ('∞', '∞'), + ('𝛁', '𝛁'), + ('𝛛', '𝛛'), + ('𝛻', '𝛻'), + ('𝜕', '𝜕'), + ('𝜵', '𝜵'), + ('𝝏', '𝝏'), + ('𝝯', '𝝯'), + ('𝞉', '𝞉'), + ('𝞩', '𝞩'), + ('𝟃', '𝟃'), +]; + +pub const ID_COMPAT_MATH_START: &'static [(char, char)] = &[ + ('∂', '∂'), + ('∇', '∇'), + ('∞', '∞'), + ('𝛁', '𝛁'), + ('𝛛', '𝛛'), + ('𝛻', '𝛻'), + ('𝜕', '𝜕'), + ('𝜵', '𝜵'), + ('𝝏', '𝝏'), + ('𝝯', '𝝯'), + ('𝞉', '𝞉'), + ('𝞩', '𝞩'), + ('𝟃', '𝟃'), +]; + +pub const ID_CONTINUE: &'static [(char, char)] = &[ + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('·', '·'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('\u{300}', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('\u{483}', '\u{487}'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', 
'ײ'), + ('\u{610}', '\u{61a}'), + ('ؠ', '٩'), + ('ٮ', 'ۓ'), + ('ە', '\u{6dc}'), + ('\u{6df}', '\u{6e8}'), + ('\u{6ea}', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', '\u{74a}'), + ('ݍ', 'ޱ'), + ('߀', 'ߵ'), + ('ߺ', 'ߺ'), + ('\u{7fd}', '\u{7fd}'), + ('ࠀ', '\u{82d}'), + ('ࡀ', '\u{85b}'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('\u{897}', '\u{8e1}'), + ('\u{8e3}', '\u{963}'), + ('०', '९'), + ('ॱ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', 'ৱ'), + ('ৼ', 'ৼ'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('૦', '૯'), + ('ૹ', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('୦', '୯'), + ('ୱ', 'ୱ'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('௦', '௯'), + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('\u{c3c}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('౦', '౯'), + ('ಀ', 'ಃ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + 
('\u{cbc}', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('೦', '೯'), + ('ೱ', 'ೳ'), + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', 'ൎ'), + ('ൔ', '\u{d57}'), + ('ൟ', '\u{d63}'), + ('൦', '൯'), + ('ൺ', 'ൿ'), + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('෦', '෯'), + ('ෲ', 'ෳ'), + ('ก', '\u{e3a}'), + ('เ', '\u{e4e}'), + ('๐', '๙'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('໐', '໙'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('\u{f18}', '\u{f19}'), + ('༠', '༩'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('က', '၉'), + ('ၐ', '\u{109d}'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('\u{135d}', '\u{135f}'), + ('፩', '፱'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', '\u{1715}'), + ('ᜟ', '\u{1734}'), + ('ᝀ', '\u{1753}'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('\u{1772}', '\u{1773}'), + ('ក', '\u{17d3}'), + ('ៗ', 'ៗ'), + ('ៜ', '\u{17dd}'), + ('០', '៩'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '᠙'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥆', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('᧐', '᧚'), + ('ᨀ', '\u{1a1b}'), + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('ᪧ', 
'ᪧ'), + ('\u{1ab0}', '\u{1abd}'), + ('\u{1abf}', '\u{1ace}'), + ('\u{1b00}', 'ᭌ'), + ('᭐', '᭙'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1bf3}'), + ('ᰀ', '\u{1c37}'), + ('᱀', '᱉'), + ('ᱍ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', 'ᳺ'), + ('ᴀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('\u{200c}', '\u{200d}'), + ('‿', '⁀'), + ('⁔', '⁔'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('\u{20d0}', '\u{20dc}'), + ('\u{20e1}', '\u{20e1}'), + ('\u{20e5}', '\u{20f0}'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('℘', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('\u{2d7f}', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('\u{2de0}', '\u{2dff}'), + ('々', '〇'), + ('〡', '\u{302f}'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('\u{3099}', 'ゟ'), + ('ァ', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘫ'), + ('Ꙁ', '\u{a66f}'), + ('\u{a674}', '\u{a67d}'), + ('ꙿ', '\u{a6f1}'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꡀ', 'ꡳ'), + ('ꢀ', '\u{a8c5}'), + ('꣐', '꣙'), + ('\u{a8e0}', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', '\u{a92d}'), + ('ꤰ', '\u{a953}'), + ('ꥠ', 'ꥼ'), + ('\u{a980}', '\u{a9c0}'), + ('ꧏ', '꧙'), + ('ꧠ', 'ꧾ'), + ('ꨀ', '\u{aa36}'), + ('ꩀ', 'ꩍ'), + ('꩐', '꩙'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫯ'), + ('ꫲ', '\u{aaf6}'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + 
('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('꯰', '꯹'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('︳', '︴'), + ('﹍', '﹏'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('・', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('\u{101fd}', '\u{101fd}'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('\u{102e0}', '\u{102e0}'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '\u{1037a}'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒠', '𐒩'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '\u{10ae6}'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '\u{10d27}'), + ('𐴰', '𐴹'), + ('𐵀', '𐵥'), + ('\u{10d69}', '\u{10d6d}'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('\u{10eab}', '\u{10eac}'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('\u{10efc}', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '\u{10f50}'), + ('𐽰', '\u{10f85}'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀀', 
'\u{11046}'), + ('𑁦', '𑁵'), + ('\u{1107f}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('𑃐', '𑃨'), + ('𑃰', '𑃹'), + ('\u{11100}', '\u{11134}'), + ('𑄶', '𑄿'), + ('𑅄', '𑅇'), + ('𑅐', '\u{11173}'), + ('𑅶', '𑅶'), + ('\u{11180}', '𑇄'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '\u{11237}'), + ('\u{1123e}', '\u{11241}'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '\u{112ea}'), + ('𑋰', '𑋹'), + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133b}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏓'), + ('\u{113e1}', '\u{113e2}'), + ('𑐀', '𑑊'), + ('𑑐', '𑑙'), + ('\u{1145e}', '𑑡'), + ('𑒀', '𑓅'), + ('𑓇', '𑓇'), + ('𑓐', '𑓙'), + ('𑖀', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('𑗘', '\u{115dd}'), + ('𑘀', '\u{11640}'), + ('𑙄', '𑙄'), + ('𑙐', '𑙙'), + ('𑚀', '𑚸'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜀', '𑜚'), + ('\u{1171d}', '\u{1172b}'), + ('𑜰', '𑜹'), + ('𑝀', '𑝆'), + ('𑠀', '\u{1183a}'), + ('𑢠', '𑣩'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{11943}'), + ('𑥐', '𑥙'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧡'), + ('𑧣', '𑧤'), + ('𑨀', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('𑩐', '\u{11a99}'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑯰', '𑯹'), + ('𑰀', '𑰈'), + ('𑰊', '\u{11c36}'), + ('\u{11c38}', '𑱀'), + ('𑱐', '𑱙'), + ('𑱲', '𑲏'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), 
+ ('𑶓', '𑶘'), + ('𑶠', '𑶩'), + ('𑻠', '𑻶'), + ('\u{11f00}', '𑼐'), + ('𑼒', '\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('𑽐', '\u{11f5a}'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('\u{13440}', '\u{13455}'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄹'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩠', '𖩩'), + ('𖩰', '𖪾'), + ('𖫀', '𖫉'), + ('𖫐', '𖫭'), + ('\u{16af0}', '\u{16af4}'), + ('𖬀', '\u{16b36}'), + ('𖭀', '𖭃'), + ('𖭐', '𖭙'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖵰', '𖵹'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('\u{16f4f}', '𖾇'), + ('\u{16f8f}', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('𜳰', '𜳹'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝟎', '𝟿'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('𞄀', 
'𞄬'), + ('\u{1e130}', '𞄽'), + ('𞅀', '𞅉'), + ('𞅎', '𞅎'), + ('𞊐', '\u{1e2ae}'), + ('𞋀', '𞋹'), + ('𞓐', '𞓹'), + ('𞗐', '𞗺'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('𞤀', '𞥋'), + ('𞥐', '𞥙'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('🯰', '🯹'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const ID_START: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('Ͱ', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('ؠ', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۥ', 'ۦ'), + ('ۮ', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॱ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 
'ਹ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ก', 'ะ'), + ('า', 'ำ'), + ('เ', 'ๆ'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ະ'), + ('າ', 'ຳ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('က', 'ဪ'), + ('ဿ', 'ဿ'), + ('ၐ', 'ၕ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၡ'), + ('ၥ', 'ၦ'), + ('ၮ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႎ', 'ႎ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ៗ', 'ៗ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', 'ᨖ'), + ('ᨠ', 'ᩔ'), + ('ᪧ', 'ᪧ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), 
+ ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('℘', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('々', '〇'), + ('〡', '〩'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('゛', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('ꙿ', 'ꚝ'), + ('ꚠ', 'ꛯ'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧏ', 'ꧏ'), + ('ꧠ', 'ꧤ'), + ('ꧦ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꩺ'), + ('ꩾ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫴ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + 
('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('A', 'Z'), + ('a', 'z'), + ('ヲ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '𐴣'), + ('𐵊', '𐵥'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), + ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + 
('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('𑝀', '𑝆'), + ('𑠀', '𑠫'), + ('𑢠', '𑣟'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', '𑩐'), + ('𑩜', '𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', '𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞤀', '𞥃'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), 
+ ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const IDEOGRAPHIC: &'static [(char, char)] = &[ + ('〆', '〇'), + ('〡', '〩'), + ('〸', '〺'), + ('㐀', '䶿'), + ('一', '鿿'), + ('豈', '舘'), + ('並', '龎'), + ('\u{16fe4}', '\u{16fe4}'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𛅰', '𛋻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const INCB: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', '\u{902}'), + ('क', '\u{93a}'), + ('\u{93c}', '\u{93c}'), + ('\u{941}', '\u{948}'), + ('\u{94d}', '\u{94d}'), + ('\u{951}', 'य़'), + ('\u{962}', '\u{963}'), + ('ॸ', 'ॿ'), + ('\u{981}', '\u{981}'), + ('ক', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9be}'), + ('\u{9c1}', '\u{9c4}'), + ('\u{9cd}', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', 'য়'), + ('\u{9e2}', '\u{9e3}'), 
+ ('ৰ', 'ৱ'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', '\u{a02}'), + ('\u{a3c}', '\u{a3c}'), + ('\u{a41}', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', '\u{a82}'), + ('ક', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{abc}'), + ('\u{ac1}', '\u{ac5}'), + ('\u{ac7}', '\u{ac8}'), + ('\u{acd}', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('ૹ', '\u{aff}'), + ('\u{b01}', '\u{b01}'), + ('କ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3e}', '\u{b3f}'), + ('\u{b41}', '\u{b44}'), + ('\u{b4d}', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୟ'), + ('\u{b62}', '\u{b63}'), + ('ୱ', 'ୱ'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', '\u{bbe}'), + ('\u{bc0}', '\u{bc0}'), + ('\u{bcd}', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c00}'), + ('\u{c04}', '\u{c04}'), + ('క', 'న'), + ('ప', 'హ'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', '\u{c40}'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', '\u{c81}'), + ('\u{cbc}', '\u{cbc}'), + ('\u{cbf}', '\u{cc0}'), + ('\u{cc2}', '\u{cc2}'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('\u{d00}', '\u{d01}'), + ('ക', '\u{d3c}'), + ('\u{d3e}', '\u{d3e}'), + ('\u{d41}', '\u{d44}'), + ('\u{d4d}', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', '\u{d81}'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dcf}'), + ('\u{dd2}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('\u{ddf}', '\u{ddf}'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('\u{f71}', '\u{f7e}'), + ('\u{f80}', '\u{f84}'), + 
('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('\u{102d}', '\u{1030}'), + ('\u{1032}', '\u{1037}'), + ('\u{1039}', '\u{103a}'), + ('\u{103d}', '\u{103e}'), + ('\u{1058}', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{1082}'), + ('\u{1085}', '\u{1086}'), + ('\u{108d}', '\u{108d}'), + ('\u{109d}', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17b5}'), + ('\u{17b7}', '\u{17bd}'), + ('\u{17c6}', '\u{17c6}'), + ('\u{17c9}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', '\u{1922}'), + ('\u{1927}', '\u{1928}'), + ('\u{1932}', '\u{1932}'), + ('\u{1939}', '\u{193b}'), + ('\u{1a17}', '\u{1a18}'), + ('\u{1a1b}', '\u{1a1b}'), + ('\u{1a56}', '\u{1a56}'), + ('\u{1a58}', '\u{1a5e}'), + ('\u{1a60}', '\u{1a60}'), + ('\u{1a62}', '\u{1a62}'), + ('\u{1a65}', '\u{1a6c}'), + ('\u{1a73}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', '\u{1b03}'), + ('\u{1b34}', '\u{1b3d}'), + ('\u{1b42}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1b81}'), + ('\u{1ba2}', '\u{1ba5}'), + ('\u{1ba8}', '\u{1bad}'), + ('\u{1be6}', '\u{1be6}'), + ('\u{1be8}', '\u{1be9}'), + ('\u{1bed}', '\u{1bed}'), + ('\u{1bef}', '\u{1bf3}'), + ('\u{1c2c}', '\u{1c33}'), + ('\u{1c36}', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200d}', '\u{200d}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', 
'\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('\u{a825}', '\u{a826}'), + ('\u{a82c}', '\u{a82c}'), + ('\u{a8c4}', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a951}'), + ('\u{a953}', '\u{a953}'), + ('\u{a980}', '\u{a982}'), + ('\u{a9b3}', '\u{a9b3}'), + ('\u{a9b6}', '\u{a9b9}'), + ('\u{a9bc}', '\u{a9bd}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa2e}'), + ('\u{aa31}', '\u{aa32}'), + ('\u{aa35}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', '\u{aa4c}'), + ('\u{aa7c}', '\u{aa7c}'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('\u{aaec}', '\u{aaed}'), + ('\u{aaf6}', '\u{aaf6}'), + ('\u{abe5}', '\u{abe5}'), + ('\u{abe8}', '\u{abe8}'), + ('\u{abed}', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('\u{11001}', '\u{11001}'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '\u{11081}'), + ('\u{110b3}', '\u{110b6}'), + ('\u{110b9}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{1112b}'), + ('\u{1112d}', '\u{11134}'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '\u{11181}'), + ('\u{111b6}', '\u{111be}'), + ('\u{111c0}', 
'\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('\u{111cf}', '\u{111cf}'), + ('\u{1122f}', '\u{11231}'), + ('\u{11234}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112df}'), + ('\u{112e3}', '\u{112ea}'), + ('\u{11300}', '\u{11301}'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '\u{1133e}'), + ('\u{11340}', '\u{11340}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', '\u{113b8}'), + ('\u{113bb}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '\u{113c9}'), + ('\u{113ce}', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('\u{11438}', '\u{1143f}'), + ('\u{11442}', '\u{11444}'), + ('\u{11446}', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114b0}'), + ('\u{114b3}', '\u{114b8}'), + ('\u{114ba}', '\u{114ba}'), + ('\u{114bd}', '\u{114bd}'), + ('\u{114bf}', '\u{114c0}'), + ('\u{114c2}', '\u{114c3}'), + ('\u{115af}', '\u{115af}'), + ('\u{115b2}', '\u{115b5}'), + ('\u{115bc}', '\u{115bd}'), + ('\u{115bf}', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('\u{11633}', '\u{1163a}'), + ('\u{1163d}', '\u{1163d}'), + ('\u{1163f}', '\u{11640}'), + ('\u{116ab}', '\u{116ab}'), + ('\u{116ad}', '\u{116ad}'), + ('\u{116b0}', '\u{116b7}'), + ('\u{1171d}', '\u{1171d}'), + ('\u{1171f}', '\u{1171f}'), + ('\u{11722}', '\u{11725}'), + ('\u{11727}', '\u{1172b}'), + ('\u{1182f}', '\u{11837}'), + ('\u{11839}', '\u{1183a}'), + ('\u{11930}', '\u{11930}'), + ('\u{1193b}', '\u{1193e}'), + ('\u{11943}', '\u{11943}'), + ('\u{119d4}', '\u{119d7}'), + ('\u{119da}', '\u{119db}'), + ('\u{119e0}', '\u{119e0}'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '\u{11a38}'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a56}'), + ('\u{11a59}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a96}'), + ('\u{11a98}', '\u{11a99}'), + ('\u{11c30}', '\u{11c36}'), + 
('\u{11c38}', '\u{11c3d}'), + ('\u{11c3f}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('\u{11caa}', '\u{11cb0}'), + ('\u{11cb2}', '\u{11cb3}'), + ('\u{11cb5}', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('\u{11d90}', '\u{11d91}'), + ('\u{11d95}', '\u{11d95}'), + ('\u{11d97}', '\u{11d97}'), + ('\u{11ef3}', '\u{11ef4}'), + ('\u{11f00}', '\u{11f01}'), + ('\u{11f36}', '\u{11f3a}'), + ('\u{11f40}', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{16129}'), + ('\u{1612d}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('🏻', '🏿'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const JOIN_CONTROL: &'static [(char, char)] = &[('\u{200c}', '\u{200d}')]; + +pub const LOGICAL_ORDER_EXCEPTION: &'static [(char, char)] = &[ + ('เ', 'ไ'), + ('ເ', 'ໄ'), + ('ᦵ', 'ᦷ'), + ('ᦺ', 'ᦺ'), + ('ꪵ', 
'ꪶ'), + ('ꪹ', 'ꪹ'), + ('ꪻ', 'ꪼ'), +]; + +pub const LOWERCASE: &'static [(char, char)] = &[ + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('ß', 'ö'), + ('ø', 'ÿ'), + ('ā', 'ā'), + ('ă', 'ă'), + ('ą', 'ą'), + ('ć', 'ć'), + ('ĉ', 'ĉ'), + ('ċ', 'ċ'), + ('č', 'č'), + ('ď', 'ď'), + ('đ', 'đ'), + ('ē', 'ē'), + ('ĕ', 'ĕ'), + ('ė', 'ė'), + ('ę', 'ę'), + ('ě', 'ě'), + ('ĝ', 'ĝ'), + ('ğ', 'ğ'), + ('ġ', 'ġ'), + ('ģ', 'ģ'), + ('ĥ', 'ĥ'), + ('ħ', 'ħ'), + ('ĩ', 'ĩ'), + ('ī', 'ī'), + ('ĭ', 'ĭ'), + ('į', 'į'), + ('ı', 'ı'), + ('ij', 'ij'), + ('ĵ', 'ĵ'), + ('ķ', 'ĸ'), + ('ĺ', 'ĺ'), + ('ļ', 'ļ'), + ('ľ', 'ľ'), + ('ŀ', 'ŀ'), + ('ł', 'ł'), + ('ń', 'ń'), + ('ņ', 'ņ'), + ('ň', 'ʼn'), + ('ŋ', 'ŋ'), + ('ō', 'ō'), + ('ŏ', 'ŏ'), + ('ő', 'ő'), + ('œ', 'œ'), + ('ŕ', 'ŕ'), + ('ŗ', 'ŗ'), + ('ř', 'ř'), + ('ś', 'ś'), + ('ŝ', 'ŝ'), + ('ş', 'ş'), + ('š', 'š'), + ('ţ', 'ţ'), + ('ť', 'ť'), + ('ŧ', 'ŧ'), + ('ũ', 'ũ'), + ('ū', 'ū'), + ('ŭ', 'ŭ'), + ('ů', 'ů'), + ('ű', 'ű'), + ('ų', 'ų'), + ('ŵ', 'ŵ'), + ('ŷ', 'ŷ'), + ('ź', 'ź'), + ('ż', 'ż'), + ('ž', 'ƀ'), + ('ƃ', 'ƃ'), + ('ƅ', 'ƅ'), + ('ƈ', 'ƈ'), + ('ƌ', 'ƍ'), + ('ƒ', 'ƒ'), + ('ƕ', 'ƕ'), + ('ƙ', 'ƛ'), + ('ƞ', 'ƞ'), + ('ơ', 'ơ'), + ('ƣ', 'ƣ'), + ('ƥ', 'ƥ'), + ('ƨ', 'ƨ'), + ('ƪ', 'ƫ'), + ('ƭ', 'ƭ'), + ('ư', 'ư'), + ('ƴ', 'ƴ'), + ('ƶ', 'ƶ'), + ('ƹ', 'ƺ'), + ('ƽ', 'ƿ'), + ('dž', 'dž'), + ('lj', 'lj'), + ('nj', 'nj'), + ('ǎ', 'ǎ'), + ('ǐ', 'ǐ'), + ('ǒ', 'ǒ'), + ('ǔ', 'ǔ'), + ('ǖ', 'ǖ'), + ('ǘ', 'ǘ'), + ('ǚ', 'ǚ'), + ('ǜ', 'ǝ'), + ('ǟ', 'ǟ'), + ('ǡ', 'ǡ'), + ('ǣ', 'ǣ'), + ('ǥ', 'ǥ'), + ('ǧ', 'ǧ'), + ('ǩ', 'ǩ'), + ('ǫ', 'ǫ'), + ('ǭ', 'ǭ'), + ('ǯ', 'ǰ'), + ('dz', 'dz'), + ('ǵ', 'ǵ'), + ('ǹ', 'ǹ'), + ('ǻ', 'ǻ'), + ('ǽ', 'ǽ'), + ('ǿ', 'ǿ'), + ('ȁ', 'ȁ'), + ('ȃ', 'ȃ'), + ('ȅ', 'ȅ'), + ('ȇ', 'ȇ'), + ('ȉ', 'ȉ'), + ('ȋ', 'ȋ'), + ('ȍ', 'ȍ'), + ('ȏ', 'ȏ'), + ('ȑ', 'ȑ'), + ('ȓ', 'ȓ'), + ('ȕ', 'ȕ'), + ('ȗ', 'ȗ'), + ('ș', 'ș'), + ('ț', 'ț'), + ('ȝ', 'ȝ'), + ('ȟ', 'ȟ'), + ('ȡ', 'ȡ'), + ('ȣ', 'ȣ'), + ('ȥ', 'ȥ'), + ('ȧ', 'ȧ'), + ('ȩ', 'ȩ'), + ('ȫ', 'ȫ'), + ('ȭ', 
'ȭ'), + ('ȯ', 'ȯ'), + ('ȱ', 'ȱ'), + ('ȳ', 'ȹ'), + ('ȼ', 'ȼ'), + ('ȿ', 'ɀ'), + ('ɂ', 'ɂ'), + ('ɇ', 'ɇ'), + ('ɉ', 'ɉ'), + ('ɋ', 'ɋ'), + ('ɍ', 'ɍ'), + ('ɏ', 'ʓ'), + ('ʕ', 'ʸ'), + ('ˀ', 'ˁ'), + ('ˠ', 'ˤ'), + ('\u{345}', '\u{345}'), + ('ͱ', 'ͱ'), + ('ͳ', 'ͳ'), + ('ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('ΐ', 'ΐ'), + ('ά', 'ώ'), + ('ϐ', 'ϑ'), + ('ϕ', 'ϗ'), + ('ϙ', 'ϙ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('ϣ', 'ϣ'), + ('ϥ', 'ϥ'), + ('ϧ', 'ϧ'), + ('ϩ', 'ϩ'), + ('ϫ', 'ϫ'), + ('ϭ', 'ϭ'), + ('ϯ', 'ϳ'), + ('ϵ', 'ϵ'), + ('ϸ', 'ϸ'), + ('ϻ', 'ϼ'), + ('а', 'џ'), + ('ѡ', 'ѡ'), + ('ѣ', 'ѣ'), + ('ѥ', 'ѥ'), + ('ѧ', 'ѧ'), + ('ѩ', 'ѩ'), + ('ѫ', 'ѫ'), + ('ѭ', 'ѭ'), + ('ѯ', 'ѯ'), + ('ѱ', 'ѱ'), + ('ѳ', 'ѳ'), + ('ѵ', 'ѵ'), + ('ѷ', 'ѷ'), + ('ѹ', 'ѹ'), + ('ѻ', 'ѻ'), + ('ѽ', 'ѽ'), + ('ѿ', 'ѿ'), + ('ҁ', 'ҁ'), + ('ҋ', 'ҋ'), + ('ҍ', 'ҍ'), + ('ҏ', 'ҏ'), + ('ґ', 'ґ'), + ('ғ', 'ғ'), + ('ҕ', 'ҕ'), + ('җ', 'җ'), + ('ҙ', 'ҙ'), + ('қ', 'қ'), + ('ҝ', 'ҝ'), + ('ҟ', 'ҟ'), + ('ҡ', 'ҡ'), + ('ң', 'ң'), + ('ҥ', 'ҥ'), + ('ҧ', 'ҧ'), + ('ҩ', 'ҩ'), + ('ҫ', 'ҫ'), + ('ҭ', 'ҭ'), + ('ү', 'ү'), + ('ұ', 'ұ'), + ('ҳ', 'ҳ'), + ('ҵ', 'ҵ'), + ('ҷ', 'ҷ'), + ('ҹ', 'ҹ'), + ('һ', 'һ'), + ('ҽ', 'ҽ'), + ('ҿ', 'ҿ'), + ('ӂ', 'ӂ'), + ('ӄ', 'ӄ'), + ('ӆ', 'ӆ'), + ('ӈ', 'ӈ'), + ('ӊ', 'ӊ'), + ('ӌ', 'ӌ'), + ('ӎ', 'ӏ'), + ('ӑ', 'ӑ'), + ('ӓ', 'ӓ'), + ('ӕ', 'ӕ'), + ('ӗ', 'ӗ'), + ('ә', 'ә'), + ('ӛ', 'ӛ'), + ('ӝ', 'ӝ'), + ('ӟ', 'ӟ'), + ('ӡ', 'ӡ'), + ('ӣ', 'ӣ'), + ('ӥ', 'ӥ'), + ('ӧ', 'ӧ'), + ('ө', 'ө'), + ('ӫ', 'ӫ'), + ('ӭ', 'ӭ'), + ('ӯ', 'ӯ'), + ('ӱ', 'ӱ'), + ('ӳ', 'ӳ'), + ('ӵ', 'ӵ'), + ('ӷ', 'ӷ'), + ('ӹ', 'ӹ'), + ('ӻ', 'ӻ'), + ('ӽ', 'ӽ'), + ('ӿ', 'ӿ'), + ('ԁ', 'ԁ'), + ('ԃ', 'ԃ'), + ('ԅ', 'ԅ'), + ('ԇ', 'ԇ'), + ('ԉ', 'ԉ'), + ('ԋ', 'ԋ'), + ('ԍ', 'ԍ'), + ('ԏ', 'ԏ'), + ('ԑ', 'ԑ'), + ('ԓ', 'ԓ'), + ('ԕ', 'ԕ'), + ('ԗ', 'ԗ'), + ('ԙ', 'ԙ'), + ('ԛ', 'ԛ'), + ('ԝ', 'ԝ'), + ('ԟ', 'ԟ'), + ('ԡ', 'ԡ'), + ('ԣ', 'ԣ'), + ('ԥ', 'ԥ'), + ('ԧ', 'ԧ'), + ('ԩ', 'ԩ'), + ('ԫ', 'ԫ'), + ('ԭ', 'ԭ'), + ('ԯ', 'ԯ'), + ('ՠ', 'ֈ'), + ('ა', 'ჺ'), + ('ჼ', 'ჿ'), + ('ᏸ', 
'ᏽ'), + ('ᲀ', 'ᲈ'), + ('ᲊ', 'ᲊ'), + ('ᴀ', 'ᶿ'), + ('ḁ', 'ḁ'), + ('ḃ', 'ḃ'), + ('ḅ', 'ḅ'), + ('ḇ', 'ḇ'), + ('ḉ', 'ḉ'), + ('ḋ', 'ḋ'), + ('ḍ', 'ḍ'), + ('ḏ', 'ḏ'), + ('ḑ', 'ḑ'), + ('ḓ', 'ḓ'), + ('ḕ', 'ḕ'), + ('ḗ', 'ḗ'), + ('ḙ', 'ḙ'), + ('ḛ', 'ḛ'), + ('ḝ', 'ḝ'), + ('ḟ', 'ḟ'), + ('ḡ', 'ḡ'), + ('ḣ', 'ḣ'), + ('ḥ', 'ḥ'), + ('ḧ', 'ḧ'), + ('ḩ', 'ḩ'), + ('ḫ', 'ḫ'), + ('ḭ', 'ḭ'), + ('ḯ', 'ḯ'), + ('ḱ', 'ḱ'), + ('ḳ', 'ḳ'), + ('ḵ', 'ḵ'), + ('ḷ', 'ḷ'), + ('ḹ', 'ḹ'), + ('ḻ', 'ḻ'), + ('ḽ', 'ḽ'), + ('ḿ', 'ḿ'), + ('ṁ', 'ṁ'), + ('ṃ', 'ṃ'), + ('ṅ', 'ṅ'), + ('ṇ', 'ṇ'), + ('ṉ', 'ṉ'), + ('ṋ', 'ṋ'), + ('ṍ', 'ṍ'), + ('ṏ', 'ṏ'), + ('ṑ', 'ṑ'), + ('ṓ', 'ṓ'), + ('ṕ', 'ṕ'), + ('ṗ', 'ṗ'), + ('ṙ', 'ṙ'), + ('ṛ', 'ṛ'), + ('ṝ', 'ṝ'), + ('ṟ', 'ṟ'), + ('ṡ', 'ṡ'), + ('ṣ', 'ṣ'), + ('ṥ', 'ṥ'), + ('ṧ', 'ṧ'), + ('ṩ', 'ṩ'), + ('ṫ', 'ṫ'), + ('ṭ', 'ṭ'), + ('ṯ', 'ṯ'), + ('ṱ', 'ṱ'), + ('ṳ', 'ṳ'), + ('ṵ', 'ṵ'), + ('ṷ', 'ṷ'), + ('ṹ', 'ṹ'), + ('ṻ', 'ṻ'), + ('ṽ', 'ṽ'), + ('ṿ', 'ṿ'), + ('ẁ', 'ẁ'), + ('ẃ', 'ẃ'), + ('ẅ', 'ẅ'), + ('ẇ', 'ẇ'), + ('ẉ', 'ẉ'), + ('ẋ', 'ẋ'), + ('ẍ', 'ẍ'), + ('ẏ', 'ẏ'), + ('ẑ', 'ẑ'), + ('ẓ', 'ẓ'), + ('ẕ', 'ẝ'), + ('ẟ', 'ẟ'), + ('ạ', 'ạ'), + ('ả', 'ả'), + ('ấ', 'ấ'), + ('ầ', 'ầ'), + ('ẩ', 'ẩ'), + ('ẫ', 'ẫ'), + ('ậ', 'ậ'), + ('ắ', 'ắ'), + ('ằ', 'ằ'), + ('ẳ', 'ẳ'), + ('ẵ', 'ẵ'), + ('ặ', 'ặ'), + ('ẹ', 'ẹ'), + ('ẻ', 'ẻ'), + ('ẽ', 'ẽ'), + ('ế', 'ế'), + ('ề', 'ề'), + ('ể', 'ể'), + ('ễ', 'ễ'), + ('ệ', 'ệ'), + ('ỉ', 'ỉ'), + ('ị', 'ị'), + ('ọ', 'ọ'), + ('ỏ', 'ỏ'), + ('ố', 'ố'), + ('ồ', 'ồ'), + ('ổ', 'ổ'), + ('ỗ', 'ỗ'), + ('ộ', 'ộ'), + ('ớ', 'ớ'), + ('ờ', 'ờ'), + ('ở', 'ở'), + ('ỡ', 'ỡ'), + ('ợ', 'ợ'), + ('ụ', 'ụ'), + ('ủ', 'ủ'), + ('ứ', 'ứ'), + ('ừ', 'ừ'), + ('ử', 'ử'), + ('ữ', 'ữ'), + ('ự', 'ự'), + ('ỳ', 'ỳ'), + ('ỵ', 'ỵ'), + ('ỷ', 'ỷ'), + ('ỹ', 'ỹ'), + ('ỻ', 'ỻ'), + ('ỽ', 'ỽ'), + ('ỿ', 'ἇ'), + ('ἐ', 'ἕ'), + ('ἠ', 'ἧ'), + ('ἰ', 'ἷ'), + ('ὀ', 'ὅ'), + ('ὐ', 'ὗ'), + ('ὠ', 'ὧ'), + ('ὰ', 'ώ'), + ('ᾀ', 'ᾇ'), + ('ᾐ', 'ᾗ'), + ('ᾠ', 'ᾧ'), + ('ᾰ', 'ᾴ'), + ('ᾶ', 'ᾷ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῇ'), + 
('ῐ', 'ΐ'), + ('ῖ', 'ῗ'), + ('ῠ', 'ῧ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῷ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℊ', 'ℊ'), + ('ℎ', 'ℏ'), + ('ℓ', 'ℓ'), + ('ℯ', 'ℯ'), + ('ℴ', 'ℴ'), + ('ℹ', 'ℹ'), + ('ℼ', 'ℽ'), + ('ⅆ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('ⅰ', 'ⅿ'), + ('ↄ', 'ↄ'), + ('ⓐ', 'ⓩ'), + ('ⰰ', 'ⱟ'), + ('ⱡ', 'ⱡ'), + ('ⱥ', 'ⱦ'), + ('ⱨ', 'ⱨ'), + ('ⱪ', 'ⱪ'), + ('ⱬ', 'ⱬ'), + ('ⱱ', 'ⱱ'), + ('ⱳ', 'ⱴ'), + ('ⱶ', 'ⱽ'), + ('ⲁ', 'ⲁ'), + ('ⲃ', 'ⲃ'), + ('ⲅ', 'ⲅ'), + ('ⲇ', 'ⲇ'), + ('ⲉ', 'ⲉ'), + ('ⲋ', 'ⲋ'), + ('ⲍ', 'ⲍ'), + ('ⲏ', 'ⲏ'), + ('ⲑ', 'ⲑ'), + ('ⲓ', 'ⲓ'), + ('ⲕ', 'ⲕ'), + ('ⲗ', 'ⲗ'), + ('ⲙ', 'ⲙ'), + ('ⲛ', 'ⲛ'), + ('ⲝ', 'ⲝ'), + ('ⲟ', 'ⲟ'), + ('ⲡ', 'ⲡ'), + ('ⲣ', 'ⲣ'), + ('ⲥ', 'ⲥ'), + ('ⲧ', 'ⲧ'), + ('ⲩ', 'ⲩ'), + ('ⲫ', 'ⲫ'), + ('ⲭ', 'ⲭ'), + ('ⲯ', 'ⲯ'), + ('ⲱ', 'ⲱ'), + ('ⲳ', 'ⲳ'), + ('ⲵ', 'ⲵ'), + ('ⲷ', 'ⲷ'), + ('ⲹ', 'ⲹ'), + ('ⲻ', 'ⲻ'), + ('ⲽ', 'ⲽ'), + ('ⲿ', 'ⲿ'), + ('ⳁ', 'ⳁ'), + ('ⳃ', 'ⳃ'), + ('ⳅ', 'ⳅ'), + ('ⳇ', 'ⳇ'), + ('ⳉ', 'ⳉ'), + ('ⳋ', 'ⳋ'), + ('ⳍ', 'ⳍ'), + ('ⳏ', 'ⳏ'), + ('ⳑ', 'ⳑ'), + ('ⳓ', 'ⳓ'), + ('ⳕ', 'ⳕ'), + ('ⳗ', 'ⳗ'), + ('ⳙ', 'ⳙ'), + ('ⳛ', 'ⳛ'), + ('ⳝ', 'ⳝ'), + ('ⳟ', 'ⳟ'), + ('ⳡ', 'ⳡ'), + ('ⳣ', 'ⳤ'), + ('ⳬ', 'ⳬ'), + ('ⳮ', 'ⳮ'), + ('ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ꙁ', 'ꙁ'), + ('ꙃ', 'ꙃ'), + ('ꙅ', 'ꙅ'), + ('ꙇ', 'ꙇ'), + ('ꙉ', 'ꙉ'), + ('ꙋ', 'ꙋ'), + ('ꙍ', 'ꙍ'), + ('ꙏ', 'ꙏ'), + ('ꙑ', 'ꙑ'), + ('ꙓ', 'ꙓ'), + ('ꙕ', 'ꙕ'), + ('ꙗ', 'ꙗ'), + ('ꙙ', 'ꙙ'), + ('ꙛ', 'ꙛ'), + ('ꙝ', 'ꙝ'), + ('ꙟ', 'ꙟ'), + ('ꙡ', 'ꙡ'), + ('ꙣ', 'ꙣ'), + ('ꙥ', 'ꙥ'), + ('ꙧ', 'ꙧ'), + ('ꙩ', 'ꙩ'), + ('ꙫ', 'ꙫ'), + ('ꙭ', 'ꙭ'), + ('ꚁ', 'ꚁ'), + ('ꚃ', 'ꚃ'), + ('ꚅ', 'ꚅ'), + ('ꚇ', 'ꚇ'), + ('ꚉ', 'ꚉ'), + ('ꚋ', 'ꚋ'), + ('ꚍ', 'ꚍ'), + ('ꚏ', 'ꚏ'), + ('ꚑ', 'ꚑ'), + ('ꚓ', 'ꚓ'), + ('ꚕ', 'ꚕ'), + ('ꚗ', 'ꚗ'), + ('ꚙ', 'ꚙ'), + ('ꚛ', 'ꚝ'), + ('ꜣ', 'ꜣ'), + ('ꜥ', 'ꜥ'), + ('ꜧ', 'ꜧ'), + ('ꜩ', 'ꜩ'), + ('ꜫ', 'ꜫ'), + ('ꜭ', 'ꜭ'), + ('ꜯ', 'ꜱ'), + ('ꜳ', 'ꜳ'), + ('ꜵ', 'ꜵ'), + ('ꜷ', 'ꜷ'), + ('ꜹ', 'ꜹ'), + ('ꜻ', 'ꜻ'), + ('ꜽ', 'ꜽ'), + ('ꜿ', 'ꜿ'), + ('ꝁ', 'ꝁ'), + ('ꝃ', 'ꝃ'), + ('ꝅ', 'ꝅ'), + ('ꝇ', 'ꝇ'), + ('ꝉ', 'ꝉ'), + ('ꝋ', 'ꝋ'), + ('ꝍ', 'ꝍ'), 
+ ('ꝏ', 'ꝏ'), + ('ꝑ', 'ꝑ'), + ('ꝓ', 'ꝓ'), + ('ꝕ', 'ꝕ'), + ('ꝗ', 'ꝗ'), + ('ꝙ', 'ꝙ'), + ('ꝛ', 'ꝛ'), + ('ꝝ', 'ꝝ'), + ('ꝟ', 'ꝟ'), + ('ꝡ', 'ꝡ'), + ('ꝣ', 'ꝣ'), + ('ꝥ', 'ꝥ'), + ('ꝧ', 'ꝧ'), + ('ꝩ', 'ꝩ'), + ('ꝫ', 'ꝫ'), + ('ꝭ', 'ꝭ'), + ('ꝯ', 'ꝸ'), + ('ꝺ', 'ꝺ'), + ('ꝼ', 'ꝼ'), + ('ꝿ', 'ꝿ'), + ('ꞁ', 'ꞁ'), + ('ꞃ', 'ꞃ'), + ('ꞅ', 'ꞅ'), + ('ꞇ', 'ꞇ'), + ('ꞌ', 'ꞌ'), + ('ꞎ', 'ꞎ'), + ('ꞑ', 'ꞑ'), + ('ꞓ', 'ꞕ'), + ('ꞗ', 'ꞗ'), + ('ꞙ', 'ꞙ'), + ('ꞛ', 'ꞛ'), + ('ꞝ', 'ꞝ'), + ('ꞟ', 'ꞟ'), + ('ꞡ', 'ꞡ'), + ('ꞣ', 'ꞣ'), + ('ꞥ', 'ꞥ'), + ('ꞧ', 'ꞧ'), + ('ꞩ', 'ꞩ'), + ('ꞯ', 'ꞯ'), + ('ꞵ', 'ꞵ'), + ('ꞷ', 'ꞷ'), + ('ꞹ', 'ꞹ'), + ('ꞻ', 'ꞻ'), + ('ꞽ', 'ꞽ'), + ('ꞿ', 'ꞿ'), + ('ꟁ', 'ꟁ'), + ('ꟃ', 'ꟃ'), + ('ꟈ', 'ꟈ'), + ('ꟊ', 'ꟊ'), + ('ꟍ', 'ꟍ'), + ('ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'ꟕ'), + ('ꟗ', 'ꟗ'), + ('ꟙ', 'ꟙ'), + ('ꟛ', 'ꟛ'), + ('ꟲ', 'ꟴ'), + ('ꟶ', 'ꟶ'), + ('ꟸ', 'ꟺ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('a', 'z'), + ('𐐨', '𐑏'), + ('𐓘', '𐓻'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐞀', '𐞀'), + ('𐞃', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐳀', '𐳲'), + ('𐵰', '𐶅'), + ('𑣀', '𑣟'), + ('𖹠', '𖹿'), + ('𝐚', '𝐳'), + ('𝑎', '𝑔'), + ('𝑖', '𝑧'), + ('𝒂', '𝒛'), + ('𝒶', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝓏'), + ('𝓪', '𝔃'), + ('𝔞', '𝔷'), + ('𝕒', '𝕫'), + ('𝖆', '𝖟'), + ('𝖺', '𝗓'), + ('𝗮', '𝘇'), + ('𝘢', '𝘻'), + ('𝙖', '𝙯'), + ('𝚊', '𝚥'), + ('𝛂', '𝛚'), + ('𝛜', '𝛡'), + ('𝛼', '𝜔'), + ('𝜖', '𝜛'), + ('𝜶', '𝝎'), + ('𝝐', '𝝕'), + ('𝝰', '𝞈'), + ('𝞊', '𝞏'), + ('𝞪', '𝟂'), + ('𝟄', '𝟉'), + ('𝟋', '𝟋'), + ('𝼀', '𝼉'), + ('𝼋', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞤢', '𞥃'), +]; + +pub const MATH: &'static [(char, char)] = &[ + ('+', '+'), + ('<', '>'), + ('^', '^'), + ('|', '|'), + ('~', '~'), + ('¬', '¬'), + ('±', '±'), + ('×', '×'), + ('÷', '÷'), + ('ϐ', 'ϒ'), + ('ϕ', 'ϕ'), + ('ϰ', 'ϱ'), + ('ϴ', '϶'), + ('؆', '؈'), + ('‖', '‖'), + ('′', '‴'), + ('⁀', '⁀'), + ('⁄', '⁄'), + ('⁒', '⁒'), + ('\u{2061}', '\u{2064}'), + ('⁺', '⁾'), + ('₊', '₎'), + ('\u{20d0}', '\u{20dc}'), + ('\u{20e1}', '\u{20e1}'), 
+ ('\u{20e5}', '\u{20e6}'), + ('\u{20eb}', '\u{20ef}'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('℘', 'ℝ'), + ('ℤ', 'ℤ'), + ('ℨ', '℩'), + ('ℬ', 'ℭ'), + ('ℯ', 'ℱ'), + ('ℳ', 'ℸ'), + ('ℼ', 'ⅉ'), + ('⅋', '⅋'), + ('←', '↧'), + ('↩', '↮'), + ('↰', '↱'), + ('↶', '↷'), + ('↼', '⇛'), + ('⇝', '⇝'), + ('⇤', '⇥'), + ('⇴', '⋿'), + ('⌈', '⌋'), + ('⌠', '⌡'), + ('⍼', '⍼'), + ('⎛', '⎵'), + ('⎷', '⎷'), + ('⏐', '⏐'), + ('⏜', '⏢'), + ('■', '□'), + ('▮', '▷'), + ('▼', '◁'), + ('◆', '◇'), + ('◊', '○'), + ('●', '◓'), + ('◢', '◢'), + ('◤', '◤'), + ('◧', '◬'), + ('◸', '◿'), + ('★', '☆'), + ('♀', '♀'), + ('♂', '♂'), + ('♠', '♣'), + ('♭', '♯'), + ('⟀', '⟿'), + ('⤀', '⫿'), + ('⬰', '⭄'), + ('⭇', '⭌'), + ('﬩', '﬩'), + ('﹡', '﹦'), + ('﹨', '﹨'), + ('+', '+'), + ('<', '>'), + ('\', '\'), + ('^', '^'), + ('|', '|'), + ('~', '~'), + ('¬', '¬'), + ('←', '↓'), + ('𐶎', '𐶏'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝟋'), + ('𝟎', '𝟿'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𞻰', '𞻱'), +]; + +pub const MODIFIER_COMBINING_MARK: &'static [(char, char)] = &[ + ('\u{654}', '\u{655}'), + ('\u{658}', '\u{658}'), + ('\u{6dc}', '\u{6dc}'), + ('\u{6e3}', '\u{6e3}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{8ca}', '\u{8cb}'), + ('\u{8cd}', '\u{8cf}'), + ('\u{8d3}', '\u{8d3}'), + ('\u{8f3}', '\u{8f3}'), +]; + +pub const NONCHARACTER_CODE_POINT: &'static 
[(char, char)] = &[ + ('\u{fdd0}', '\u{fdef}'), + ('\u{fffe}', '\u{ffff}'), + ('\u{1fffe}', '\u{1ffff}'), + ('\u{2fffe}', '\u{2ffff}'), + ('\u{3fffe}', '\u{3ffff}'), + ('\u{4fffe}', '\u{4ffff}'), + ('\u{5fffe}', '\u{5ffff}'), + ('\u{6fffe}', '\u{6ffff}'), + ('\u{7fffe}', '\u{7ffff}'), + ('\u{8fffe}', '\u{8ffff}'), + ('\u{9fffe}', '\u{9ffff}'), + ('\u{afffe}', '\u{affff}'), + ('\u{bfffe}', '\u{bffff}'), + ('\u{cfffe}', '\u{cffff}'), + ('\u{dfffe}', '\u{dffff}'), + ('\u{efffe}', '\u{effff}'), + ('\u{ffffe}', '\u{fffff}'), + ('\u{10fffe}', '\u{10ffff}'), +]; + +pub const OTHER_ALPHABETIC: &'static [(char, char)] = &[ + ('\u{345}', '\u{345}'), + ('\u{363}', '\u{36f}'), + ('\u{5b0}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{657}'), + ('\u{659}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6e1}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ed}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{73f}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{816}', '\u{817}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82c}'), + ('\u{897}', '\u{897}'), + ('\u{8d4}', '\u{8df}'), + ('\u{8e3}', '\u{8e9}'), + ('\u{8f0}', 'ः'), + ('\u{93a}', 'ऻ'), + ('ा', 'ौ'), + ('ॎ', 'ॏ'), + ('\u{955}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', 'ঃ'), + ('\u{9be}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৌ'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{a01}', 'ਃ'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4c}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('ા', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', 'ૌ'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{afc}'), + ('\u{b01}', 'ଃ'), + ('\u{b3e}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', 'ୌ'), + ('\u{b56}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), 
+ ('ொ', 'ௌ'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c04}'), + ('\u{c3e}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4c}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', 'ಃ'), + ('ಾ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccc}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('ೳ', 'ೳ'), + ('\u{d00}', 'ഃ'), + ('\u{d3e}', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', 'ൌ'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', 'ඃ'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e4d}', '\u{e4d}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{eb9}'), + ('\u{ebb}', '\u{ebc}'), + ('\u{ecd}', '\u{ecd}'), + ('\u{f71}', '\u{f83}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('ါ', '\u{1036}'), + ('း', 'း'), + ('ျ', '\u{103e}'), + ('ၖ', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('ၢ', 'ၤ'), + ('ၧ', 'ၭ'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{108d}'), + ('ႏ', 'ႏ'), + ('ႚ', '\u{109d}'), + ('\u{1712}', '\u{1713}'), + ('\u{1732}', '\u{1733}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('ា', 'ៈ'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', 'ᤸ'), + ('\u{1a17}', '\u{1a1b}'), + ('ᩕ', '\u{1a5e}'), + ('ᩡ', '\u{1a74}'), + ('\u{1abf}', '\u{1ac0}'), + ('\u{1acc}', '\u{1ace}'), + ('\u{1b00}', 'ᬄ'), + ('\u{1b35}', '\u{1b43}'), + ('\u{1b80}', 'ᮂ'), + ('ᮡ', '\u{1ba9}'), + ('\u{1bac}', '\u{1bad}'), + ('ᯧ', '\u{1bf1}'), + ('ᰤ', '\u{1c36}'), + ('\u{1dd3}', '\u{1df4}'), + ('Ⓐ', 'ⓩ'), + ('\u{2de0}', '\u{2dff}'), + ('\u{a674}', '\u{a67b}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a802}', '\u{a802}'), + ('\u{a80b}', '\u{a80b}'), + ('ꠣ', 'ꠧ'), + ('ꢀ', 'ꢁ'), + ('ꢴ', 'ꣃ'), + ('\u{a8c5}', '\u{a8c5}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92a}'), + ('\u{a947}', 'ꥒ'), + ('\u{a980}', 'ꦃ'), + ('ꦴ', 'ꦿ'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa36}'), + ('\u{aa43}', 
'\u{aa43}'), + ('\u{aa4c}', 'ꩍ'), + ('ꩻ', 'ꩽ'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabe}'), + ('ꫫ', 'ꫯ'), + ('ꫵ', 'ꫵ'), + ('ꯣ', 'ꯪ'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d69}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10efc}'), + ('𑀀', '𑀂'), + ('\u{11038}', '\u{11045}'), + ('\u{11073}', '\u{11074}'), + ('\u{11080}', '𑂂'), + ('𑂰', '𑂸'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{11132}'), + ('𑅅', '𑅆'), + ('\u{11180}', '𑆂'), + ('𑆳', '𑆿'), + ('𑇎', '\u{111cf}'), + ('𑈬', '\u{11234}'), + ('\u{11237}', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112e8}'), + ('\u{11300}', '𑌃'), + ('\u{1133e}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '𑍌'), + ('\u{11357}', '\u{11357}'), + ('𑍢', '𑍣'), + ('\u{113b8}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏍'), + ('𑐵', '𑑁'), + ('\u{11443}', '𑑅'), + ('\u{114b0}', '𑓁'), + ('\u{115af}', '\u{115b5}'), + ('𑖸', '𑖾'), + ('\u{115dc}', '\u{115dd}'), + ('𑘰', '𑘾'), + ('\u{11640}', '\u{11640}'), + ('\u{116ab}', '\u{116b5}'), + ('\u{1171d}', '\u{1172a}'), + ('𑠬', '𑠸'), + ('\u{11930}', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{1193c}'), + ('𑥀', '𑥀'), + ('𑥂', '𑥂'), + ('𑧑', '\u{119d7}'), + ('\u{119da}', '𑧟'), + ('𑧤', '𑧤'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a35}', '𑨹'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a51}', '\u{11a5b}'), + ('\u{11a8a}', '𑪗'), + ('𑰯', '\u{11c36}'), + ('\u{11c38}', '𑰾'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d41}'), + ('\u{11d43}', '\u{11d43}'), + ('\u{11d47}', '\u{11d47}'), + ('𑶊', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶖'), + 
('\u{11ef3}', '𑻶'), + ('\u{11f00}', '\u{11f01}'), + ('𑼃', '𑼃'), + ('𑼴', '\u{11f3a}'), + ('𑼾', '\u{11f40}'), + ('\u{1611e}', '\u{1612e}'), + ('\u{16f4f}', '\u{16f4f}'), + ('𖽑', '𖾇'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9e}', '\u{1bc9e}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e947}', '\u{1e947}'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), +]; + +pub const OTHER_DEFAULT_IGNORABLE_CODE_POINT: &'static [(char, char)] = &[ + ('\u{34f}', '\u{34f}'), + ('ᅟ', 'ᅠ'), + ('\u{17b4}', '\u{17b5}'), + ('\u{2065}', '\u{2065}'), + ('ㅤ', 'ㅤ'), + ('ᅠ', 'ᅠ'), + ('\u{fff0}', '\u{fff8}'), + ('\u{e0000}', '\u{e0000}'), + ('\u{e0002}', '\u{e001f}'), + ('\u{e0080}', '\u{e00ff}'), + ('\u{e01f0}', '\u{e0fff}'), +]; + +pub const OTHER_GRAPHEME_EXTEND: &'static [(char, char)] = &[ + ('\u{9be}', '\u{9be}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{b3e}', '\u{b3e}'), + ('\u{b57}', '\u{b57}'), + ('\u{bbe}', '\u{bbe}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{cc0}', '\u{cc0}'), + ('\u{cc2}', '\u{cc2}'), + ('\u{cc7}', '\u{cc8}'), + ('\u{cca}', '\u{ccb}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{d3e}', '\u{d3e}'), + ('\u{d57}', '\u{d57}'), + ('\u{dcf}', '\u{dcf}'), + ('\u{ddf}', '\u{ddf}'), + ('\u{1715}', '\u{1715}'), + ('\u{1734}', '\u{1734}'), + ('\u{1b35}', '\u{1b35}'), + ('\u{1b3b}', '\u{1b3b}'), + ('\u{1b3d}', '\u{1b3d}'), + ('\u{1b43}', '\u{1b44}'), + ('\u{1baa}', '\u{1baa}'), + ('\u{1bf2}', '\u{1bf3}'), + ('\u{200c}', '\u{200c}'), + ('\u{302e}', '\u{302f}'), + ('\u{a953}', '\u{a953}'), + ('\u{a9c0}', '\u{a9c0}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{111c0}', '\u{111c0}'), + ('\u{11235}', '\u{11235}'), + ('\u{1133e}', '\u{1133e}'), + ('\u{1134d}', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('\u{113b8}', '\u{113b8}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '\u{113c9}'), + ('\u{113cf}', 
'\u{113cf}'), + ('\u{114b0}', '\u{114b0}'), + ('\u{114bd}', '\u{114bd}'), + ('\u{115af}', '\u{115af}'), + ('\u{116b6}', '\u{116b6}'), + ('\u{11930}', '\u{11930}'), + ('\u{1193d}', '\u{1193d}'), + ('\u{11f41}', '\u{11f41}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1d165}', '\u{1d166}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const OTHER_ID_CONTINUE: &'static [(char, char)] = &[ + ('·', '·'), + ('·', '·'), + ('፩', '፱'), + ('᧚', '᧚'), + ('\u{200c}', '\u{200d}'), + ('・', '・'), + ('・', '・'), +]; + +pub const OTHER_ID_START: &'static [(char, char)] = + &[('\u{1885}', '\u{1886}'), ('℘', '℘'), ('℮', '℮'), ('゛', '゜')]; + +pub const OTHER_LOWERCASE: &'static [(char, char)] = &[ + ('ª', 'ª'), + ('º', 'º'), + ('ʰ', 'ʸ'), + ('ˀ', 'ˁ'), + ('ˠ', 'ˤ'), + ('\u{345}', '\u{345}'), + ('ͺ', 'ͺ'), + ('ჼ', 'ჼ'), + ('ᴬ', 'ᵪ'), + ('ᵸ', 'ᵸ'), + ('ᶛ', 'ᶿ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ⅰ', 'ⅿ'), + ('ⓐ', 'ⓩ'), + ('ⱼ', 'ⱽ'), + ('ꚜ', 'ꚝ'), + ('ꝰ', 'ꝰ'), + ('ꟲ', 'ꟴ'), + ('ꟸ', 'ꟹ'), + ('ꭜ', 'ꭟ'), + ('ꭩ', 'ꭩ'), + ('𐞀', '𐞀'), + ('𐞃', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𞀰', '𞁭'), +]; + +pub const OTHER_MATH: &'static [(char, char)] = &[ + ('^', '^'), + ('ϐ', 'ϒ'), + ('ϕ', 'ϕ'), + ('ϰ', 'ϱ'), + ('ϴ', 'ϵ'), + ('‖', '‖'), + ('′', '‴'), + ('⁀', '⁀'), + ('\u{2061}', '\u{2064}'), + ('⁽', '⁾'), + ('₍', '₎'), + ('\u{20d0}', '\u{20dc}'), + ('\u{20e1}', '\u{20e1}'), + ('\u{20e5}', '\u{20e6}'), + ('\u{20eb}', '\u{20ef}'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('ℨ', '℩'), + ('ℬ', 'ℭ'), + ('ℯ', 'ℱ'), + ('ℳ', 'ℸ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('↕', '↙'), + ('↜', '↟'), + ('↡', '↢'), + ('↤', '↥'), + ('↧', '↧'), + ('↩', '↭'), + ('↰', '↱'), + ('↶', '↷'), + ('↼', '⇍'), + ('⇐', '⇑'), + ('⇓', '⇓'), + ('⇕', '⇛'), + ('⇝', '⇝'), + ('⇤', '⇥'), + ('⌈', '⌋'), + ('⎴', '⎵'), + ('⎷', '⎷'), + ('⏐', '⏐'), + ('⏢', '⏢'), + ('■', '□'), + ('▮', '▶'), + ('▼', '◀'), + ('◆', '◇'), + ('◊', '○'), + ('●', '◓'), + ('◢', '◢'), + ('◤', '◤'), + 
('◧', '◬'), + ('★', '☆'), + ('♀', '♀'), + ('♂', '♂'), + ('♠', '♣'), + ('♭', '♮'), + ('⟅', '⟆'), + ('⟦', '⟯'), + ('⦃', '⦘'), + ('⧘', '⧛'), + ('⧼', '⧽'), + ('﹡', '﹡'), + ('﹣', '﹣'), + ('﹨', '﹨'), + ('\', '\'), + ('^', '^'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝟎', '𝟿'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), +]; + +pub const OTHER_UPPERCASE: &'static [(char, char)] = + &[('Ⅰ', 'Ⅿ'), ('Ⓐ', 'Ⓩ'), ('🄰', '🅉'), ('🅐', '🅩'), ('🅰', '🆉')]; + +pub const PATTERN_SYNTAX: &'static [(char, char)] = &[ + ('!', '/'), + (':', '@'), + ('[', '^'), + ('`', '`'), + ('{', '~'), + ('¡', '§'), + ('©', '©'), + ('«', '¬'), + ('®', '®'), + ('°', '±'), + ('¶', '¶'), + ('»', '»'), + ('¿', '¿'), + ('×', '×'), + ('÷', '÷'), + ('‐', '‧'), + ('‰', '‾'), + ('⁁', '⁓'), + ('⁕', '⁞'), + ('←', '\u{245f}'), + ('─', '❵'), + ('➔', '⯿'), + ('⸀', '\u{2e7f}'), + ('、', '〃'), + ('〈', '〠'), + ('〰', '〰'), + ('﴾', '﴿'), + ('﹅', '﹆'), +]; + +pub const PATTERN_WHITE_SPACE: &'static [(char, char)] = &[ + ('\t', '\r'), + (' ', ' '), + ('\u{85}', '\u{85}'), + ('\u{200e}', '\u{200f}'), + ('\u{2028}', '\u{2029}'), +]; + +pub const PREPENDED_CONCATENATION_MARK: &'static [(char, char)] = &[ + ('\u{600}', '\u{605}'), + ('\u{6dd}', 
'\u{6dd}'), + ('\u{70f}', '\u{70f}'), + ('\u{890}', '\u{891}'), + ('\u{8e2}', '\u{8e2}'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110cd}', '\u{110cd}'), +]; + +pub const QUOTATION_MARK: &'static [(char, char)] = &[ + ('"', '"'), + ('\'', '\''), + ('«', '«'), + ('»', '»'), + ('‘', '‟'), + ('‹', '›'), + ('⹂', '⹂'), + ('「', '』'), + ('〝', '〟'), + ('﹁', '﹄'), + ('"', '"'), + (''', '''), + ('「', '」'), +]; + +pub const RADICAL: &'static [(char, char)] = + &[('⺀', '⺙'), ('⺛', '⻳'), ('⼀', '⿕')]; + +pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇦', '🇿')]; + +pub const SENTENCE_TERMINAL: &'static [(char, char)] = &[ + ('!', '!'), + ('.', '.'), + ('?', '?'), + ('։', '։'), + ('؝', '؟'), + ('۔', '۔'), + ('܀', '܂'), + ('߹', '߹'), + ('࠷', '࠷'), + ('࠹', '࠹'), + ('࠽', '࠾'), + ('।', '॥'), + ('၊', '။'), + ('።', '።'), + ('፧', '፨'), + ('᙮', '᙮'), + ('᜵', '᜶'), + ('។', '៕'), + ('᠃', '᠃'), + ('᠉', '᠉'), + ('᥄', '᥅'), + ('᪨', '᪫'), + ('᭎', '᭏'), + ('᭚', '᭛'), + ('᭞', '᭟'), + ('᭽', '᭿'), + ('᰻', '᰼'), + ('᱾', '᱿'), + ('․', '․'), + ('‼', '‽'), + ('⁇', '⁉'), + ('⳹', '⳻'), + ('⸮', '⸮'), + ('⸼', '⸼'), + ('⹓', '⹔'), + ('。', '。'), + ('꓿', '꓿'), + ('꘎', '꘏'), + ('꛳', '꛳'), + ('꛷', '꛷'), + ('꡶', '꡷'), + ('꣎', '꣏'), + ('꤯', '꤯'), + ('꧈', '꧉'), + ('꩝', '꩟'), + ('꫰', '꫱'), + ('꯫', '꯫'), + ('︒', '︒'), + ('︕', '︖'), + ('﹒', '﹒'), + ('﹖', '﹗'), + ('!', '!'), + ('.', '.'), + ('?', '?'), + ('。', '。'), + ('𐩖', '𐩗'), + ('𐽕', '𐽙'), + ('𐾆', '𐾉'), + ('𑁇', '𑁈'), + ('𑂾', '𑃁'), + ('𑅁', '𑅃'), + ('𑇅', '𑇆'), + ('𑇍', '𑇍'), + ('𑇞', '𑇟'), + ('𑈸', '𑈹'), + ('𑈻', '𑈼'), + ('𑊩', '𑊩'), + ('𑏔', '𑏕'), + ('𑑋', '𑑌'), + ('𑗂', '𑗃'), + ('𑗉', '𑗗'), + ('𑙁', '𑙂'), + ('𑜼', '𑜾'), + ('𑥄', '𑥄'), + ('𑥆', '𑥆'), + ('𑩂', '𑩃'), + ('𑪛', '𑪜'), + ('𑱁', '𑱂'), + ('𑻷', '𑻸'), + ('𑽃', '𑽄'), + ('𖩮', '𖩯'), + ('𖫵', '𖫵'), + ('𖬷', '𖬸'), + ('𖭄', '𖭄'), + ('𖵮', '𖵯'), + ('𖺘', '𖺘'), + ('𛲟', '𛲟'), + ('𝪈', '𝪈'), +]; + +pub const SOFT_DOTTED: &'static [(char, char)] = &[ + ('i', 'j'), + ('į', 'į'), + ('ɉ', 'ɉ'), + ('ɨ', 'ɨ'), + ('ʝ', 'ʝ'), + ('ʲ', 'ʲ'), 
+ ('ϳ', 'ϳ'), + ('і', 'і'), + ('ј', 'ј'), + ('ᵢ', 'ᵢ'), + ('ᶖ', 'ᶖ'), + ('ᶤ', 'ᶤ'), + ('ᶨ', 'ᶨ'), + ('ḭ', 'ḭ'), + ('ị', 'ị'), + ('ⁱ', 'ⁱ'), + ('ⅈ', 'ⅉ'), + ('ⱼ', 'ⱼ'), + ('𝐢', '𝐣'), + ('𝑖', '𝑗'), + ('𝒊', '𝒋'), + ('𝒾', '𝒿'), + ('𝓲', '𝓳'), + ('𝔦', '𝔧'), + ('𝕚', '𝕛'), + ('𝖎', '𝖏'), + ('𝗂', '𝗃'), + ('𝗶', '𝗷'), + ('𝘪', '𝘫'), + ('𝙞', '𝙟'), + ('𝚒', '𝚓'), + ('𝼚', '𝼚'), + ('𞁌', '𞁍'), + ('𞁨', '𞁨'), +]; + +pub const TERMINAL_PUNCTUATION: &'static [(char, char)] = &[ + ('!', '!'), + (',', ','), + ('.', '.'), + (':', ';'), + ('?', '?'), + (';', ';'), + ('·', '·'), + ('։', '։'), + ('׃', '׃'), + ('،', '،'), + ('؛', '؛'), + ('؝', '؟'), + ('۔', '۔'), + ('܀', '܊'), + ('܌', '܌'), + ('߸', '߹'), + ('࠰', '࠵'), + ('࠷', '࠾'), + ('࡞', '࡞'), + ('।', '॥'), + ('๚', '๛'), + ('༈', '༈'), + ('།', '༒'), + ('၊', '။'), + ('፡', '፨'), + ('᙮', '᙮'), + ('᛫', '᛭'), + ('᜵', '᜶'), + ('។', '៖'), + ('៚', '៚'), + ('᠂', '᠅'), + ('᠈', '᠉'), + ('᥄', '᥅'), + ('᪨', '᪫'), + ('᭎', '᭏'), + ('᭚', '᭛'), + ('᭝', '᭟'), + ('᭽', '᭿'), + ('᰻', '᰿'), + ('᱾', '᱿'), + ('․', '․'), + ('‼', '‽'), + ('⁇', '⁉'), + ('⳹', '⳻'), + ('⸮', '⸮'), + ('⸼', '⸼'), + ('⹁', '⹁'), + ('⹌', '⹌'), + ('⹎', '⹏'), + ('⹓', '⹔'), + ('、', '。'), + ('꓾', '꓿'), + ('꘍', '꘏'), + ('꛳', '꛷'), + ('꡶', '꡷'), + ('꣎', '꣏'), + ('꤯', '꤯'), + ('꧇', '꧉'), + ('꩝', '꩟'), + ('꫟', '꫟'), + ('꫰', '꫱'), + ('꯫', '꯫'), + ('︒', '︒'), + ('︕', '︖'), + ('﹐', '﹒'), + ('﹔', '﹗'), + ('!', '!'), + (',', ','), + ('.', '.'), + (':', ';'), + ('?', '?'), + ('。', '。'), + ('、', '、'), + ('𐎟', '𐎟'), + ('𐏐', '𐏐'), + ('𐡗', '𐡗'), + ('𐤟', '𐤟'), + ('𐩖', '𐩗'), + ('𐫰', '𐫵'), + ('𐬺', '𐬿'), + ('𐮙', '𐮜'), + ('𐽕', '𐽙'), + ('𐾆', '𐾉'), + ('𑁇', '𑁍'), + ('𑂾', '𑃁'), + ('𑅁', '𑅃'), + ('𑇅', '𑇆'), + ('𑇍', '𑇍'), + ('𑇞', '𑇟'), + ('𑈸', '𑈼'), + ('𑊩', '𑊩'), + ('𑏔', '𑏕'), + ('𑑋', '𑑍'), + ('𑑚', '𑑛'), + ('𑗂', '𑗅'), + ('𑗉', '𑗗'), + ('𑙁', '𑙂'), + ('𑜼', '𑜾'), + ('𑥄', '𑥄'), + ('𑥆', '𑥆'), + ('𑩂', '𑩃'), + ('𑪛', '𑪜'), + ('𑪡', '𑪢'), + ('𑱁', '𑱃'), + ('𑱱', '𑱱'), + ('𑻷', '𑻸'), + ('𑽃', '𑽄'), + ('𒑰', '𒑴'), + ('𖩮', '𖩯'), + ('𖫵', '𖫵'), 
+ ('𖬷', '𖬹'), + ('𖭄', '𖭄'), + ('𖵮', '𖵯'), + ('𖺗', '𖺘'), + ('𛲟', '𛲟'), + ('𝪇', '𝪊'), +]; + +pub const UNIFIED_IDEOGRAPH: &'static [(char, char)] = &[ + ('㐀', '䶿'), + ('一', '鿿'), + ('﨎', '﨏'), + ('﨑', '﨑'), + ('﨓', '﨔'), + ('﨟', '﨟'), + ('﨡', '﨡'), + ('﨣', '﨤'), + ('﨧', '﨩'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const UPPERCASE: &'static [(char, char)] = &[ + ('A', 'Z'), + ('À', 'Ö'), + ('Ø', 'Þ'), + ('Ā', 'Ā'), + ('Ă', 'Ă'), + ('Ą', 'Ą'), + ('Ć', 'Ć'), + ('Ĉ', 'Ĉ'), + ('Ċ', 'Ċ'), + ('Č', 'Č'), + ('Ď', 'Ď'), + ('Đ', 'Đ'), + ('Ē', 'Ē'), + ('Ĕ', 'Ĕ'), + ('Ė', 'Ė'), + ('Ę', 'Ę'), + ('Ě', 'Ě'), + ('Ĝ', 'Ĝ'), + ('Ğ', 'Ğ'), + ('Ġ', 'Ġ'), + ('Ģ', 'Ģ'), + ('Ĥ', 'Ĥ'), + ('Ħ', 'Ħ'), + ('Ĩ', 'Ĩ'), + ('Ī', 'Ī'), + ('Ĭ', 'Ĭ'), + ('Į', 'Į'), + ('İ', 'İ'), + ('IJ', 'IJ'), + ('Ĵ', 'Ĵ'), + ('Ķ', 'Ķ'), + ('Ĺ', 'Ĺ'), + ('Ļ', 'Ļ'), + ('Ľ', 'Ľ'), + ('Ŀ', 'Ŀ'), + ('Ł', 'Ł'), + ('Ń', 'Ń'), + ('Ņ', 'Ņ'), + ('Ň', 'Ň'), + ('Ŋ', 'Ŋ'), + ('Ō', 'Ō'), + ('Ŏ', 'Ŏ'), + ('Ő', 'Ő'), + ('Œ', 'Œ'), + ('Ŕ', 'Ŕ'), + ('Ŗ', 'Ŗ'), + ('Ř', 'Ř'), + ('Ś', 'Ś'), + ('Ŝ', 'Ŝ'), + ('Ş', 'Ş'), + ('Š', 'Š'), + ('Ţ', 'Ţ'), + ('Ť', 'Ť'), + ('Ŧ', 'Ŧ'), + ('Ũ', 'Ũ'), + ('Ū', 'Ū'), + ('Ŭ', 'Ŭ'), + ('Ů', 'Ů'), + ('Ű', 'Ű'), + ('Ų', 'Ų'), + ('Ŵ', 'Ŵ'), + ('Ŷ', 'Ŷ'), + ('Ÿ', 'Ź'), + ('Ż', 'Ż'), + ('Ž', 'Ž'), + ('Ɓ', 'Ƃ'), + ('Ƅ', 'Ƅ'), + ('Ɔ', 'Ƈ'), + ('Ɖ', 'Ƌ'), + ('Ǝ', 'Ƒ'), + ('Ɠ', 'Ɣ'), + ('Ɩ', 'Ƙ'), + ('Ɯ', 'Ɲ'), + ('Ɵ', 'Ơ'), + ('Ƣ', 'Ƣ'), + ('Ƥ', 'Ƥ'), + ('Ʀ', 'Ƨ'), + ('Ʃ', 'Ʃ'), + ('Ƭ', 'Ƭ'), + ('Ʈ', 'Ư'), + ('Ʊ', 'Ƴ'), + ('Ƶ', 'Ƶ'), + ('Ʒ', 'Ƹ'), + ('Ƽ', 'Ƽ'), + ('DŽ', 'DŽ'), + ('LJ', 'LJ'), + ('NJ', 'NJ'), + ('Ǎ', 'Ǎ'), + ('Ǐ', 'Ǐ'), + ('Ǒ', 'Ǒ'), + ('Ǔ', 'Ǔ'), + ('Ǖ', 'Ǖ'), + ('Ǘ', 'Ǘ'), + ('Ǚ', 'Ǚ'), + ('Ǜ', 'Ǜ'), + ('Ǟ', 'Ǟ'), + ('Ǡ', 'Ǡ'), + ('Ǣ', 'Ǣ'), + ('Ǥ', 'Ǥ'), + ('Ǧ', 'Ǧ'), + ('Ǩ', 'Ǩ'), + ('Ǫ', 'Ǫ'), + ('Ǭ', 'Ǭ'), + ('Ǯ', 'Ǯ'), + ('DZ', 'DZ'), + ('Ǵ', 'Ǵ'), + ('Ƕ', 'Ǹ'), + ('Ǻ', 'Ǻ'), + ('Ǽ', 'Ǽ'), + ('Ǿ', 'Ǿ'), + 
('Ȁ', 'Ȁ'), + ('Ȃ', 'Ȃ'), + ('Ȅ', 'Ȅ'), + ('Ȇ', 'Ȇ'), + ('Ȉ', 'Ȉ'), + ('Ȋ', 'Ȋ'), + ('Ȍ', 'Ȍ'), + ('Ȏ', 'Ȏ'), + ('Ȑ', 'Ȑ'), + ('Ȓ', 'Ȓ'), + ('Ȕ', 'Ȕ'), + ('Ȗ', 'Ȗ'), + ('Ș', 'Ș'), + ('Ț', 'Ț'), + ('Ȝ', 'Ȝ'), + ('Ȟ', 'Ȟ'), + ('Ƞ', 'Ƞ'), + ('Ȣ', 'Ȣ'), + ('Ȥ', 'Ȥ'), + ('Ȧ', 'Ȧ'), + ('Ȩ', 'Ȩ'), + ('Ȫ', 'Ȫ'), + ('Ȭ', 'Ȭ'), + ('Ȯ', 'Ȯ'), + ('Ȱ', 'Ȱ'), + ('Ȳ', 'Ȳ'), + ('Ⱥ', 'Ȼ'), + ('Ƚ', 'Ⱦ'), + ('Ɂ', 'Ɂ'), + ('Ƀ', 'Ɇ'), + ('Ɉ', 'Ɉ'), + ('Ɋ', 'Ɋ'), + ('Ɍ', 'Ɍ'), + ('Ɏ', 'Ɏ'), + ('Ͱ', 'Ͱ'), + ('Ͳ', 'Ͳ'), + ('Ͷ', 'Ͷ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ώ'), + ('Α', 'Ρ'), + ('Σ', 'Ϋ'), + ('Ϗ', 'Ϗ'), + ('ϒ', 'ϔ'), + ('Ϙ', 'Ϙ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'Ϣ'), + ('Ϥ', 'Ϥ'), + ('Ϧ', 'Ϧ'), + ('Ϩ', 'Ϩ'), + ('Ϫ', 'Ϫ'), + ('Ϭ', 'Ϭ'), + ('Ϯ', 'Ϯ'), + ('ϴ', 'ϴ'), + ('Ϸ', 'Ϸ'), + ('Ϲ', 'Ϻ'), + ('Ͻ', 'Я'), + ('Ѡ', 'Ѡ'), + ('Ѣ', 'Ѣ'), + ('Ѥ', 'Ѥ'), + ('Ѧ', 'Ѧ'), + ('Ѩ', 'Ѩ'), + ('Ѫ', 'Ѫ'), + ('Ѭ', 'Ѭ'), + ('Ѯ', 'Ѯ'), + ('Ѱ', 'Ѱ'), + ('Ѳ', 'Ѳ'), + ('Ѵ', 'Ѵ'), + ('Ѷ', 'Ѷ'), + ('Ѹ', 'Ѹ'), + ('Ѻ', 'Ѻ'), + ('Ѽ', 'Ѽ'), + ('Ѿ', 'Ѿ'), + ('Ҁ', 'Ҁ'), + ('Ҋ', 'Ҋ'), + ('Ҍ', 'Ҍ'), + ('Ҏ', 'Ҏ'), + ('Ґ', 'Ґ'), + ('Ғ', 'Ғ'), + ('Ҕ', 'Ҕ'), + ('Җ', 'Җ'), + ('Ҙ', 'Ҙ'), + ('Қ', 'Қ'), + ('Ҝ', 'Ҝ'), + ('Ҟ', 'Ҟ'), + ('Ҡ', 'Ҡ'), + ('Ң', 'Ң'), + ('Ҥ', 'Ҥ'), + ('Ҧ', 'Ҧ'), + ('Ҩ', 'Ҩ'), + ('Ҫ', 'Ҫ'), + ('Ҭ', 'Ҭ'), + ('Ү', 'Ү'), + ('Ұ', 'Ұ'), + ('Ҳ', 'Ҳ'), + ('Ҵ', 'Ҵ'), + ('Ҷ', 'Ҷ'), + ('Ҹ', 'Ҹ'), + ('Һ', 'Һ'), + ('Ҽ', 'Ҽ'), + ('Ҿ', 'Ҿ'), + ('Ӏ', 'Ӂ'), + ('Ӄ', 'Ӄ'), + ('Ӆ', 'Ӆ'), + ('Ӈ', 'Ӈ'), + ('Ӊ', 'Ӊ'), + ('Ӌ', 'Ӌ'), + ('Ӎ', 'Ӎ'), + ('Ӑ', 'Ӑ'), + ('Ӓ', 'Ӓ'), + ('Ӕ', 'Ӕ'), + ('Ӗ', 'Ӗ'), + ('Ә', 'Ә'), + ('Ӛ', 'Ӛ'), + ('Ӝ', 'Ӝ'), + ('Ӟ', 'Ӟ'), + ('Ӡ', 'Ӡ'), + ('Ӣ', 'Ӣ'), + ('Ӥ', 'Ӥ'), + ('Ӧ', 'Ӧ'), + ('Ө', 'Ө'), + ('Ӫ', 'Ӫ'), + ('Ӭ', 'Ӭ'), + ('Ӯ', 'Ӯ'), + ('Ӱ', 'Ӱ'), + ('Ӳ', 'Ӳ'), + ('Ӵ', 'Ӵ'), + ('Ӷ', 'Ӷ'), + ('Ӹ', 'Ӹ'), + ('Ӻ', 'Ӻ'), + ('Ӽ', 'Ӽ'), + ('Ӿ', 'Ӿ'), + ('Ԁ', 'Ԁ'), + ('Ԃ', 'Ԃ'), + ('Ԅ', 'Ԅ'), + ('Ԇ', 'Ԇ'), + ('Ԉ', 'Ԉ'), + ('Ԋ', 'Ԋ'), 
+ ('Ԍ', 'Ԍ'), + ('Ԏ', 'Ԏ'), + ('Ԑ', 'Ԑ'), + ('Ԓ', 'Ԓ'), + ('Ԕ', 'Ԕ'), + ('Ԗ', 'Ԗ'), + ('Ԙ', 'Ԙ'), + ('Ԛ', 'Ԛ'), + ('Ԝ', 'Ԝ'), + ('Ԟ', 'Ԟ'), + ('Ԡ', 'Ԡ'), + ('Ԣ', 'Ԣ'), + ('Ԥ', 'Ԥ'), + ('Ԧ', 'Ԧ'), + ('Ԩ', 'Ԩ'), + ('Ԫ', 'Ԫ'), + ('Ԭ', 'Ԭ'), + ('Ԯ', 'Ԯ'), + ('Ա', 'Ֆ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('Ꭰ', 'Ᏽ'), + ('Ᲊ', 'Ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('Ḁ', 'Ḁ'), + ('Ḃ', 'Ḃ'), + ('Ḅ', 'Ḅ'), + ('Ḇ', 'Ḇ'), + ('Ḉ', 'Ḉ'), + ('Ḋ', 'Ḋ'), + ('Ḍ', 'Ḍ'), + ('Ḏ', 'Ḏ'), + ('Ḑ', 'Ḑ'), + ('Ḓ', 'Ḓ'), + ('Ḕ', 'Ḕ'), + ('Ḗ', 'Ḗ'), + ('Ḙ', 'Ḙ'), + ('Ḛ', 'Ḛ'), + ('Ḝ', 'Ḝ'), + ('Ḟ', 'Ḟ'), + ('Ḡ', 'Ḡ'), + ('Ḣ', 'Ḣ'), + ('Ḥ', 'Ḥ'), + ('Ḧ', 'Ḧ'), + ('Ḩ', 'Ḩ'), + ('Ḫ', 'Ḫ'), + ('Ḭ', 'Ḭ'), + ('Ḯ', 'Ḯ'), + ('Ḱ', 'Ḱ'), + ('Ḳ', 'Ḳ'), + ('Ḵ', 'Ḵ'), + ('Ḷ', 'Ḷ'), + ('Ḹ', 'Ḹ'), + ('Ḻ', 'Ḻ'), + ('Ḽ', 'Ḽ'), + ('Ḿ', 'Ḿ'), + ('Ṁ', 'Ṁ'), + ('Ṃ', 'Ṃ'), + ('Ṅ', 'Ṅ'), + ('Ṇ', 'Ṇ'), + ('Ṉ', 'Ṉ'), + ('Ṋ', 'Ṋ'), + ('Ṍ', 'Ṍ'), + ('Ṏ', 'Ṏ'), + ('Ṑ', 'Ṑ'), + ('Ṓ', 'Ṓ'), + ('Ṕ', 'Ṕ'), + ('Ṗ', 'Ṗ'), + ('Ṙ', 'Ṙ'), + ('Ṛ', 'Ṛ'), + ('Ṝ', 'Ṝ'), + ('Ṟ', 'Ṟ'), + ('Ṡ', 'Ṡ'), + ('Ṣ', 'Ṣ'), + ('Ṥ', 'Ṥ'), + ('Ṧ', 'Ṧ'), + ('Ṩ', 'Ṩ'), + ('Ṫ', 'Ṫ'), + ('Ṭ', 'Ṭ'), + ('Ṯ', 'Ṯ'), + ('Ṱ', 'Ṱ'), + ('Ṳ', 'Ṳ'), + ('Ṵ', 'Ṵ'), + ('Ṷ', 'Ṷ'), + ('Ṹ', 'Ṹ'), + ('Ṻ', 'Ṻ'), + ('Ṽ', 'Ṽ'), + ('Ṿ', 'Ṿ'), + ('Ẁ', 'Ẁ'), + ('Ẃ', 'Ẃ'), + ('Ẅ', 'Ẅ'), + ('Ẇ', 'Ẇ'), + ('Ẉ', 'Ẉ'), + ('Ẋ', 'Ẋ'), + ('Ẍ', 'Ẍ'), + ('Ẏ', 'Ẏ'), + ('Ẑ', 'Ẑ'), + ('Ẓ', 'Ẓ'), + ('Ẕ', 'Ẕ'), + ('ẞ', 'ẞ'), + ('Ạ', 'Ạ'), + ('Ả', 'Ả'), + ('Ấ', 'Ấ'), + ('Ầ', 'Ầ'), + ('Ẩ', 'Ẩ'), + ('Ẫ', 'Ẫ'), + ('Ậ', 'Ậ'), + ('Ắ', 'Ắ'), + ('Ằ', 'Ằ'), + ('Ẳ', 'Ẳ'), + ('Ẵ', 'Ẵ'), + ('Ặ', 'Ặ'), + ('Ẹ', 'Ẹ'), + ('Ẻ', 'Ẻ'), + ('Ẽ', 'Ẽ'), + ('Ế', 'Ế'), + ('Ề', 'Ề'), + ('Ể', 'Ể'), + ('Ễ', 'Ễ'), + ('Ệ', 'Ệ'), + ('Ỉ', 'Ỉ'), + ('Ị', 'Ị'), + ('Ọ', 'Ọ'), + ('Ỏ', 'Ỏ'), + ('Ố', 'Ố'), + ('Ồ', 'Ồ'), + ('Ổ', 'Ổ'), + ('Ỗ', 'Ỗ'), + ('Ộ', 'Ộ'), + ('Ớ', 'Ớ'), + ('Ờ', 'Ờ'), + ('Ở', 'Ở'), + ('Ỡ', 'Ỡ'), + ('Ợ', 'Ợ'), + ('Ụ', 'Ụ'), + ('Ủ', 'Ủ'), + ('Ứ', 'Ứ'), + ('Ừ', 'Ừ'), + ('Ử', 'Ử'), + ('Ữ', 'Ữ'), + ('Ự', 
'Ự'), + ('Ỳ', 'Ỳ'), + ('Ỵ', 'Ỵ'), + ('Ỷ', 'Ỷ'), + ('Ỹ', 'Ỹ'), + ('Ỻ', 'Ỻ'), + ('Ỽ', 'Ỽ'), + ('Ỿ', 'Ỿ'), + ('Ἀ', 'Ἇ'), + ('Ἐ', 'Ἕ'), + ('Ἠ', 'Ἧ'), + ('Ἰ', 'Ἷ'), + ('Ὀ', 'Ὅ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'Ὗ'), + ('Ὠ', 'Ὧ'), + ('Ᾰ', 'Ά'), + ('Ὲ', 'Ή'), + ('Ῐ', 'Ί'), + ('Ῠ', 'Ῥ'), + ('Ὸ', 'Ώ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℋ', 'ℍ'), + ('ℐ', 'ℒ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℰ', 'ℳ'), + ('ℾ', 'ℿ'), + ('ⅅ', 'ⅅ'), + ('Ⅰ', 'Ⅿ'), + ('Ↄ', 'Ↄ'), + ('Ⓐ', 'Ⓩ'), + ('Ⰰ', 'Ⱟ'), + ('Ⱡ', 'Ⱡ'), + ('Ɫ', 'Ɽ'), + ('Ⱨ', 'Ⱨ'), + ('Ⱪ', 'Ⱪ'), + ('Ⱬ', 'Ⱬ'), + ('Ɑ', 'Ɒ'), + ('Ⱳ', 'Ⱳ'), + ('Ⱶ', 'Ⱶ'), + ('Ȿ', 'Ⲁ'), + ('Ⲃ', 'Ⲃ'), + ('Ⲅ', 'Ⲅ'), + ('Ⲇ', 'Ⲇ'), + ('Ⲉ', 'Ⲉ'), + ('Ⲋ', 'Ⲋ'), + ('Ⲍ', 'Ⲍ'), + ('Ⲏ', 'Ⲏ'), + ('Ⲑ', 'Ⲑ'), + ('Ⲓ', 'Ⲓ'), + ('Ⲕ', 'Ⲕ'), + ('Ⲗ', 'Ⲗ'), + ('Ⲙ', 'Ⲙ'), + ('Ⲛ', 'Ⲛ'), + ('Ⲝ', 'Ⲝ'), + ('Ⲟ', 'Ⲟ'), + ('Ⲡ', 'Ⲡ'), + ('Ⲣ', 'Ⲣ'), + ('Ⲥ', 'Ⲥ'), + ('Ⲧ', 'Ⲧ'), + ('Ⲩ', 'Ⲩ'), + ('Ⲫ', 'Ⲫ'), + ('Ⲭ', 'Ⲭ'), + ('Ⲯ', 'Ⲯ'), + ('Ⲱ', 'Ⲱ'), + ('Ⲳ', 'Ⲳ'), + ('Ⲵ', 'Ⲵ'), + ('Ⲷ', 'Ⲷ'), + ('Ⲹ', 'Ⲹ'), + ('Ⲻ', 'Ⲻ'), + ('Ⲽ', 'Ⲽ'), + ('Ⲿ', 'Ⲿ'), + ('Ⳁ', 'Ⳁ'), + ('Ⳃ', 'Ⳃ'), + ('Ⳅ', 'Ⳅ'), + ('Ⳇ', 'Ⳇ'), + ('Ⳉ', 'Ⳉ'), + ('Ⳋ', 'Ⳋ'), + ('Ⳍ', 'Ⳍ'), + ('Ⳏ', 'Ⳏ'), + ('Ⳑ', 'Ⳑ'), + ('Ⳓ', 'Ⳓ'), + ('Ⳕ', 'Ⳕ'), + ('Ⳗ', 'Ⳗ'), + ('Ⳙ', 'Ⳙ'), + ('Ⳛ', 'Ⳛ'), + ('Ⳝ', 'Ⳝ'), + ('Ⳟ', 'Ⳟ'), + ('Ⳡ', 'Ⳡ'), + ('Ⳣ', 'Ⳣ'), + ('Ⳬ', 'Ⳬ'), + ('Ⳮ', 'Ⳮ'), + ('Ⳳ', 'Ⳳ'), + ('Ꙁ', 'Ꙁ'), + ('Ꙃ', 'Ꙃ'), + ('Ꙅ', 'Ꙅ'), + ('Ꙇ', 'Ꙇ'), + ('Ꙉ', 'Ꙉ'), + ('Ꙋ', 'Ꙋ'), + ('Ꙍ', 'Ꙍ'), + ('Ꙏ', 'Ꙏ'), + ('Ꙑ', 'Ꙑ'), + ('Ꙓ', 'Ꙓ'), + ('Ꙕ', 'Ꙕ'), + ('Ꙗ', 'Ꙗ'), + ('Ꙙ', 'Ꙙ'), + ('Ꙛ', 'Ꙛ'), + ('Ꙝ', 'Ꙝ'), + ('Ꙟ', 'Ꙟ'), + ('Ꙡ', 'Ꙡ'), + ('Ꙣ', 'Ꙣ'), + ('Ꙥ', 'Ꙥ'), + ('Ꙧ', 'Ꙧ'), + ('Ꙩ', 'Ꙩ'), + ('Ꙫ', 'Ꙫ'), + ('Ꙭ', 'Ꙭ'), + ('Ꚁ', 'Ꚁ'), + ('Ꚃ', 'Ꚃ'), + ('Ꚅ', 'Ꚅ'), + ('Ꚇ', 'Ꚇ'), + ('Ꚉ', 'Ꚉ'), + ('Ꚋ', 'Ꚋ'), + ('Ꚍ', 'Ꚍ'), + ('Ꚏ', 'Ꚏ'), + ('Ꚑ', 'Ꚑ'), + ('Ꚓ', 'Ꚓ'), + ('Ꚕ', 'Ꚕ'), + ('Ꚗ', 'Ꚗ'), + ('Ꚙ', 'Ꚙ'), + ('Ꚛ', 'Ꚛ'), + ('Ꜣ', 'Ꜣ'), + ('Ꜥ', 'Ꜥ'), + ('Ꜧ', 'Ꜧ'), + ('Ꜩ', 'Ꜩ'), + ('Ꜫ', 'Ꜫ'), + 
('Ꜭ', 'Ꜭ'), + ('Ꜯ', 'Ꜯ'), + ('Ꜳ', 'Ꜳ'), + ('Ꜵ', 'Ꜵ'), + ('Ꜷ', 'Ꜷ'), + ('Ꜹ', 'Ꜹ'), + ('Ꜻ', 'Ꜻ'), + ('Ꜽ', 'Ꜽ'), + ('Ꜿ', 'Ꜿ'), + ('Ꝁ', 'Ꝁ'), + ('Ꝃ', 'Ꝃ'), + ('Ꝅ', 'Ꝅ'), + ('Ꝇ', 'Ꝇ'), + ('Ꝉ', 'Ꝉ'), + ('Ꝋ', 'Ꝋ'), + ('Ꝍ', 'Ꝍ'), + ('Ꝏ', 'Ꝏ'), + ('Ꝑ', 'Ꝑ'), + ('Ꝓ', 'Ꝓ'), + ('Ꝕ', 'Ꝕ'), + ('Ꝗ', 'Ꝗ'), + ('Ꝙ', 'Ꝙ'), + ('Ꝛ', 'Ꝛ'), + ('Ꝝ', 'Ꝝ'), + ('Ꝟ', 'Ꝟ'), + ('Ꝡ', 'Ꝡ'), + ('Ꝣ', 'Ꝣ'), + ('Ꝥ', 'Ꝥ'), + ('Ꝧ', 'Ꝧ'), + ('Ꝩ', 'Ꝩ'), + ('Ꝫ', 'Ꝫ'), + ('Ꝭ', 'Ꝭ'), + ('Ꝯ', 'Ꝯ'), + ('Ꝺ', 'Ꝺ'), + ('Ꝼ', 'Ꝼ'), + ('Ᵹ', 'Ꝿ'), + ('Ꞁ', 'Ꞁ'), + ('Ꞃ', 'Ꞃ'), + ('Ꞅ', 'Ꞅ'), + ('Ꞇ', 'Ꞇ'), + ('Ꞌ', 'Ꞌ'), + ('Ɥ', 'Ɥ'), + ('Ꞑ', 'Ꞑ'), + ('Ꞓ', 'Ꞓ'), + ('Ꞗ', 'Ꞗ'), + ('Ꞙ', 'Ꞙ'), + ('Ꞛ', 'Ꞛ'), + ('Ꞝ', 'Ꞝ'), + ('Ꞟ', 'Ꞟ'), + ('Ꞡ', 'Ꞡ'), + ('Ꞣ', 'Ꞣ'), + ('Ꞥ', 'Ꞥ'), + ('Ꞧ', 'Ꞧ'), + ('Ꞩ', 'Ꞩ'), + ('Ɦ', 'Ɪ'), + ('Ʞ', 'Ꞵ'), + ('Ꞷ', 'Ꞷ'), + ('Ꞹ', 'Ꞹ'), + ('Ꞻ', 'Ꞻ'), + ('Ꞽ', 'Ꞽ'), + ('Ꞿ', 'Ꞿ'), + ('Ꟁ', 'Ꟁ'), + ('Ꟃ', 'Ꟃ'), + ('Ꞔ', 'Ꟈ'), + ('Ꟊ', 'Ꟊ'), + ('Ɤ', 'Ꟍ'), + ('Ꟑ', 'Ꟑ'), + ('Ꟗ', 'Ꟗ'), + ('Ꟙ', 'Ꟙ'), + ('Ꟛ', 'Ꟛ'), + ('Ƛ', 'Ƛ'), + ('Ꟶ', 'Ꟶ'), + ('A', 'Z'), + ('𐐀', '𐐧'), + ('𐒰', '𐓓'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐲀', '𐲲'), + ('𐵐', '𐵥'), + ('𑢠', '𑢿'), + ('𖹀', '𖹟'), + ('𝐀', '𝐙'), + ('𝐴', '𝑍'), + ('𝑨', '𝒁'), + ('𝒜', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒵'), + ('𝓐', '𝓩'), + ('𝔄', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔸', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕬', '𝖅'), + ('𝖠', '𝖹'), + ('𝗔', '𝗭'), + ('𝘈', '𝘡'), + ('𝘼', '𝙕'), + ('𝙰', '𝚉'), + ('𝚨', '𝛀'), + ('𝛢', '𝛺'), + ('𝜜', '𝜴'), + ('𝝖', '𝝮'), + ('𝞐', '𝞨'), + ('𝟊', '𝟊'), + ('𞤀', '𞤡'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), +]; + +pub const VARIATION_SELECTOR: &'static [(char, char)] = &[ + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const WHITE_SPACE: &'static [(char, char)] = &[ + ('\t', '\r'), + (' ', ' '), + ('\u{85}', '\u{85}'), + ('\u{a0}', '\u{a0}'), + ('\u{1680}', '\u{1680}'), + 
('\u{2000}', '\u{200a}'), + ('\u{2028}', '\u{2029}'), + ('\u{202f}', '\u{202f}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; + +pub const XID_CONTINUE: &'static [(char, char)] = &[ + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('·', '·'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('\u{300}', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͻ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('\u{483}', '\u{487}'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('\u{610}', '\u{61a}'), + ('ؠ', '٩'), + ('ٮ', 'ۓ'), + ('ە', '\u{6dc}'), + ('\u{6df}', '\u{6e8}'), + ('\u{6ea}', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', '\u{74a}'), + ('ݍ', 'ޱ'), + ('߀', 'ߵ'), + ('ߺ', 'ߺ'), + ('\u{7fd}', '\u{7fd}'), + ('ࠀ', '\u{82d}'), + ('ࡀ', '\u{85b}'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('\u{897}', '\u{8e1}'), + ('\u{8e3}', '\u{963}'), + ('०', '९'), + ('ॱ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', 'ৱ'), + ('ৼ', 'ৼ'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('૦', '૯'), + ('ૹ', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), 
+ ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('୦', '୯'), + ('ୱ', 'ୱ'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('௦', '௯'), + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('\u{c3c}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('౦', '౯'), + ('ಀ', 'ಃ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('\u{cbc}', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('೦', '೯'), + ('ೱ', 'ೳ'), + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', 'ൎ'), + ('ൔ', '\u{d57}'), + ('ൟ', '\u{d63}'), + ('൦', '൯'), + ('ൺ', 'ൿ'), + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('෦', '෯'), + ('ෲ', 'ෳ'), + ('ก', '\u{e3a}'), + ('เ', '\u{e4e}'), + ('๐', '๙'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('໐', '໙'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('\u{f18}', '\u{f19}'), + ('༠', '༩'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('က', '၉'), + ('ၐ', '\u{109d}'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + 
('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('\u{135d}', '\u{135f}'), + ('፩', '፱'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', '\u{1715}'), + ('ᜟ', '\u{1734}'), + ('ᝀ', '\u{1753}'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('\u{1772}', '\u{1773}'), + ('ក', '\u{17d3}'), + ('ៗ', 'ៗ'), + ('ៜ', '\u{17dd}'), + ('០', '៩'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '᠙'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥆', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('᧐', '᧚'), + ('ᨀ', '\u{1a1b}'), + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('ᪧ', 'ᪧ'), + ('\u{1ab0}', '\u{1abd}'), + ('\u{1abf}', '\u{1ace}'), + ('\u{1b00}', 'ᭌ'), + ('᭐', '᭙'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', '\u{1bf3}'), + ('ᰀ', '\u{1c37}'), + ('᱀', '᱉'), + ('ᱍ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', 'ᳺ'), + ('ᴀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('\u{200c}', '\u{200d}'), + ('‿', '⁀'), + ('⁔', '⁔'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('\u{20d0}', '\u{20dc}'), + ('\u{20e1}', '\u{20e1}'), + ('\u{20e5}', '\u{20f0}'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('℘', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('\u{2d7f}', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('\u{2de0}', '\u{2dff}'), + ('々', '〇'), + ('〡', '\u{302f}'), + ('〱', '〵'), 
+ ('〸', '〼'), + ('ぁ', 'ゖ'), + ('\u{3099}', '\u{309a}'), + ('ゝ', 'ゟ'), + ('ァ', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘫ'), + ('Ꙁ', '\u{a66f}'), + ('\u{a674}', '\u{a67d}'), + ('ꙿ', '\u{a6f1}'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꡀ', 'ꡳ'), + ('ꢀ', '\u{a8c5}'), + ('꣐', '꣙'), + ('\u{a8e0}', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', '\u{a92d}'), + ('ꤰ', '\u{a953}'), + ('ꥠ', 'ꥼ'), + ('\u{a980}', '\u{a9c0}'), + ('ꧏ', '꧙'), + ('ꧠ', 'ꧾ'), + ('ꨀ', '\u{aa36}'), + ('ꩀ', 'ꩍ'), + ('꩐', '꩙'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫯ'), + ('ꫲ', '\u{aaf6}'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('꯰', '꯹'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﱝ'), + ('ﱤ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷹ'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('︳', '︴'), + ('﹍', '﹏'), + ('ﹱ', 'ﹱ'), + ('ﹳ', 'ﹳ'), + ('ﹷ', 'ﹷ'), + ('ﹹ', 'ﹹ'), + ('ﹻ', 'ﹻ'), + ('ﹽ', 'ﹽ'), + ('ﹿ', 'ﻼ'), + ('0', '9'), + ('A', 'Z'), + ('_', '_'), + ('a', 'z'), + ('・', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('\u{101fd}', '\u{101fd}'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('\u{102e0}', '\u{102e0}'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '\u{1037a}'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒠', '𐒩'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + 
('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '\u{10ae6}'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '\u{10d27}'), + ('𐴰', '𐴹'), + ('𐵀', '𐵥'), + ('\u{10d69}', '\u{10d6d}'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('\u{10eab}', '\u{10eac}'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('\u{10efc}', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '\u{10f50}'), + ('𐽰', '\u{10f85}'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀀', '\u{11046}'), + ('𑁦', '𑁵'), + ('\u{1107f}', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('𑃐', '𑃨'), + ('𑃰', '𑃹'), + ('\u{11100}', '\u{11134}'), + ('𑄶', '𑄿'), + ('𑅄', '𑅇'), + ('𑅐', '\u{11173}'), + ('𑅶', '𑅶'), + ('\u{11180}', '𑇄'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '\u{11237}'), + ('\u{1123e}', '\u{11241}'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '\u{112ea}'), + ('𑋰', '𑋹'), + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133b}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏓'), + ('\u{113e1}', '\u{113e2}'), + ('𑐀', '𑑊'), + ('𑑐', '𑑙'), + ('\u{1145e}', '𑑡'), + ('𑒀', '𑓅'), + ('𑓇', '𑓇'), + ('𑓐', '𑓙'), + ('𑖀', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('𑗘', '\u{115dd}'), + ('𑘀', '\u{11640}'), + 
('𑙄', '𑙄'), + ('𑙐', '𑙙'), + ('𑚀', '𑚸'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜀', '𑜚'), + ('\u{1171d}', '\u{1172b}'), + ('𑜰', '𑜹'), + ('𑝀', '𑝆'), + ('𑠀', '\u{1183a}'), + ('𑢠', '𑣩'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{11943}'), + ('𑥐', '𑥙'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧡'), + ('𑧣', '𑧤'), + ('𑨀', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('𑩐', '\u{11a99}'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑯰', '𑯹'), + ('𑰀', '𑰈'), + ('𑰊', '\u{11c36}'), + ('\u{11c38}', '𑱀'), + ('𑱐', '𑱙'), + ('𑱲', '𑲏'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), + ('𑻠', '𑻶'), + ('\u{11f00}', '𑼐'), + ('𑼒', '\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('𑽐', '\u{11f5a}'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('\u{13440}', '\u{13455}'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄹'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩠', '𖩩'), + ('𖩰', '𖪾'), + ('𖫀', '𖫉'), + ('𖫐', '𖫭'), + ('\u{16af0}', '\u{16af4}'), + ('𖬀', '\u{16b36}'), + ('𖭀', '𖭃'), + ('𖭐', '𖭙'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖵰', '𖵹'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('\u{16f4f}', '𖾇'), + ('\u{16f8f}', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('𜳰', '𜳹'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + 
('\u{1d242}', '\u{1d244}'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝟎', '𝟿'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), + ('𞄀', '𞄬'), + ('\u{1e130}', '𞄽'), + ('𞅀', '𞅉'), + ('𞅎', '𞅎'), + ('𞊐', '\u{1e2ae}'), + ('𞋀', '𞋹'), + ('𞓐', '𞓹'), + ('𞗐', '𞗺'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('𞤀', '𞥋'), + ('𞥐', '𞥙'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('🯰', '🯹'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const XID_START: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ˁ'), + ('ˆ', 'ˑ'), + ('ˠ', 'ˤ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('Ͱ', 'ʹ'), + ('Ͷ', 
'ͷ'), + ('ͻ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', 'ՙ'), + ('ՠ', 'ֈ'), + ('א', 'ת'), + ('ׯ', 'ײ'), + ('ؠ', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۥ', 'ۦ'), + ('ۮ', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॱ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ก', 'ะ'), + ('า', 'า'), + ('เ', 'ๆ'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ະ'), + ('າ', 'າ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('ໜ', 'ໟ'), + ('ༀ', 
'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('က', 'ဪ'), + ('ဿ', 'ဿ'), + ('ၐ', 'ၕ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၡ'), + ('ၥ', 'ၦ'), + ('ၮ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႎ', 'ႎ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ៗ', 'ៗ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', 'ᨖ'), + ('ᨠ', 'ᩔ'), + ('ᪧ', 'ᪧ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), + ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('℘', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('々', '〇'), + ('〡', '〩'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('ゝ', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + 
('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('ꙿ', 'ꚝ'), + ('ꚠ', 'ꛯ'), + ('ꜗ', 'ꜟ'), + ('Ꜣ', 'ꞈ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧏ', 'ꧏ'), + ('ꧠ', 'ꧤ'), + ('ꧦ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꩺ'), + ('ꩾ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫴ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﱝ'), + ('ﱤ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷹ'), + ('ﹱ', 'ﹱ'), + ('ﹳ', 'ﹳ'), + ('ﹷ', 'ﹷ'), + ('ﹹ', 'ﹹ'), + ('ﹻ', 'ﹻ'), + ('ﹽ', 'ﹽ'), + ('ﹿ', 'ﻼ'), + ('A', 'Z'), + ('a', 'z'), + ('ヲ', 'ン'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', 
'𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '𐴣'), + ('𐵊', '𐵥'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), + ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + ('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('𑝀', '𑝆'), + ('𑠀', '𑠫'), + ('𑢠', '𑣟'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', '𑩐'), + ('𑩜', '𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', '𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + 
('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞤀', '𞥃'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/property_names.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/property_names.rs new file mode 100644 index 0000000000000000000000000000000000000000..a27b49133d33acf45f70a4dc8c6e88af19531648 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/property_names.rs @@ -0,0 +1,281 @@ +// DO NOT EDIT THIS FILE. 
IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate property-names ucd-16.0.0 +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const PROPERTY_NAMES: &'static [(&'static str, &'static str)] = &[ + ("age", "Age"), + ("ahex", "ASCII_Hex_Digit"), + ("alpha", "Alphabetic"), + ("alphabetic", "Alphabetic"), + ("asciihexdigit", "ASCII_Hex_Digit"), + ("bc", "Bidi_Class"), + ("bidic", "Bidi_Control"), + ("bidiclass", "Bidi_Class"), + ("bidicontrol", "Bidi_Control"), + ("bidim", "Bidi_Mirrored"), + ("bidimirrored", "Bidi_Mirrored"), + ("bidimirroringglyph", "Bidi_Mirroring_Glyph"), + ("bidipairedbracket", "Bidi_Paired_Bracket"), + ("bidipairedbrackettype", "Bidi_Paired_Bracket_Type"), + ("blk", "Block"), + ("block", "Block"), + ("bmg", "Bidi_Mirroring_Glyph"), + ("bpb", "Bidi_Paired_Bracket"), + ("bpt", "Bidi_Paired_Bracket_Type"), + ("canonicalcombiningclass", "Canonical_Combining_Class"), + ("cased", "Cased"), + ("casefolding", "Case_Folding"), + ("caseignorable", "Case_Ignorable"), + ("ccc", "Canonical_Combining_Class"), + ("ce", "Composition_Exclusion"), + ("cf", "Case_Folding"), + ("changeswhencasefolded", "Changes_When_Casefolded"), + ("changeswhencasemapped", "Changes_When_Casemapped"), + ("changeswhenlowercased", "Changes_When_Lowercased"), + ("changeswhennfkccasefolded", "Changes_When_NFKC_Casefolded"), + ("changeswhentitlecased", "Changes_When_Titlecased"), + ("changeswhenuppercased", "Changes_When_Uppercased"), + ("ci", "Case_Ignorable"), + ("cjkaccountingnumeric", "kAccountingNumeric"), + ("cjkcompatibilityvariant", "kCompatibilityVariant"), + ("cjkiicore", "kIICore"), + ("cjkirggsource", "kIRG_GSource"), + ("cjkirghsource", "kIRG_HSource"), + ("cjkirgjsource", "kIRG_JSource"), + ("cjkirgkpsource", "kIRG_KPSource"), + ("cjkirgksource", "kIRG_KSource"), + ("cjkirgmsource", "kIRG_MSource"), + ("cjkirgssource", "kIRG_SSource"), + ("cjkirgtsource", "kIRG_TSource"), + ("cjkirguksource", "kIRG_UKSource"), + 
("cjkirgusource", "kIRG_USource"), + ("cjkirgvsource", "kIRG_VSource"), + ("cjkothernumeric", "kOtherNumeric"), + ("cjkprimarynumeric", "kPrimaryNumeric"), + ("cjkrsunicode", "kRSUnicode"), + ("compex", "Full_Composition_Exclusion"), + ("compositionexclusion", "Composition_Exclusion"), + ("cwcf", "Changes_When_Casefolded"), + ("cwcm", "Changes_When_Casemapped"), + ("cwkcf", "Changes_When_NFKC_Casefolded"), + ("cwl", "Changes_When_Lowercased"), + ("cwt", "Changes_When_Titlecased"), + ("cwu", "Changes_When_Uppercased"), + ("dash", "Dash"), + ("decompositionmapping", "Decomposition_Mapping"), + ("decompositiontype", "Decomposition_Type"), + ("defaultignorablecodepoint", "Default_Ignorable_Code_Point"), + ("dep", "Deprecated"), + ("deprecated", "Deprecated"), + ("di", "Default_Ignorable_Code_Point"), + ("dia", "Diacritic"), + ("diacritic", "Diacritic"), + ("dm", "Decomposition_Mapping"), + ("dt", "Decomposition_Type"), + ("ea", "East_Asian_Width"), + ("eastasianwidth", "East_Asian_Width"), + ("ebase", "Emoji_Modifier_Base"), + ("ecomp", "Emoji_Component"), + ("emod", "Emoji_Modifier"), + ("emoji", "Emoji"), + ("emojicomponent", "Emoji_Component"), + ("emojimodifier", "Emoji_Modifier"), + ("emojimodifierbase", "Emoji_Modifier_Base"), + ("emojipresentation", "Emoji_Presentation"), + ("epres", "Emoji_Presentation"), + ("equideo", "Equivalent_Unified_Ideograph"), + ("equivalentunifiedideograph", "Equivalent_Unified_Ideograph"), + ("expandsonnfc", "Expands_On_NFC"), + ("expandsonnfd", "Expands_On_NFD"), + ("expandsonnfkc", "Expands_On_NFKC"), + ("expandsonnfkd", "Expands_On_NFKD"), + ("ext", "Extender"), + ("extendedpictographic", "Extended_Pictographic"), + ("extender", "Extender"), + ("extpict", "Extended_Pictographic"), + ("fcnfkc", "FC_NFKC_Closure"), + ("fcnfkcclosure", "FC_NFKC_Closure"), + ("fullcompositionexclusion", "Full_Composition_Exclusion"), + ("gc", "General_Category"), + ("gcb", "Grapheme_Cluster_Break"), + ("generalcategory", "General_Category"), + 
("graphemebase", "Grapheme_Base"), + ("graphemeclusterbreak", "Grapheme_Cluster_Break"), + ("graphemeextend", "Grapheme_Extend"), + ("graphemelink", "Grapheme_Link"), + ("grbase", "Grapheme_Base"), + ("grext", "Grapheme_Extend"), + ("grlink", "Grapheme_Link"), + ("hangulsyllabletype", "Hangul_Syllable_Type"), + ("hex", "Hex_Digit"), + ("hexdigit", "Hex_Digit"), + ("hst", "Hangul_Syllable_Type"), + ("hyphen", "Hyphen"), + ("idc", "ID_Continue"), + ("idcompatmathcontinue", "ID_Compat_Math_Continue"), + ("idcompatmathstart", "ID_Compat_Math_Start"), + ("idcontinue", "ID_Continue"), + ("ideo", "Ideographic"), + ("ideographic", "Ideographic"), + ("ids", "ID_Start"), + ("idsb", "IDS_Binary_Operator"), + ("idsbinaryoperator", "IDS_Binary_Operator"), + ("idst", "IDS_Trinary_Operator"), + ("idstart", "ID_Start"), + ("idstrinaryoperator", "IDS_Trinary_Operator"), + ("idsu", "IDS_Unary_Operator"), + ("idsunaryoperator", "IDS_Unary_Operator"), + ("incb", "Indic_Conjunct_Break"), + ("indicconjunctbreak", "Indic_Conjunct_Break"), + ("indicpositionalcategory", "Indic_Positional_Category"), + ("indicsyllabiccategory", "Indic_Syllabic_Category"), + ("inpc", "Indic_Positional_Category"), + ("insc", "Indic_Syllabic_Category"), + ("isc", "ISO_Comment"), + ("jamoshortname", "Jamo_Short_Name"), + ("jg", "Joining_Group"), + ("joinc", "Join_Control"), + ("joincontrol", "Join_Control"), + ("joininggroup", "Joining_Group"), + ("joiningtype", "Joining_Type"), + ("jsn", "Jamo_Short_Name"), + ("jt", "Joining_Type"), + ("kaccountingnumeric", "kAccountingNumeric"), + ("kcompatibilityvariant", "kCompatibilityVariant"), + ("kehcat", "kEH_Cat"), + ("kehdesc", "kEH_Desc"), + ("kehhg", "kEH_HG"), + ("kehifao", "kEH_IFAO"), + ("kehjsesh", "kEH_JSesh"), + ("kehnomirror", "kEH_NoMirror"), + ("kehnorotate", "kEH_NoRotate"), + ("kiicore", "kIICore"), + ("kirggsource", "kIRG_GSource"), + ("kirghsource", "kIRG_HSource"), + ("kirgjsource", "kIRG_JSource"), + ("kirgkpsource", "kIRG_KPSource"), + 
("kirgksource", "kIRG_KSource"), + ("kirgmsource", "kIRG_MSource"), + ("kirgssource", "kIRG_SSource"), + ("kirgtsource", "kIRG_TSource"), + ("kirguksource", "kIRG_UKSource"), + ("kirgusource", "kIRG_USource"), + ("kirgvsource", "kIRG_VSource"), + ("kothernumeric", "kOtherNumeric"), + ("kprimarynumeric", "kPrimaryNumeric"), + ("krsunicode", "kRSUnicode"), + ("lb", "Line_Break"), + ("lc", "Lowercase_Mapping"), + ("linebreak", "Line_Break"), + ("loe", "Logical_Order_Exception"), + ("logicalorderexception", "Logical_Order_Exception"), + ("lower", "Lowercase"), + ("lowercase", "Lowercase"), + ("lowercasemapping", "Lowercase_Mapping"), + ("math", "Math"), + ("mcm", "Modifier_Combining_Mark"), + ("modifiercombiningmark", "Modifier_Combining_Mark"), + ("na", "Name"), + ("na1", "Unicode_1_Name"), + ("name", "Name"), + ("namealias", "Name_Alias"), + ("nchar", "Noncharacter_Code_Point"), + ("nfcqc", "NFC_Quick_Check"), + ("nfcquickcheck", "NFC_Quick_Check"), + ("nfdqc", "NFD_Quick_Check"), + ("nfdquickcheck", "NFD_Quick_Check"), + ("nfkccasefold", "NFKC_Casefold"), + ("nfkccf", "NFKC_Casefold"), + ("nfkcqc", "NFKC_Quick_Check"), + ("nfkcquickcheck", "NFKC_Quick_Check"), + ("nfkcscf", "NFKC_Simple_Casefold"), + ("nfkcsimplecasefold", "NFKC_Simple_Casefold"), + ("nfkdqc", "NFKD_Quick_Check"), + ("nfkdquickcheck", "NFKD_Quick_Check"), + ("noncharactercodepoint", "Noncharacter_Code_Point"), + ("nt", "Numeric_Type"), + ("numerictype", "Numeric_Type"), + ("numericvalue", "Numeric_Value"), + ("nv", "Numeric_Value"), + ("oalpha", "Other_Alphabetic"), + ("ocomment", "ISO_Comment"), + ("odi", "Other_Default_Ignorable_Code_Point"), + ("ogrext", "Other_Grapheme_Extend"), + ("oidc", "Other_ID_Continue"), + ("oids", "Other_ID_Start"), + ("olower", "Other_Lowercase"), + ("omath", "Other_Math"), + ("otheralphabetic", "Other_Alphabetic"), + ("otherdefaultignorablecodepoint", "Other_Default_Ignorable_Code_Point"), + ("othergraphemeextend", "Other_Grapheme_Extend"), + ("otheridcontinue", 
"Other_ID_Continue"), + ("otheridstart", "Other_ID_Start"), + ("otherlowercase", "Other_Lowercase"), + ("othermath", "Other_Math"), + ("otheruppercase", "Other_Uppercase"), + ("oupper", "Other_Uppercase"), + ("patsyn", "Pattern_Syntax"), + ("patternsyntax", "Pattern_Syntax"), + ("patternwhitespace", "Pattern_White_Space"), + ("patws", "Pattern_White_Space"), + ("pcm", "Prepended_Concatenation_Mark"), + ("prependedconcatenationmark", "Prepended_Concatenation_Mark"), + ("qmark", "Quotation_Mark"), + ("quotationmark", "Quotation_Mark"), + ("radical", "Radical"), + ("regionalindicator", "Regional_Indicator"), + ("ri", "Regional_Indicator"), + ("sb", "Sentence_Break"), + ("sc", "Script"), + ("scf", "Simple_Case_Folding"), + ("script", "Script"), + ("scriptextensions", "Script_Extensions"), + ("scx", "Script_Extensions"), + ("sd", "Soft_Dotted"), + ("sentencebreak", "Sentence_Break"), + ("sentenceterminal", "Sentence_Terminal"), + ("sfc", "Simple_Case_Folding"), + ("simplecasefolding", "Simple_Case_Folding"), + ("simplelowercasemapping", "Simple_Lowercase_Mapping"), + ("simpletitlecasemapping", "Simple_Titlecase_Mapping"), + ("simpleuppercasemapping", "Simple_Uppercase_Mapping"), + ("slc", "Simple_Lowercase_Mapping"), + ("softdotted", "Soft_Dotted"), + ("space", "White_Space"), + ("stc", "Simple_Titlecase_Mapping"), + ("sterm", "Sentence_Terminal"), + ("suc", "Simple_Uppercase_Mapping"), + ("tc", "Titlecase_Mapping"), + ("term", "Terminal_Punctuation"), + ("terminalpunctuation", "Terminal_Punctuation"), + ("titlecasemapping", "Titlecase_Mapping"), + ("uc", "Uppercase_Mapping"), + ("uideo", "Unified_Ideograph"), + ("unicode1name", "Unicode_1_Name"), + ("unicoderadicalstroke", "kRSUnicode"), + ("unifiedideograph", "Unified_Ideograph"), + ("upper", "Uppercase"), + ("uppercase", "Uppercase"), + ("uppercasemapping", "Uppercase_Mapping"), + ("urs", "kRSUnicode"), + ("variationselector", "Variation_Selector"), + ("verticalorientation", "Vertical_Orientation"), + ("vo", 
"Vertical_Orientation"), + ("vs", "Variation_Selector"), + ("wb", "Word_Break"), + ("whitespace", "White_Space"), + ("wordbreak", "Word_Break"), + ("wspace", "White_Space"), + ("xidc", "XID_Continue"), + ("xidcontinue", "XID_Continue"), + ("xids", "XID_Start"), + ("xidstart", "XID_Start"), + ("xonfc", "Expands_On_NFC"), + ("xonfd", "Expands_On_NFD"), + ("xonfkc", "Expands_On_NFKC"), + ("xonfkd", "Expands_On_NFKD"), +]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/property_values.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/property_values.rs new file mode 100644 index 0000000000000000000000000000000000000000..2270d66383735d6b82649975b7cbe32d4f5cd4e8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/property_values.rs @@ -0,0 +1,956 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate property-values ucd-16.0.0 --include gc,script,scx,age,gcb,wb,sb +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
+ +pub const PROPERTY_VALUES: &'static [( + &'static str, + &'static [(&'static str, &'static str)], +)] = &[ + ( + "Age", + &[ + ("1.1", "V1_1"), + ("10.0", "V10_0"), + ("11.0", "V11_0"), + ("12.0", "V12_0"), + ("12.1", "V12_1"), + ("13.0", "V13_0"), + ("14.0", "V14_0"), + ("15.0", "V15_0"), + ("15.1", "V15_1"), + ("16.0", "V16_0"), + ("2.0", "V2_0"), + ("2.1", "V2_1"), + ("3.0", "V3_0"), + ("3.1", "V3_1"), + ("3.2", "V3_2"), + ("4.0", "V4_0"), + ("4.1", "V4_1"), + ("5.0", "V5_0"), + ("5.1", "V5_1"), + ("5.2", "V5_2"), + ("6.0", "V6_0"), + ("6.1", "V6_1"), + ("6.2", "V6_2"), + ("6.3", "V6_3"), + ("7.0", "V7_0"), + ("8.0", "V8_0"), + ("9.0", "V9_0"), + ("na", "Unassigned"), + ("unassigned", "Unassigned"), + ("v100", "V10_0"), + ("v11", "V1_1"), + ("v110", "V11_0"), + ("v120", "V12_0"), + ("v121", "V12_1"), + ("v130", "V13_0"), + ("v140", "V14_0"), + ("v150", "V15_0"), + ("v151", "V15_1"), + ("v160", "V16_0"), + ("v20", "V2_0"), + ("v21", "V2_1"), + ("v30", "V3_0"), + ("v31", "V3_1"), + ("v32", "V3_2"), + ("v40", "V4_0"), + ("v41", "V4_1"), + ("v50", "V5_0"), + ("v51", "V5_1"), + ("v52", "V5_2"), + ("v60", "V6_0"), + ("v61", "V6_1"), + ("v62", "V6_2"), + ("v63", "V6_3"), + ("v70", "V7_0"), + ("v80", "V8_0"), + ("v90", "V9_0"), + ], + ), + ( + "General_Category", + &[ + ("c", "Other"), + ("casedletter", "Cased_Letter"), + ("cc", "Control"), + ("cf", "Format"), + ("closepunctuation", "Close_Punctuation"), + ("cn", "Unassigned"), + ("cntrl", "Control"), + ("co", "Private_Use"), + ("combiningmark", "Mark"), + ("connectorpunctuation", "Connector_Punctuation"), + ("control", "Control"), + ("cs", "Surrogate"), + ("currencysymbol", "Currency_Symbol"), + ("dashpunctuation", "Dash_Punctuation"), + ("decimalnumber", "Decimal_Number"), + ("digit", "Decimal_Number"), + ("enclosingmark", "Enclosing_Mark"), + ("finalpunctuation", "Final_Punctuation"), + ("format", "Format"), + ("initialpunctuation", "Initial_Punctuation"), + ("l", "Letter"), + ("lc", "Cased_Letter"), + ("letter", 
"Letter"), + ("letternumber", "Letter_Number"), + ("lineseparator", "Line_Separator"), + ("ll", "Lowercase_Letter"), + ("lm", "Modifier_Letter"), + ("lo", "Other_Letter"), + ("lowercaseletter", "Lowercase_Letter"), + ("lt", "Titlecase_Letter"), + ("lu", "Uppercase_Letter"), + ("m", "Mark"), + ("mark", "Mark"), + ("mathsymbol", "Math_Symbol"), + ("mc", "Spacing_Mark"), + ("me", "Enclosing_Mark"), + ("mn", "Nonspacing_Mark"), + ("modifierletter", "Modifier_Letter"), + ("modifiersymbol", "Modifier_Symbol"), + ("n", "Number"), + ("nd", "Decimal_Number"), + ("nl", "Letter_Number"), + ("no", "Other_Number"), + ("nonspacingmark", "Nonspacing_Mark"), + ("number", "Number"), + ("openpunctuation", "Open_Punctuation"), + ("other", "Other"), + ("otherletter", "Other_Letter"), + ("othernumber", "Other_Number"), + ("otherpunctuation", "Other_Punctuation"), + ("othersymbol", "Other_Symbol"), + ("p", "Punctuation"), + ("paragraphseparator", "Paragraph_Separator"), + ("pc", "Connector_Punctuation"), + ("pd", "Dash_Punctuation"), + ("pe", "Close_Punctuation"), + ("pf", "Final_Punctuation"), + ("pi", "Initial_Punctuation"), + ("po", "Other_Punctuation"), + ("privateuse", "Private_Use"), + ("ps", "Open_Punctuation"), + ("punct", "Punctuation"), + ("punctuation", "Punctuation"), + ("s", "Symbol"), + ("sc", "Currency_Symbol"), + ("separator", "Separator"), + ("sk", "Modifier_Symbol"), + ("sm", "Math_Symbol"), + ("so", "Other_Symbol"), + ("spaceseparator", "Space_Separator"), + ("spacingmark", "Spacing_Mark"), + ("surrogate", "Surrogate"), + ("symbol", "Symbol"), + ("titlecaseletter", "Titlecase_Letter"), + ("unassigned", "Unassigned"), + ("uppercaseletter", "Uppercase_Letter"), + ("z", "Separator"), + ("zl", "Line_Separator"), + ("zp", "Paragraph_Separator"), + ("zs", "Space_Separator"), + ], + ), + ( + "Grapheme_Cluster_Break", + &[ + ("cn", "Control"), + ("control", "Control"), + ("cr", "CR"), + ("eb", "E_Base"), + ("ebase", "E_Base"), + ("ebasegaz", "E_Base_GAZ"), + ("ebg", 
"E_Base_GAZ"), + ("em", "E_Modifier"), + ("emodifier", "E_Modifier"), + ("ex", "Extend"), + ("extend", "Extend"), + ("gaz", "Glue_After_Zwj"), + ("glueafterzwj", "Glue_After_Zwj"), + ("l", "L"), + ("lf", "LF"), + ("lv", "LV"), + ("lvt", "LVT"), + ("other", "Other"), + ("pp", "Prepend"), + ("prepend", "Prepend"), + ("regionalindicator", "Regional_Indicator"), + ("ri", "Regional_Indicator"), + ("sm", "SpacingMark"), + ("spacingmark", "SpacingMark"), + ("t", "T"), + ("v", "V"), + ("xx", "Other"), + ("zwj", "ZWJ"), + ], + ), + ( + "Script", + &[ + ("adlam", "Adlam"), + ("adlm", "Adlam"), + ("aghb", "Caucasian_Albanian"), + ("ahom", "Ahom"), + ("anatolianhieroglyphs", "Anatolian_Hieroglyphs"), + ("arab", "Arabic"), + ("arabic", "Arabic"), + ("armenian", "Armenian"), + ("armi", "Imperial_Aramaic"), + ("armn", "Armenian"), + ("avestan", "Avestan"), + ("avst", "Avestan"), + ("bali", "Balinese"), + ("balinese", "Balinese"), + ("bamu", "Bamum"), + ("bamum", "Bamum"), + ("bass", "Bassa_Vah"), + ("bassavah", "Bassa_Vah"), + ("batak", "Batak"), + ("batk", "Batak"), + ("beng", "Bengali"), + ("bengali", "Bengali"), + ("bhaiksuki", "Bhaiksuki"), + ("bhks", "Bhaiksuki"), + ("bopo", "Bopomofo"), + ("bopomofo", "Bopomofo"), + ("brah", "Brahmi"), + ("brahmi", "Brahmi"), + ("brai", "Braille"), + ("braille", "Braille"), + ("bugi", "Buginese"), + ("buginese", "Buginese"), + ("buhd", "Buhid"), + ("buhid", "Buhid"), + ("cakm", "Chakma"), + ("canadianaboriginal", "Canadian_Aboriginal"), + ("cans", "Canadian_Aboriginal"), + ("cari", "Carian"), + ("carian", "Carian"), + ("caucasianalbanian", "Caucasian_Albanian"), + ("chakma", "Chakma"), + ("cham", "Cham"), + ("cher", "Cherokee"), + ("cherokee", "Cherokee"), + ("chorasmian", "Chorasmian"), + ("chrs", "Chorasmian"), + ("common", "Common"), + ("copt", "Coptic"), + ("coptic", "Coptic"), + ("cpmn", "Cypro_Minoan"), + ("cprt", "Cypriot"), + ("cuneiform", "Cuneiform"), + ("cypriot", "Cypriot"), + ("cyprominoan", "Cypro_Minoan"), + ("cyrillic", 
"Cyrillic"), + ("cyrl", "Cyrillic"), + ("deseret", "Deseret"), + ("deva", "Devanagari"), + ("devanagari", "Devanagari"), + ("diak", "Dives_Akuru"), + ("divesakuru", "Dives_Akuru"), + ("dogr", "Dogra"), + ("dogra", "Dogra"), + ("dsrt", "Deseret"), + ("dupl", "Duployan"), + ("duployan", "Duployan"), + ("egyp", "Egyptian_Hieroglyphs"), + ("egyptianhieroglyphs", "Egyptian_Hieroglyphs"), + ("elba", "Elbasan"), + ("elbasan", "Elbasan"), + ("elym", "Elymaic"), + ("elymaic", "Elymaic"), + ("ethi", "Ethiopic"), + ("ethiopic", "Ethiopic"), + ("gara", "Garay"), + ("garay", "Garay"), + ("geor", "Georgian"), + ("georgian", "Georgian"), + ("glag", "Glagolitic"), + ("glagolitic", "Glagolitic"), + ("gong", "Gunjala_Gondi"), + ("gonm", "Masaram_Gondi"), + ("goth", "Gothic"), + ("gothic", "Gothic"), + ("gran", "Grantha"), + ("grantha", "Grantha"), + ("greek", "Greek"), + ("grek", "Greek"), + ("gujarati", "Gujarati"), + ("gujr", "Gujarati"), + ("gukh", "Gurung_Khema"), + ("gunjalagondi", "Gunjala_Gondi"), + ("gurmukhi", "Gurmukhi"), + ("guru", "Gurmukhi"), + ("gurungkhema", "Gurung_Khema"), + ("han", "Han"), + ("hang", "Hangul"), + ("hangul", "Hangul"), + ("hani", "Han"), + ("hanifirohingya", "Hanifi_Rohingya"), + ("hano", "Hanunoo"), + ("hanunoo", "Hanunoo"), + ("hatr", "Hatran"), + ("hatran", "Hatran"), + ("hebr", "Hebrew"), + ("hebrew", "Hebrew"), + ("hira", "Hiragana"), + ("hiragana", "Hiragana"), + ("hluw", "Anatolian_Hieroglyphs"), + ("hmng", "Pahawh_Hmong"), + ("hmnp", "Nyiakeng_Puachue_Hmong"), + ("hrkt", "Katakana_Or_Hiragana"), + ("hung", "Old_Hungarian"), + ("imperialaramaic", "Imperial_Aramaic"), + ("inherited", "Inherited"), + ("inscriptionalpahlavi", "Inscriptional_Pahlavi"), + ("inscriptionalparthian", "Inscriptional_Parthian"), + ("ital", "Old_Italic"), + ("java", "Javanese"), + ("javanese", "Javanese"), + ("kaithi", "Kaithi"), + ("kali", "Kayah_Li"), + ("kana", "Katakana"), + ("kannada", "Kannada"), + ("katakana", "Katakana"), + ("katakanaorhiragana", 
"Katakana_Or_Hiragana"), + ("kawi", "Kawi"), + ("kayahli", "Kayah_Li"), + ("khar", "Kharoshthi"), + ("kharoshthi", "Kharoshthi"), + ("khitansmallscript", "Khitan_Small_Script"), + ("khmer", "Khmer"), + ("khmr", "Khmer"), + ("khoj", "Khojki"), + ("khojki", "Khojki"), + ("khudawadi", "Khudawadi"), + ("kiratrai", "Kirat_Rai"), + ("kits", "Khitan_Small_Script"), + ("knda", "Kannada"), + ("krai", "Kirat_Rai"), + ("kthi", "Kaithi"), + ("lana", "Tai_Tham"), + ("lao", "Lao"), + ("laoo", "Lao"), + ("latin", "Latin"), + ("latn", "Latin"), + ("lepc", "Lepcha"), + ("lepcha", "Lepcha"), + ("limb", "Limbu"), + ("limbu", "Limbu"), + ("lina", "Linear_A"), + ("linb", "Linear_B"), + ("lineara", "Linear_A"), + ("linearb", "Linear_B"), + ("lisu", "Lisu"), + ("lyci", "Lycian"), + ("lycian", "Lycian"), + ("lydi", "Lydian"), + ("lydian", "Lydian"), + ("mahajani", "Mahajani"), + ("mahj", "Mahajani"), + ("maka", "Makasar"), + ("makasar", "Makasar"), + ("malayalam", "Malayalam"), + ("mand", "Mandaic"), + ("mandaic", "Mandaic"), + ("mani", "Manichaean"), + ("manichaean", "Manichaean"), + ("marc", "Marchen"), + ("marchen", "Marchen"), + ("masaramgondi", "Masaram_Gondi"), + ("medefaidrin", "Medefaidrin"), + ("medf", "Medefaidrin"), + ("meeteimayek", "Meetei_Mayek"), + ("mend", "Mende_Kikakui"), + ("mendekikakui", "Mende_Kikakui"), + ("merc", "Meroitic_Cursive"), + ("mero", "Meroitic_Hieroglyphs"), + ("meroiticcursive", "Meroitic_Cursive"), + ("meroitichieroglyphs", "Meroitic_Hieroglyphs"), + ("miao", "Miao"), + ("mlym", "Malayalam"), + ("modi", "Modi"), + ("mong", "Mongolian"), + ("mongolian", "Mongolian"), + ("mro", "Mro"), + ("mroo", "Mro"), + ("mtei", "Meetei_Mayek"), + ("mult", "Multani"), + ("multani", "Multani"), + ("myanmar", "Myanmar"), + ("mymr", "Myanmar"), + ("nabataean", "Nabataean"), + ("nagm", "Nag_Mundari"), + ("nagmundari", "Nag_Mundari"), + ("nand", "Nandinagari"), + ("nandinagari", "Nandinagari"), + ("narb", "Old_North_Arabian"), + ("nbat", "Nabataean"), + ("newa", "Newa"), + 
("newtailue", "New_Tai_Lue"), + ("nko", "Nko"), + ("nkoo", "Nko"), + ("nshu", "Nushu"), + ("nushu", "Nushu"), + ("nyiakengpuachuehmong", "Nyiakeng_Puachue_Hmong"), + ("ogam", "Ogham"), + ("ogham", "Ogham"), + ("olchiki", "Ol_Chiki"), + ("olck", "Ol_Chiki"), + ("oldhungarian", "Old_Hungarian"), + ("olditalic", "Old_Italic"), + ("oldnortharabian", "Old_North_Arabian"), + ("oldpermic", "Old_Permic"), + ("oldpersian", "Old_Persian"), + ("oldsogdian", "Old_Sogdian"), + ("oldsoutharabian", "Old_South_Arabian"), + ("oldturkic", "Old_Turkic"), + ("olduyghur", "Old_Uyghur"), + ("olonal", "Ol_Onal"), + ("onao", "Ol_Onal"), + ("oriya", "Oriya"), + ("orkh", "Old_Turkic"), + ("orya", "Oriya"), + ("osage", "Osage"), + ("osge", "Osage"), + ("osma", "Osmanya"), + ("osmanya", "Osmanya"), + ("ougr", "Old_Uyghur"), + ("pahawhhmong", "Pahawh_Hmong"), + ("palm", "Palmyrene"), + ("palmyrene", "Palmyrene"), + ("pauc", "Pau_Cin_Hau"), + ("paucinhau", "Pau_Cin_Hau"), + ("perm", "Old_Permic"), + ("phag", "Phags_Pa"), + ("phagspa", "Phags_Pa"), + ("phli", "Inscriptional_Pahlavi"), + ("phlp", "Psalter_Pahlavi"), + ("phnx", "Phoenician"), + ("phoenician", "Phoenician"), + ("plrd", "Miao"), + ("prti", "Inscriptional_Parthian"), + ("psalterpahlavi", "Psalter_Pahlavi"), + ("qaac", "Coptic"), + ("qaai", "Inherited"), + ("rejang", "Rejang"), + ("rjng", "Rejang"), + ("rohg", "Hanifi_Rohingya"), + ("runic", "Runic"), + ("runr", "Runic"), + ("samaritan", "Samaritan"), + ("samr", "Samaritan"), + ("sarb", "Old_South_Arabian"), + ("saur", "Saurashtra"), + ("saurashtra", "Saurashtra"), + ("sgnw", "SignWriting"), + ("sharada", "Sharada"), + ("shavian", "Shavian"), + ("shaw", "Shavian"), + ("shrd", "Sharada"), + ("sidd", "Siddham"), + ("siddham", "Siddham"), + ("signwriting", "SignWriting"), + ("sind", "Khudawadi"), + ("sinh", "Sinhala"), + ("sinhala", "Sinhala"), + ("sogd", "Sogdian"), + ("sogdian", "Sogdian"), + ("sogo", "Old_Sogdian"), + ("sora", "Sora_Sompeng"), + ("sorasompeng", "Sora_Sompeng"), + 
("soyo", "Soyombo"), + ("soyombo", "Soyombo"), + ("sund", "Sundanese"), + ("sundanese", "Sundanese"), + ("sunu", "Sunuwar"), + ("sunuwar", "Sunuwar"), + ("sylo", "Syloti_Nagri"), + ("sylotinagri", "Syloti_Nagri"), + ("syrc", "Syriac"), + ("syriac", "Syriac"), + ("tagalog", "Tagalog"), + ("tagb", "Tagbanwa"), + ("tagbanwa", "Tagbanwa"), + ("taile", "Tai_Le"), + ("taitham", "Tai_Tham"), + ("taiviet", "Tai_Viet"), + ("takr", "Takri"), + ("takri", "Takri"), + ("tale", "Tai_Le"), + ("talu", "New_Tai_Lue"), + ("tamil", "Tamil"), + ("taml", "Tamil"), + ("tang", "Tangut"), + ("tangsa", "Tangsa"), + ("tangut", "Tangut"), + ("tavt", "Tai_Viet"), + ("telu", "Telugu"), + ("telugu", "Telugu"), + ("tfng", "Tifinagh"), + ("tglg", "Tagalog"), + ("thaa", "Thaana"), + ("thaana", "Thaana"), + ("thai", "Thai"), + ("tibetan", "Tibetan"), + ("tibt", "Tibetan"), + ("tifinagh", "Tifinagh"), + ("tirh", "Tirhuta"), + ("tirhuta", "Tirhuta"), + ("tnsa", "Tangsa"), + ("todhri", "Todhri"), + ("todr", "Todhri"), + ("toto", "Toto"), + ("tulutigalari", "Tulu_Tigalari"), + ("tutg", "Tulu_Tigalari"), + ("ugar", "Ugaritic"), + ("ugaritic", "Ugaritic"), + ("unknown", "Unknown"), + ("vai", "Vai"), + ("vaii", "Vai"), + ("vith", "Vithkuqi"), + ("vithkuqi", "Vithkuqi"), + ("wancho", "Wancho"), + ("wara", "Warang_Citi"), + ("warangciti", "Warang_Citi"), + ("wcho", "Wancho"), + ("xpeo", "Old_Persian"), + ("xsux", "Cuneiform"), + ("yezi", "Yezidi"), + ("yezidi", "Yezidi"), + ("yi", "Yi"), + ("yiii", "Yi"), + ("zanabazarsquare", "Zanabazar_Square"), + ("zanb", "Zanabazar_Square"), + ("zinh", "Inherited"), + ("zyyy", "Common"), + ("zzzz", "Unknown"), + ], + ), + ( + "Script_Extensions", + &[ + ("adlam", "Adlam"), + ("adlm", "Adlam"), + ("aghb", "Caucasian_Albanian"), + ("ahom", "Ahom"), + ("anatolianhieroglyphs", "Anatolian_Hieroglyphs"), + ("arab", "Arabic"), + ("arabic", "Arabic"), + ("armenian", "Armenian"), + ("armi", "Imperial_Aramaic"), + ("armn", "Armenian"), + ("avestan", "Avestan"), + ("avst", 
"Avestan"), + ("bali", "Balinese"), + ("balinese", "Balinese"), + ("bamu", "Bamum"), + ("bamum", "Bamum"), + ("bass", "Bassa_Vah"), + ("bassavah", "Bassa_Vah"), + ("batak", "Batak"), + ("batk", "Batak"), + ("beng", "Bengali"), + ("bengali", "Bengali"), + ("bhaiksuki", "Bhaiksuki"), + ("bhks", "Bhaiksuki"), + ("bopo", "Bopomofo"), + ("bopomofo", "Bopomofo"), + ("brah", "Brahmi"), + ("brahmi", "Brahmi"), + ("brai", "Braille"), + ("braille", "Braille"), + ("bugi", "Buginese"), + ("buginese", "Buginese"), + ("buhd", "Buhid"), + ("buhid", "Buhid"), + ("cakm", "Chakma"), + ("canadianaboriginal", "Canadian_Aboriginal"), + ("cans", "Canadian_Aboriginal"), + ("cari", "Carian"), + ("carian", "Carian"), + ("caucasianalbanian", "Caucasian_Albanian"), + ("chakma", "Chakma"), + ("cham", "Cham"), + ("cher", "Cherokee"), + ("cherokee", "Cherokee"), + ("chorasmian", "Chorasmian"), + ("chrs", "Chorasmian"), + ("common", "Common"), + ("copt", "Coptic"), + ("coptic", "Coptic"), + ("cpmn", "Cypro_Minoan"), + ("cprt", "Cypriot"), + ("cuneiform", "Cuneiform"), + ("cypriot", "Cypriot"), + ("cyprominoan", "Cypro_Minoan"), + ("cyrillic", "Cyrillic"), + ("cyrl", "Cyrillic"), + ("deseret", "Deseret"), + ("deva", "Devanagari"), + ("devanagari", "Devanagari"), + ("diak", "Dives_Akuru"), + ("divesakuru", "Dives_Akuru"), + ("dogr", "Dogra"), + ("dogra", "Dogra"), + ("dsrt", "Deseret"), + ("dupl", "Duployan"), + ("duployan", "Duployan"), + ("egyp", "Egyptian_Hieroglyphs"), + ("egyptianhieroglyphs", "Egyptian_Hieroglyphs"), + ("elba", "Elbasan"), + ("elbasan", "Elbasan"), + ("elym", "Elymaic"), + ("elymaic", "Elymaic"), + ("ethi", "Ethiopic"), + ("ethiopic", "Ethiopic"), + ("gara", "Garay"), + ("garay", "Garay"), + ("geor", "Georgian"), + ("georgian", "Georgian"), + ("glag", "Glagolitic"), + ("glagolitic", "Glagolitic"), + ("gong", "Gunjala_Gondi"), + ("gonm", "Masaram_Gondi"), + ("goth", "Gothic"), + ("gothic", "Gothic"), + ("gran", "Grantha"), + ("grantha", "Grantha"), + ("greek", "Greek"), + 
("grek", "Greek"), + ("gujarati", "Gujarati"), + ("gujr", "Gujarati"), + ("gukh", "Gurung_Khema"), + ("gunjalagondi", "Gunjala_Gondi"), + ("gurmukhi", "Gurmukhi"), + ("guru", "Gurmukhi"), + ("gurungkhema", "Gurung_Khema"), + ("han", "Han"), + ("hang", "Hangul"), + ("hangul", "Hangul"), + ("hani", "Han"), + ("hanifirohingya", "Hanifi_Rohingya"), + ("hano", "Hanunoo"), + ("hanunoo", "Hanunoo"), + ("hatr", "Hatran"), + ("hatran", "Hatran"), + ("hebr", "Hebrew"), + ("hebrew", "Hebrew"), + ("hira", "Hiragana"), + ("hiragana", "Hiragana"), + ("hluw", "Anatolian_Hieroglyphs"), + ("hmng", "Pahawh_Hmong"), + ("hmnp", "Nyiakeng_Puachue_Hmong"), + ("hrkt", "Katakana_Or_Hiragana"), + ("hung", "Old_Hungarian"), + ("imperialaramaic", "Imperial_Aramaic"), + ("inherited", "Inherited"), + ("inscriptionalpahlavi", "Inscriptional_Pahlavi"), + ("inscriptionalparthian", "Inscriptional_Parthian"), + ("ital", "Old_Italic"), + ("java", "Javanese"), + ("javanese", "Javanese"), + ("kaithi", "Kaithi"), + ("kali", "Kayah_Li"), + ("kana", "Katakana"), + ("kannada", "Kannada"), + ("katakana", "Katakana"), + ("katakanaorhiragana", "Katakana_Or_Hiragana"), + ("kawi", "Kawi"), + ("kayahli", "Kayah_Li"), + ("khar", "Kharoshthi"), + ("kharoshthi", "Kharoshthi"), + ("khitansmallscript", "Khitan_Small_Script"), + ("khmer", "Khmer"), + ("khmr", "Khmer"), + ("khoj", "Khojki"), + ("khojki", "Khojki"), + ("khudawadi", "Khudawadi"), + ("kiratrai", "Kirat_Rai"), + ("kits", "Khitan_Small_Script"), + ("knda", "Kannada"), + ("krai", "Kirat_Rai"), + ("kthi", "Kaithi"), + ("lana", "Tai_Tham"), + ("lao", "Lao"), + ("laoo", "Lao"), + ("latin", "Latin"), + ("latn", "Latin"), + ("lepc", "Lepcha"), + ("lepcha", "Lepcha"), + ("limb", "Limbu"), + ("limbu", "Limbu"), + ("lina", "Linear_A"), + ("linb", "Linear_B"), + ("lineara", "Linear_A"), + ("linearb", "Linear_B"), + ("lisu", "Lisu"), + ("lyci", "Lycian"), + ("lycian", "Lycian"), + ("lydi", "Lydian"), + ("lydian", "Lydian"), + ("mahajani", "Mahajani"), + ("mahj", 
"Mahajani"), + ("maka", "Makasar"), + ("makasar", "Makasar"), + ("malayalam", "Malayalam"), + ("mand", "Mandaic"), + ("mandaic", "Mandaic"), + ("mani", "Manichaean"), + ("manichaean", "Manichaean"), + ("marc", "Marchen"), + ("marchen", "Marchen"), + ("masaramgondi", "Masaram_Gondi"), + ("medefaidrin", "Medefaidrin"), + ("medf", "Medefaidrin"), + ("meeteimayek", "Meetei_Mayek"), + ("mend", "Mende_Kikakui"), + ("mendekikakui", "Mende_Kikakui"), + ("merc", "Meroitic_Cursive"), + ("mero", "Meroitic_Hieroglyphs"), + ("meroiticcursive", "Meroitic_Cursive"), + ("meroitichieroglyphs", "Meroitic_Hieroglyphs"), + ("miao", "Miao"), + ("mlym", "Malayalam"), + ("modi", "Modi"), + ("mong", "Mongolian"), + ("mongolian", "Mongolian"), + ("mro", "Mro"), + ("mroo", "Mro"), + ("mtei", "Meetei_Mayek"), + ("mult", "Multani"), + ("multani", "Multani"), + ("myanmar", "Myanmar"), + ("mymr", "Myanmar"), + ("nabataean", "Nabataean"), + ("nagm", "Nag_Mundari"), + ("nagmundari", "Nag_Mundari"), + ("nand", "Nandinagari"), + ("nandinagari", "Nandinagari"), + ("narb", "Old_North_Arabian"), + ("nbat", "Nabataean"), + ("newa", "Newa"), + ("newtailue", "New_Tai_Lue"), + ("nko", "Nko"), + ("nkoo", "Nko"), + ("nshu", "Nushu"), + ("nushu", "Nushu"), + ("nyiakengpuachuehmong", "Nyiakeng_Puachue_Hmong"), + ("ogam", "Ogham"), + ("ogham", "Ogham"), + ("olchiki", "Ol_Chiki"), + ("olck", "Ol_Chiki"), + ("oldhungarian", "Old_Hungarian"), + ("olditalic", "Old_Italic"), + ("oldnortharabian", "Old_North_Arabian"), + ("oldpermic", "Old_Permic"), + ("oldpersian", "Old_Persian"), + ("oldsogdian", "Old_Sogdian"), + ("oldsoutharabian", "Old_South_Arabian"), + ("oldturkic", "Old_Turkic"), + ("olduyghur", "Old_Uyghur"), + ("olonal", "Ol_Onal"), + ("onao", "Ol_Onal"), + ("oriya", "Oriya"), + ("orkh", "Old_Turkic"), + ("orya", "Oriya"), + ("osage", "Osage"), + ("osge", "Osage"), + ("osma", "Osmanya"), + ("osmanya", "Osmanya"), + ("ougr", "Old_Uyghur"), + ("pahawhhmong", "Pahawh_Hmong"), + ("palm", "Palmyrene"), + 
("palmyrene", "Palmyrene"), + ("pauc", "Pau_Cin_Hau"), + ("paucinhau", "Pau_Cin_Hau"), + ("perm", "Old_Permic"), + ("phag", "Phags_Pa"), + ("phagspa", "Phags_Pa"), + ("phli", "Inscriptional_Pahlavi"), + ("phlp", "Psalter_Pahlavi"), + ("phnx", "Phoenician"), + ("phoenician", "Phoenician"), + ("plrd", "Miao"), + ("prti", "Inscriptional_Parthian"), + ("psalterpahlavi", "Psalter_Pahlavi"), + ("qaac", "Coptic"), + ("qaai", "Inherited"), + ("rejang", "Rejang"), + ("rjng", "Rejang"), + ("rohg", "Hanifi_Rohingya"), + ("runic", "Runic"), + ("runr", "Runic"), + ("samaritan", "Samaritan"), + ("samr", "Samaritan"), + ("sarb", "Old_South_Arabian"), + ("saur", "Saurashtra"), + ("saurashtra", "Saurashtra"), + ("sgnw", "SignWriting"), + ("sharada", "Sharada"), + ("shavian", "Shavian"), + ("shaw", "Shavian"), + ("shrd", "Sharada"), + ("sidd", "Siddham"), + ("siddham", "Siddham"), + ("signwriting", "SignWriting"), + ("sind", "Khudawadi"), + ("sinh", "Sinhala"), + ("sinhala", "Sinhala"), + ("sogd", "Sogdian"), + ("sogdian", "Sogdian"), + ("sogo", "Old_Sogdian"), + ("sora", "Sora_Sompeng"), + ("sorasompeng", "Sora_Sompeng"), + ("soyo", "Soyombo"), + ("soyombo", "Soyombo"), + ("sund", "Sundanese"), + ("sundanese", "Sundanese"), + ("sunu", "Sunuwar"), + ("sunuwar", "Sunuwar"), + ("sylo", "Syloti_Nagri"), + ("sylotinagri", "Syloti_Nagri"), + ("syrc", "Syriac"), + ("syriac", "Syriac"), + ("tagalog", "Tagalog"), + ("tagb", "Tagbanwa"), + ("tagbanwa", "Tagbanwa"), + ("taile", "Tai_Le"), + ("taitham", "Tai_Tham"), + ("taiviet", "Tai_Viet"), + ("takr", "Takri"), + ("takri", "Takri"), + ("tale", "Tai_Le"), + ("talu", "New_Tai_Lue"), + ("tamil", "Tamil"), + ("taml", "Tamil"), + ("tang", "Tangut"), + ("tangsa", "Tangsa"), + ("tangut", "Tangut"), + ("tavt", "Tai_Viet"), + ("telu", "Telugu"), + ("telugu", "Telugu"), + ("tfng", "Tifinagh"), + ("tglg", "Tagalog"), + ("thaa", "Thaana"), + ("thaana", "Thaana"), + ("thai", "Thai"), + ("tibetan", "Tibetan"), + ("tibt", "Tibetan"), + ("tifinagh", 
"Tifinagh"), + ("tirh", "Tirhuta"), + ("tirhuta", "Tirhuta"), + ("tnsa", "Tangsa"), + ("todhri", "Todhri"), + ("todr", "Todhri"), + ("toto", "Toto"), + ("tulutigalari", "Tulu_Tigalari"), + ("tutg", "Tulu_Tigalari"), + ("ugar", "Ugaritic"), + ("ugaritic", "Ugaritic"), + ("unknown", "Unknown"), + ("vai", "Vai"), + ("vaii", "Vai"), + ("vith", "Vithkuqi"), + ("vithkuqi", "Vithkuqi"), + ("wancho", "Wancho"), + ("wara", "Warang_Citi"), + ("warangciti", "Warang_Citi"), + ("wcho", "Wancho"), + ("xpeo", "Old_Persian"), + ("xsux", "Cuneiform"), + ("yezi", "Yezidi"), + ("yezidi", "Yezidi"), + ("yi", "Yi"), + ("yiii", "Yi"), + ("zanabazarsquare", "Zanabazar_Square"), + ("zanb", "Zanabazar_Square"), + ("zinh", "Inherited"), + ("zyyy", "Common"), + ("zzzz", "Unknown"), + ], + ), + ( + "Sentence_Break", + &[ + ("at", "ATerm"), + ("aterm", "ATerm"), + ("cl", "Close"), + ("close", "Close"), + ("cr", "CR"), + ("ex", "Extend"), + ("extend", "Extend"), + ("fo", "Format"), + ("format", "Format"), + ("le", "OLetter"), + ("lf", "LF"), + ("lo", "Lower"), + ("lower", "Lower"), + ("nu", "Numeric"), + ("numeric", "Numeric"), + ("oletter", "OLetter"), + ("other", "Other"), + ("sc", "SContinue"), + ("scontinue", "SContinue"), + ("se", "Sep"), + ("sep", "Sep"), + ("sp", "Sp"), + ("st", "STerm"), + ("sterm", "STerm"), + ("up", "Upper"), + ("upper", "Upper"), + ("xx", "Other"), + ], + ), + ( + "Word_Break", + &[ + ("aletter", "ALetter"), + ("cr", "CR"), + ("doublequote", "Double_Quote"), + ("dq", "Double_Quote"), + ("eb", "E_Base"), + ("ebase", "E_Base"), + ("ebasegaz", "E_Base_GAZ"), + ("ebg", "E_Base_GAZ"), + ("em", "E_Modifier"), + ("emodifier", "E_Modifier"), + ("ex", "ExtendNumLet"), + ("extend", "Extend"), + ("extendnumlet", "ExtendNumLet"), + ("fo", "Format"), + ("format", "Format"), + ("gaz", "Glue_After_Zwj"), + ("glueafterzwj", "Glue_After_Zwj"), + ("hebrewletter", "Hebrew_Letter"), + ("hl", "Hebrew_Letter"), + ("ka", "Katakana"), + ("katakana", "Katakana"), + ("le", "ALetter"), + 
("lf", "LF"), + ("mb", "MidNumLet"), + ("midletter", "MidLetter"), + ("midnum", "MidNum"), + ("midnumlet", "MidNumLet"), + ("ml", "MidLetter"), + ("mn", "MidNum"), + ("newline", "Newline"), + ("nl", "Newline"), + ("nu", "Numeric"), + ("numeric", "Numeric"), + ("other", "Other"), + ("regionalindicator", "Regional_Indicator"), + ("ri", "Regional_Indicator"), + ("singlequote", "Single_Quote"), + ("sq", "Single_Quote"), + ("wsegspace", "WSegSpace"), + ("xx", "Other"), + ("zwj", "ZWJ"), + ], + ), +]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/script.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/script.rs new file mode 100644 index 0000000000000000000000000000000000000000..3e437ca9ca73e5982e27a28181d28905732eff50 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/script.rs @@ -0,0 +1,1300 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate script ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
+ +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("Adlam", ADLAM), + ("Ahom", AHOM), + ("Anatolian_Hieroglyphs", ANATOLIAN_HIEROGLYPHS), + ("Arabic", ARABIC), + ("Armenian", ARMENIAN), + ("Avestan", AVESTAN), + ("Balinese", BALINESE), + ("Bamum", BAMUM), + ("Bassa_Vah", BASSA_VAH), + ("Batak", BATAK), + ("Bengali", BENGALI), + ("Bhaiksuki", BHAIKSUKI), + ("Bopomofo", BOPOMOFO), + ("Brahmi", BRAHMI), + ("Braille", BRAILLE), + ("Buginese", BUGINESE), + ("Buhid", BUHID), + ("Canadian_Aboriginal", CANADIAN_ABORIGINAL), + ("Carian", CARIAN), + ("Caucasian_Albanian", CAUCASIAN_ALBANIAN), + ("Chakma", CHAKMA), + ("Cham", CHAM), + ("Cherokee", CHEROKEE), + ("Chorasmian", CHORASMIAN), + ("Common", COMMON), + ("Coptic", COPTIC), + ("Cuneiform", CUNEIFORM), + ("Cypriot", CYPRIOT), + ("Cypro_Minoan", CYPRO_MINOAN), + ("Cyrillic", CYRILLIC), + ("Deseret", DESERET), + ("Devanagari", DEVANAGARI), + ("Dives_Akuru", DIVES_AKURU), + ("Dogra", DOGRA), + ("Duployan", DUPLOYAN), + ("Egyptian_Hieroglyphs", EGYPTIAN_HIEROGLYPHS), + ("Elbasan", ELBASAN), + ("Elymaic", ELYMAIC), + ("Ethiopic", ETHIOPIC), + ("Garay", GARAY), + ("Georgian", GEORGIAN), + ("Glagolitic", GLAGOLITIC), + ("Gothic", GOTHIC), + ("Grantha", GRANTHA), + ("Greek", GREEK), + ("Gujarati", GUJARATI), + ("Gunjala_Gondi", GUNJALA_GONDI), + ("Gurmukhi", GURMUKHI), + ("Gurung_Khema", GURUNG_KHEMA), + ("Han", HAN), + ("Hangul", HANGUL), + ("Hanifi_Rohingya", HANIFI_ROHINGYA), + ("Hanunoo", HANUNOO), + ("Hatran", HATRAN), + ("Hebrew", HEBREW), + ("Hiragana", HIRAGANA), + ("Imperial_Aramaic", IMPERIAL_ARAMAIC), + ("Inherited", INHERITED), + ("Inscriptional_Pahlavi", INSCRIPTIONAL_PAHLAVI), + ("Inscriptional_Parthian", INSCRIPTIONAL_PARTHIAN), + ("Javanese", JAVANESE), + ("Kaithi", KAITHI), + ("Kannada", KANNADA), + ("Katakana", KATAKANA), + ("Kawi", KAWI), + ("Kayah_Li", KAYAH_LI), + ("Kharoshthi", KHAROSHTHI), + ("Khitan_Small_Script", KHITAN_SMALL_SCRIPT), + ("Khmer", KHMER), + ("Khojki", 
KHOJKI), + ("Khudawadi", KHUDAWADI), + ("Kirat_Rai", KIRAT_RAI), + ("Lao", LAO), + ("Latin", LATIN), + ("Lepcha", LEPCHA), + ("Limbu", LIMBU), + ("Linear_A", LINEAR_A), + ("Linear_B", LINEAR_B), + ("Lisu", LISU), + ("Lycian", LYCIAN), + ("Lydian", LYDIAN), + ("Mahajani", MAHAJANI), + ("Makasar", MAKASAR), + ("Malayalam", MALAYALAM), + ("Mandaic", MANDAIC), + ("Manichaean", MANICHAEAN), + ("Marchen", MARCHEN), + ("Masaram_Gondi", MASARAM_GONDI), + ("Medefaidrin", MEDEFAIDRIN), + ("Meetei_Mayek", MEETEI_MAYEK), + ("Mende_Kikakui", MENDE_KIKAKUI), + ("Meroitic_Cursive", MEROITIC_CURSIVE), + ("Meroitic_Hieroglyphs", MEROITIC_HIEROGLYPHS), + ("Miao", MIAO), + ("Modi", MODI), + ("Mongolian", MONGOLIAN), + ("Mro", MRO), + ("Multani", MULTANI), + ("Myanmar", MYANMAR), + ("Nabataean", NABATAEAN), + ("Nag_Mundari", NAG_MUNDARI), + ("Nandinagari", NANDINAGARI), + ("New_Tai_Lue", NEW_TAI_LUE), + ("Newa", NEWA), + ("Nko", NKO), + ("Nushu", NUSHU), + ("Nyiakeng_Puachue_Hmong", NYIAKENG_PUACHUE_HMONG), + ("Ogham", OGHAM), + ("Ol_Chiki", OL_CHIKI), + ("Ol_Onal", OL_ONAL), + ("Old_Hungarian", OLD_HUNGARIAN), + ("Old_Italic", OLD_ITALIC), + ("Old_North_Arabian", OLD_NORTH_ARABIAN), + ("Old_Permic", OLD_PERMIC), + ("Old_Persian", OLD_PERSIAN), + ("Old_Sogdian", OLD_SOGDIAN), + ("Old_South_Arabian", OLD_SOUTH_ARABIAN), + ("Old_Turkic", OLD_TURKIC), + ("Old_Uyghur", OLD_UYGHUR), + ("Oriya", ORIYA), + ("Osage", OSAGE), + ("Osmanya", OSMANYA), + ("Pahawh_Hmong", PAHAWH_HMONG), + ("Palmyrene", PALMYRENE), + ("Pau_Cin_Hau", PAU_CIN_HAU), + ("Phags_Pa", PHAGS_PA), + ("Phoenician", PHOENICIAN), + ("Psalter_Pahlavi", PSALTER_PAHLAVI), + ("Rejang", REJANG), + ("Runic", RUNIC), + ("Samaritan", SAMARITAN), + ("Saurashtra", SAURASHTRA), + ("Sharada", SHARADA), + ("Shavian", SHAVIAN), + ("Siddham", SIDDHAM), + ("SignWriting", SIGNWRITING), + ("Sinhala", SINHALA), + ("Sogdian", SOGDIAN), + ("Sora_Sompeng", SORA_SOMPENG), + ("Soyombo", SOYOMBO), + ("Sundanese", SUNDANESE), + ("Sunuwar", SUNUWAR), + 
("Syloti_Nagri", SYLOTI_NAGRI), + ("Syriac", SYRIAC), + ("Tagalog", TAGALOG), + ("Tagbanwa", TAGBANWA), + ("Tai_Le", TAI_LE), + ("Tai_Tham", TAI_THAM), + ("Tai_Viet", TAI_VIET), + ("Takri", TAKRI), + ("Tamil", TAMIL), + ("Tangsa", TANGSA), + ("Tangut", TANGUT), + ("Telugu", TELUGU), + ("Thaana", THAANA), + ("Thai", THAI), + ("Tibetan", TIBETAN), + ("Tifinagh", TIFINAGH), + ("Tirhuta", TIRHUTA), + ("Todhri", TODHRI), + ("Toto", TOTO), + ("Tulu_Tigalari", TULU_TIGALARI), + ("Ugaritic", UGARITIC), + ("Vai", VAI), + ("Vithkuqi", VITHKUQI), + ("Wancho", WANCHO), + ("Warang_Citi", WARANG_CITI), + ("Yezidi", YEZIDI), + ("Yi", YI), + ("Zanabazar_Square", ZANABAZAR_SQUARE), +]; + +pub const ADLAM: &'static [(char, char)] = + &[('𞤀', '𞥋'), ('𞥐', '𞥙'), ('𞥞', '𞥟')]; + +pub const AHOM: &'static [(char, char)] = + &[('𑜀', '𑜚'), ('\u{1171d}', '\u{1172b}'), ('𑜰', '𑝆')]; + +pub const ANATOLIAN_HIEROGLYPHS: &'static [(char, char)] = &[('𔐀', '𔙆')]; + +pub const ARABIC: &'static [(char, char)] = &[ + ('\u{600}', '\u{604}'), + ('؆', '؋'), + ('؍', '\u{61a}'), + ('\u{61c}', '؞'), + ('ؠ', 'ؿ'), + ('ف', 'ي'), + ('\u{656}', 'ٯ'), + ('ٱ', '\u{6dc}'), + ('۞', 'ۿ'), + ('ݐ', 'ݿ'), + ('ࡰ', 'ࢎ'), + ('\u{890}', '\u{891}'), + ('\u{897}', '\u{8e1}'), + ('\u{8e3}', '\u{8ff}'), + ('ﭐ', '﯂'), + ('ﯓ', 'ﴽ'), + ('﵀', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('﷏', '﷏'), + ('ﷰ', '﷿'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('𐹠', '𐹾'), + ('𐻂', '𐻄'), + ('\u{10efc}', '\u{10eff}'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𞻰', '𞻱'), +]; + +pub const ARMENIAN: &'static [(char, char)] = + &[('Ա', 'Ֆ'), ('ՙ', '֊'), ('֍', 
'֏'), ('ﬓ', 'ﬗ')]; + +pub const AVESTAN: &'static [(char, char)] = &[('𐬀', '𐬵'), ('𐬹', '𐬿')]; + +pub const BALINESE: &'static [(char, char)] = &[('\u{1b00}', 'ᭌ'), ('᭎', '᭿')]; + +pub const BAMUM: &'static [(char, char)] = &[('ꚠ', '꛷'), ('𖠀', '𖨸')]; + +pub const BASSA_VAH: &'static [(char, char)] = + &[('𖫐', '𖫭'), ('\u{16af0}', '𖫵')]; + +pub const BATAK: &'static [(char, char)] = &[('ᯀ', '\u{1bf3}'), ('᯼', '᯿')]; + +pub const BENGALI: &'static [(char, char)] = &[ + ('ঀ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', '\u{9fe}'), +]; + +pub const BHAIKSUKI: &'static [(char, char)] = + &[('𑰀', '𑰈'), ('𑰊', '\u{11c36}'), ('\u{11c38}', '𑱅'), ('𑱐', '𑱬')]; + +pub const BOPOMOFO: &'static [(char, char)] = + &[('˪', '˫'), ('ㄅ', 'ㄯ'), ('ㆠ', 'ㆿ')]; + +pub const BRAHMI: &'static [(char, char)] = + &[('𑀀', '𑁍'), ('𑁒', '𑁵'), ('\u{1107f}', '\u{1107f}')]; + +pub const BRAILLE: &'static [(char, char)] = &[('⠀', '⣿')]; + +pub const BUGINESE: &'static [(char, char)] = &[('ᨀ', '\u{1a1b}'), ('᨞', '᨟')]; + +pub const BUHID: &'static [(char, char)] = &[('ᝀ', '\u{1753}')]; + +pub const CANADIAN_ABORIGINAL: &'static [(char, char)] = + &[('᐀', 'ᙿ'), ('ᢰ', 'ᣵ'), ('𑪰', '𑪿')]; + +pub const CARIAN: &'static [(char, char)] = &[('𐊠', '𐋐')]; + +pub const CAUCASIAN_ALBANIAN: &'static [(char, char)] = + &[('𐔰', '𐕣'), ('𐕯', '𐕯')]; + +pub const CHAKMA: &'static [(char, char)] = + &[('\u{11100}', '\u{11134}'), ('𑄶', '𑅇')]; + +pub const CHAM: &'static [(char, char)] = + &[('ꨀ', '\u{aa36}'), ('ꩀ', 'ꩍ'), ('꩐', '꩙'), ('꩜', '꩟')]; + +pub const CHEROKEE: &'static [(char, char)] = + &[('Ꭰ', 'Ᏽ'), ('ᏸ', 'ᏽ'), ('ꭰ', 'ꮿ')]; + +pub const CHORASMIAN: &'static [(char, char)] = &[('𐾰', '𐿋')]; + +pub const COMMON: &'static [(char, char)] = &[ + ('\0', '@'), + ('[', '`'), + ('{', '©'), + ('«', '¹'), + ('»', '¿'), + ('×', '×'), + ('÷', '÷'), + ('ʹ', 
'˟'), + ('˥', '˩'), + ('ˬ', '˿'), + ('ʹ', 'ʹ'), + (';', ';'), + ('΅', '΅'), + ('·', '·'), + ('\u{605}', '\u{605}'), + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('ـ', 'ـ'), + ('\u{6dd}', '\u{6dd}'), + ('\u{8e2}', '\u{8e2}'), + ('।', '॥'), + ('฿', '฿'), + ('࿕', '࿘'), + ('჻', '჻'), + ('᛫', '᛭'), + ('᜵', '᜶'), + ('᠂', '᠃'), + ('᠅', '᠅'), + ('᳓', '᳓'), + ('᳡', '᳡'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', '᳷'), + ('ᳺ', 'ᳺ'), + ('\u{2000}', '\u{200b}'), + ('\u{200e}', '\u{2064}'), + ('\u{2066}', '⁰'), + ('⁴', '⁾'), + ('₀', '₎'), + ('₠', '⃀'), + ('℀', '℥'), + ('℧', '℩'), + ('ℬ', 'ℱ'), + ('ℳ', '⅍'), + ('⅏', '⅟'), + ('↉', '↋'), + ('←', '␩'), + ('⑀', '⑊'), + ('①', '⟿'), + ('⤀', '⭳'), + ('⭶', '⮕'), + ('⮗', '⯿'), + ('⸀', '⹝'), + ('⿰', '〄'), + ('〆', '〆'), + ('〈', '〠'), + ('〰', '〷'), + ('〼', '〿'), + ('゛', '゜'), + ('゠', '゠'), + ('・', 'ー'), + ('㆐', '㆟'), + ('㇀', '㇥'), + ('㇯', '㇯'), + ('㈠', '㉟'), + ('㉿', '㋏'), + ('㋿', '㋿'), + ('㍘', '㏿'), + ('䷀', '䷿'), + ('꜀', '꜡'), + ('ꞈ', '꞊'), + ('꠰', '꠹'), + ('꤮', '꤮'), + ('ꧏ', 'ꧏ'), + ('꭛', '꭛'), + ('꭪', '꭫'), + ('﴾', '﴿'), + ('︐', '︙'), + ('︰', '﹒'), + ('﹔', '﹦'), + ('﹨', '﹫'), + ('\u{feff}', '\u{feff}'), + ('!', '@'), + ('[', '`'), + ('{', '・'), + ('ー', 'ー'), + ('\u{ff9e}', '\u{ff9f}'), + ('¢', '₩'), + ('│', '○'), + ('\u{fff9}', '�'), + ('𐄀', '𐄂'), + ('𐄇', '𐄳'), + ('𐄷', '𐄿'), + ('𐆐', '𐆜'), + ('𐇐', '𐇼'), + ('𐋡', '𐋻'), + ('\u{1bca0}', '\u{1bca3}'), + ('𜰀', '𜳹'), + ('𜴀', '𜺳'), + ('𜽐', '𜿃'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄩', '\u{1d166}'), + ('𝅪', '\u{1d17a}'), + ('𝆃', '𝆄'), + ('𝆌', '𝆩'), + ('𝆮', '𝇪'), + ('𝋀', '𝋓'), + ('𝋠', '𝋳'), + ('𝌀', '𝍖'), + ('𝍠', '𝍸'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝟋'), + ('𝟎', '𝟿'), + ('𞱱', '𞲴'), + ('𞴁', '𞴽'), + ('🀀', '🀫'), + ('🀰', '🂓'), + ('🂠', '🂮'), + ('🂱', '🂿'), + ('🃁', '🃏'), + ('🃑', 
'🃵'), + ('🄀', '🆭'), + ('🇦', '🇿'), + ('🈁', '🈂'), + ('🈐', '🈻'), + ('🉀', '🉈'), + ('🉐', '🉑'), + ('🉠', '🉥'), + ('🌀', '🛗'), + ('🛜', '🛬'), + ('🛰', '🛼'), + ('🜀', '🝶'), + ('🝻', '🟙'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), + ('🢰', '🢻'), + ('🣀', '🣁'), + ('🤀', '🩓'), + ('🩠', '🩭'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), + ('🬀', '🮒'), + ('🮔', '🯹'), + ('\u{e0001}', '\u{e0001}'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const COPTIC: &'static [(char, char)] = + &[('Ϣ', 'ϯ'), ('Ⲁ', 'ⳳ'), ('⳹', '⳿')]; + +pub const CUNEIFORM: &'static [(char, char)] = + &[('𒀀', '𒎙'), ('𒐀', '𒑮'), ('𒑰', '𒑴'), ('𒒀', '𒕃')]; + +pub const CYPRIOT: &'static [(char, char)] = + &[('𐠀', '𐠅'), ('𐠈', '𐠈'), ('𐠊', '𐠵'), ('𐠷', '𐠸'), ('𐠼', '𐠼'), ('𐠿', '𐠿')]; + +pub const CYPRO_MINOAN: &'static [(char, char)] = &[('𒾐', '𒿲')]; + +pub const CYRILLIC: &'static [(char, char)] = &[ + ('Ѐ', '\u{484}'), + ('\u{487}', 'ԯ'), + ('ᲀ', 'ᲊ'), + ('ᴫ', 'ᴫ'), + ('ᵸ', 'ᵸ'), + ('\u{2de0}', '\u{2dff}'), + ('Ꙁ', '\u{a69f}'), + ('\u{fe2e}', '\u{fe2f}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), +]; + +pub const DESERET: &'static [(char, char)] = &[('𐐀', '𐑏')]; + +pub const DEVANAGARI: &'static [(char, char)] = &[ + ('\u{900}', 'ॐ'), + ('\u{955}', '\u{963}'), + ('०', 'ॿ'), + ('\u{a8e0}', '\u{a8ff}'), + ('𑬀', '𑬉'), +]; + +pub const DIVES_AKURU: &'static [(char, char)] = &[ + ('𑤀', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '𑥆'), + ('𑥐', '𑥙'), +]; + +pub const DOGRA: &'static [(char, char)] = &[('𑠀', '𑠻')]; + +pub const DUPLOYAN: &'static [(char, char)] = + &[('𛰀', '𛱪'), ('𛱰', '𛱼'), ('𛲀', '𛲈'), ('𛲐', '𛲙'), ('𛲜', '𛲟')]; + +pub const EGYPTIAN_HIEROGLYPHS: &'static [(char, char)] = + &[('𓀀', '\u{13455}'), ('𓑠', '𔏺')]; + +pub const ELBASAN: &'static [(char, char)] = &[('𐔀', '𐔧')]; + +pub const ELYMAIC: &'static [(char, char)] = &[('𐿠', '𐿶')]; + +pub const ETHIOPIC: &'static [(char, char)] = &[ 
+ ('ሀ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('\u{135d}', '፼'), + ('ᎀ', '᎙'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), +]; + +pub const GARAY: &'static [(char, char)] = + &[('𐵀', '𐵥'), ('\u{10d69}', '𐶅'), ('𐶎', '𐶏')]; + +pub const GEORGIAN: &'static [(char, char)] = &[ + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ჿ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), +]; + +pub const GLAGOLITIC: &'static [(char, char)] = &[ + ('Ⰰ', 'ⱟ'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), +]; + +pub const GOTHIC: &'static [(char, char)] = &[('𐌰', '𐍊')]; + +pub const GRANTHA: &'static [(char, char)] = &[ + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133c}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), +]; + +pub const GREEK: &'static [(char, char)] = &[ + ('Ͱ', 'ͳ'), + ('͵', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('΄', '΄'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϡ'), + ('ϰ', 'Ͽ'), + ('ᴦ', 'ᴪ'), + ('ᵝ', 'ᵡ'), + ('ᵦ', 'ᵪ'), + ('ᶿ', 'ᶿ'), + ('ἀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ῄ'), + ('ῆ', 'ΐ'), + ('ῖ', 'Ί'), + ('῝', '`'), + ('ῲ', 'ῴ'), + ('ῶ', '῾'), + ('Ω', 'Ω'), + ('ꭥ', 'ꭥ'), + ('𐅀', '𐆎'), + ('𐆠', '𐆠'), + ('𝈀', '𝉅'), +]; + +pub const 
GUJARATI: &'static [(char, char)] = &[ + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('૦', '૱'), + ('ૹ', '\u{aff}'), +]; + +pub const GUNJALA_GONDI: &'static [(char, char)] = &[ + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), +]; + +pub const GURMUKHI: &'static [(char, char)] = &[ + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '੶'), +]; + +pub const GURUNG_KHEMA: &'static [(char, char)] = &[('𖄀', '𖄹')]; + +pub const HAN: &'static [(char, char)] = &[ + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('々', '々'), + ('〇', '〇'), + ('〡', '〩'), + ('〸', '〻'), + ('㐀', '䶿'), + ('一', '鿿'), + ('豈', '舘'), + ('並', '龎'), + ('𖿢', '𖿣'), + ('\u{16ff0}', '\u{16ff1}'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const HANGUL: &'static [(char, char)] = &[ + ('ᄀ', 'ᇿ'), + ('\u{302e}', '\u{302f}'), + ('ㄱ', 'ㆎ'), + ('㈀', '㈞'), + ('㉠', '㉾'), + ('ꥠ', 'ꥼ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), +]; + +pub const HANIFI_ROHINGYA: &'static [(char, char)] = + &[('𐴀', '\u{10d27}'), ('𐴰', '𐴹')]; + +pub const HANUNOO: &'static [(char, char)] = &[('ᜠ', '\u{1734}')]; + +pub const HATRAN: &'static [(char, char)] = + &[('𐣠', '𐣲'), ('𐣴', '𐣵'), ('𐣻', '𐣿')]; + +pub const HEBREW: &'static [(char, char)] = &[ + ('\u{591}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', '״'), + ('יִ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﭏ'), +]; + +pub const HIRAGANA: &'static [(char, char)] = &[ 
+ ('ぁ', 'ゖ'), + ('ゝ', 'ゟ'), + ('𛀁', '𛄟'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('🈀', '🈀'), +]; + +pub const IMPERIAL_ARAMAIC: &'static [(char, char)] = + &[('𐡀', '𐡕'), ('𐡗', '𐡟')]; + +pub const INHERITED: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{485}', '\u{486}'), + ('\u{64b}', '\u{655}'), + ('\u{670}', '\u{670}'), + ('\u{951}', '\u{954}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce0}'), + ('\u{1ce2}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200c}', '\u{200d}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{302a}', '\u{302d}'), + ('\u{3099}', '\u{309a}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2d}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{1133b}', '\u{1133b}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d167}', '\u{1d169}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const INSCRIPTIONAL_PAHLAVI: &'static [(char, char)] = + &[('𐭠', '𐭲'), ('𐭸', '𐭿')]; + +pub const INSCRIPTIONAL_PARTHIAN: &'static [(char, char)] = + &[('𐭀', '𐭕'), ('𐭘', '𐭟')]; + +pub const JAVANESE: &'static [(char, char)] = + &[('\u{a980}', '꧍'), ('꧐', '꧙'), ('꧞', '꧟')]; + +pub const KAITHI: &'static [(char, char)] = + &[('\u{11080}', '\u{110c2}'), ('\u{110cd}', '\u{110cd}')]; + +pub const KANNADA: &'static [(char, char)] = &[ + ('ಀ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('\u{cbc}', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('೦', '೯'), + ('ೱ', 'ೳ'), +]; + +pub const KATAKANA: &'static [(char, char)] = &[ + ('ァ', 'ヺ'), + ('ヽ', 'ヿ'), + ('ㇰ', 'ㇿ'), + ('㋐', '㋾'), + ('㌀', '㍗'), + ('ヲ', 'ッ'), + ('ア', 'ン'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛀀'), + ('𛄠', '𛄢'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), +]; + +pub const KAWI: 
&'static [(char, char)] = + &[('\u{11f00}', '𑼐'), ('𑼒', '\u{11f3a}'), ('𑼾', '\u{11f5a}')]; + +pub const KAYAH_LI: &'static [(char, char)] = &[('꤀', '\u{a92d}'), ('꤯', '꤯')]; + +pub const KHAROSHTHI: &'static [(char, char)] = &[ + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '𐩈'), + ('𐩐', '𐩘'), +]; + +pub const KHITAN_SMALL_SCRIPT: &'static [(char, char)] = + &[('\u{16fe4}', '\u{16fe4}'), ('𘬀', '𘳕'), ('𘳿', '𘳿')]; + +pub const KHMER: &'static [(char, char)] = + &[('ក', '\u{17dd}'), ('០', '៩'), ('៰', '៹'), ('᧠', '᧿')]; + +pub const KHOJKI: &'static [(char, char)] = &[('𑈀', '𑈑'), ('𑈓', '\u{11241}')]; + +pub const KHUDAWADI: &'static [(char, char)] = + &[('𑊰', '\u{112ea}'), ('𑋰', '𑋹')]; + +pub const KIRAT_RAI: &'static [(char, char)] = &[('𖵀', '𖵹')]; + +pub const LAO: &'static [(char, char)] = &[ + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('໐', '໙'), + ('ໜ', 'ໟ'), +]; + +pub const LATIN: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ʸ'), + ('ˠ', 'ˤ'), + ('ᴀ', 'ᴥ'), + ('ᴬ', 'ᵜ'), + ('ᵢ', 'ᵥ'), + ('ᵫ', 'ᵷ'), + ('ᵹ', 'ᶾ'), + ('Ḁ', 'ỿ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('K', 'Å'), + ('Ⅎ', 'Ⅎ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⱡ', 'Ɀ'), + ('Ꜣ', 'ꞇ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꟿ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭤ'), + ('ꭦ', 'ꭩ'), + ('ff', 'st'), + ('A', 'Z'), + ('a', 'z'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), +]; + +pub const LEPCHA: &'static [(char, char)] = + &[('ᰀ', '\u{1c37}'), ('᰻', '᱉'), ('ᱍ', 'ᱏ')]; + +pub const LIMBU: &'static [(char, char)] = &[ + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥀', '᥀'), + ('᥄', '᥏'), +]; + +pub const LINEAR_A: &'static [(char, char)] = + &[('𐘀', '𐜶'), ('𐝀', '𐝕'), ('𐝠', '𐝧')]; + 
+pub const LINEAR_B: &'static [(char, char)] = &[ + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), +]; + +pub const LISU: &'static [(char, char)] = &[('ꓐ', '꓿'), ('𑾰', '𑾰')]; + +pub const LYCIAN: &'static [(char, char)] = &[('𐊀', '𐊜')]; + +pub const LYDIAN: &'static [(char, char)] = &[('𐤠', '𐤹'), ('𐤿', '𐤿')]; + +pub const MAHAJANI: &'static [(char, char)] = &[('𑅐', '𑅶')]; + +pub const MAKASAR: &'static [(char, char)] = &[('𑻠', '𑻸')]; + +pub const MALAYALAM: &'static [(char, char)] = &[ + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', '൏'), + ('ൔ', '\u{d63}'), + ('൦', 'ൿ'), +]; + +pub const MANDAIC: &'static [(char, char)] = &[('ࡀ', '\u{85b}'), ('࡞', '࡞')]; + +pub const MANICHAEAN: &'static [(char, char)] = + &[('𐫀', '\u{10ae6}'), ('𐫫', '𐫶')]; + +pub const MARCHEN: &'static [(char, char)] = + &[('𑱰', '𑲏'), ('\u{11c92}', '\u{11ca7}'), ('𑲩', '\u{11cb6}')]; + +pub const MASARAM_GONDI: &'static [(char, char)] = &[ + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), +]; + +pub const MEDEFAIDRIN: &'static [(char, char)] = &[('𖹀', '𖺚')]; + +pub const MEETEI_MAYEK: &'static [(char, char)] = + &[('ꫠ', '\u{aaf6}'), ('ꯀ', '\u{abed}'), ('꯰', '꯹')]; + +pub const MENDE_KIKAKUI: &'static [(char, char)] = + &[('𞠀', '𞣄'), ('𞣇', '\u{1e8d6}')]; + +pub const MEROITIC_CURSIVE: &'static [(char, char)] = + &[('𐦠', '𐦷'), ('𐦼', '𐧏'), ('𐧒', '𐧿')]; + +pub const MEROITIC_HIEROGLYPHS: &'static [(char, char)] = &[('𐦀', '𐦟')]; + +pub const MIAO: &'static [(char, char)] = + &[('𖼀', '𖽊'), ('\u{16f4f}', '𖾇'), ('\u{16f8f}', '𖾟')]; + +pub const MODI: &'static [(char, char)] = &[('𑘀', '𑙄'), ('𑙐', '𑙙')]; + +pub const MONGOLIAN: &'static [(char, char)] = + &[('᠀', '᠁'), ('᠄', '᠄'), ('᠆', '᠙'), ('ᠠ', 'ᡸ'), ('ᢀ', 'ᢪ'), ('𑙠', '𑙬')]; + +pub const MRO: &'static [(char, char)] = &[('𖩀', '𖩞'), ('𖩠', '𖩩'), ('𖩮', '𖩯')]; + +pub const 
MULTANI: &'static [(char, char)] = + &[('𑊀', '𑊆'), ('𑊈', '𑊈'), ('𑊊', '𑊍'), ('𑊏', '𑊝'), ('𑊟', '𑊩')]; + +pub const MYANMAR: &'static [(char, char)] = + &[('က', '႟'), ('ꧠ', 'ꧾ'), ('ꩠ', 'ꩿ'), ('𑛐', '𑛣')]; + +pub const NABATAEAN: &'static [(char, char)] = &[('𐢀', '𐢞'), ('𐢧', '𐢯')]; + +pub const NAG_MUNDARI: &'static [(char, char)] = &[('𞓐', '𞓹')]; + +pub const NANDINAGARI: &'static [(char, char)] = + &[('𑦠', '𑦧'), ('𑦪', '\u{119d7}'), ('\u{119da}', '𑧤')]; + +pub const NEW_TAI_LUE: &'static [(char, char)] = + &[('ᦀ', 'ᦫ'), ('ᦰ', 'ᧉ'), ('᧐', '᧚'), ('᧞', '᧟')]; + +pub const NEWA: &'static [(char, char)] = &[('𑐀', '𑑛'), ('𑑝', '𑑡')]; + +pub const NKO: &'static [(char, char)] = &[('߀', 'ߺ'), ('\u{7fd}', '߿')]; + +pub const NUSHU: &'static [(char, char)] = &[('𖿡', '𖿡'), ('𛅰', '𛋻')]; + +pub const NYIAKENG_PUACHUE_HMONG: &'static [(char, char)] = + &[('𞄀', '𞄬'), ('\u{1e130}', '𞄽'), ('𞅀', '𞅉'), ('𞅎', '𞅏')]; + +pub const OGHAM: &'static [(char, char)] = &[('\u{1680}', '᚜')]; + +pub const OL_CHIKI: &'static [(char, char)] = &[('᱐', '᱿')]; + +pub const OL_ONAL: &'static [(char, char)] = &[('𞗐', '𞗺'), ('𞗿', '𞗿')]; + +pub const OLD_HUNGARIAN: &'static [(char, char)] = + &[('𐲀', '𐲲'), ('𐳀', '𐳲'), ('𐳺', '𐳿')]; + +pub const OLD_ITALIC: &'static [(char, char)] = &[('𐌀', '𐌣'), ('𐌭', '𐌯')]; + +pub const OLD_NORTH_ARABIAN: &'static [(char, char)] = &[('𐪀', '𐪟')]; + +pub const OLD_PERMIC: &'static [(char, char)] = &[('𐍐', '\u{1037a}')]; + +pub const OLD_PERSIAN: &'static [(char, char)] = &[('𐎠', '𐏃'), ('𐏈', '𐏕')]; + +pub const OLD_SOGDIAN: &'static [(char, char)] = &[('𐼀', '𐼧')]; + +pub const OLD_SOUTH_ARABIAN: &'static [(char, char)] = &[('𐩠', '𐩿')]; + +pub const OLD_TURKIC: &'static [(char, char)] = &[('𐰀', '𐱈')]; + +pub const OLD_UYGHUR: &'static [(char, char)] = &[('𐽰', '𐾉')]; + +pub const ORIYA: &'static [(char, char)] = &[ + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + 
('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('୦', '୷'), +]; + +pub const OSAGE: &'static [(char, char)] = &[('𐒰', '𐓓'), ('𐓘', '𐓻')]; + +pub const OSMANYA: &'static [(char, char)] = &[('𐒀', '𐒝'), ('𐒠', '𐒩')]; + +pub const PAHAWH_HMONG: &'static [(char, char)] = + &[('𖬀', '𖭅'), ('𖭐', '𖭙'), ('𖭛', '𖭡'), ('𖭣', '𖭷'), ('𖭽', '𖮏')]; + +pub const PALMYRENE: &'static [(char, char)] = &[('𐡠', '𐡿')]; + +pub const PAU_CIN_HAU: &'static [(char, char)] = &[('𑫀', '𑫸')]; + +pub const PHAGS_PA: &'static [(char, char)] = &[('ꡀ', '꡷')]; + +pub const PHOENICIAN: &'static [(char, char)] = &[('𐤀', '𐤛'), ('𐤟', '𐤟')]; + +pub const PSALTER_PAHLAVI: &'static [(char, char)] = + &[('𐮀', '𐮑'), ('𐮙', '𐮜'), ('𐮩', '𐮯')]; + +pub const REJANG: &'static [(char, char)] = &[('ꤰ', '\u{a953}'), ('꥟', '꥟')]; + +pub const RUNIC: &'static [(char, char)] = &[('ᚠ', 'ᛪ'), ('ᛮ', 'ᛸ')]; + +pub const SAMARITAN: &'static [(char, char)] = &[('ࠀ', '\u{82d}'), ('࠰', '࠾')]; + +pub const SAURASHTRA: &'static [(char, char)] = + &[('ꢀ', '\u{a8c5}'), ('꣎', '꣙')]; + +pub const SHARADA: &'static [(char, char)] = &[('\u{11180}', '𑇟')]; + +pub const SHAVIAN: &'static [(char, char)] = &[('𐑐', '𐑿')]; + +pub const SIDDHAM: &'static [(char, char)] = + &[('𑖀', '\u{115b5}'), ('𑖸', '\u{115dd}')]; + +pub const SIGNWRITING: &'static [(char, char)] = + &[('𝠀', '𝪋'), ('\u{1da9b}', '\u{1da9f}'), ('\u{1daa1}', '\u{1daaf}')]; + +pub const SINHALA: &'static [(char, char)] = &[ + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('෦', '෯'), + ('ෲ', '෴'), + ('𑇡', '𑇴'), +]; + +pub const SOGDIAN: &'static [(char, char)] = &[('𐼰', '𐽙')]; + +pub const SORA_SOMPENG: &'static [(char, char)] = &[('𑃐', '𑃨'), ('𑃰', '𑃹')]; + +pub const SOYOMBO: &'static [(char, char)] = &[('𑩐', '𑪢')]; + +pub const SUNDANESE: &'static [(char, char)] = + &[('\u{1b80}', 'ᮿ'), ('᳀', '᳇')]; + +pub const SUNUWAR: &'static 
[(char, char)] = &[('𑯀', '𑯡'), ('𑯰', '𑯹')]; + +pub const SYLOTI_NAGRI: &'static [(char, char)] = &[('ꠀ', '\u{a82c}')]; + +pub const SYRIAC: &'static [(char, char)] = + &[('܀', '܍'), ('\u{70f}', '\u{74a}'), ('ݍ', 'ݏ'), ('ࡠ', 'ࡪ')]; + +pub const TAGALOG: &'static [(char, char)] = &[('ᜀ', '\u{1715}'), ('ᜟ', 'ᜟ')]; + +pub const TAGBANWA: &'static [(char, char)] = + &[('ᝠ', 'ᝬ'), ('ᝮ', 'ᝰ'), ('\u{1772}', '\u{1773}')]; + +pub const TAI_LE: &'static [(char, char)] = &[('ᥐ', 'ᥭ'), ('ᥰ', 'ᥴ')]; + +pub const TAI_THAM: &'static [(char, char)] = &[ + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('᪠', '᪭'), +]; + +pub const TAI_VIET: &'static [(char, char)] = &[('ꪀ', 'ꫂ'), ('ꫛ', '꫟')]; + +pub const TAKRI: &'static [(char, char)] = &[('𑚀', '𑚹'), ('𑛀', '𑛉')]; + +pub const TAMIL: &'static [(char, char)] = &[ + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('௦', '௺'), + ('𑿀', '𑿱'), + ('𑿿', '𑿿'), +]; + +pub const TANGSA: &'static [(char, char)] = &[('𖩰', '𖪾'), ('𖫀', '𖫉')]; + +pub const TANGUT: &'static [(char, char)] = + &[('𖿠', '𖿠'), ('𗀀', '𘟷'), ('𘠀', '𘫿'), ('𘴀', '𘴈')]; + +pub const TELUGU: &'static [(char, char)] = &[ + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('\u{c3c}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('౦', '౯'), + ('౷', '౿'), +]; + +pub const THAANA: &'static [(char, char)] = &[('ހ', 'ޱ')]; + +pub const THAI: &'static [(char, char)] = &[('ก', '\u{e3a}'), ('เ', '๛')]; + +pub const TIBETAN: &'static [(char, char)] = &[ + ('ༀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('྾', '࿌'), + ('࿎', '࿔'), + ('࿙', '࿚'), +]; + +pub const TIFINAGH: &'static [(char, char)] = + &[('ⴰ', 'ⵧ'), ('ⵯ', '⵰'), ('\u{2d7f}', 
'\u{2d7f}')]; + +pub const TIRHUTA: &'static [(char, char)] = &[('𑒀', '𑓇'), ('𑓐', '𑓙')]; + +pub const TODHRI: &'static [(char, char)] = &[('𐗀', '𐗳')]; + +pub const TOTO: &'static [(char, char)] = &[('𞊐', '\u{1e2ae}')]; + +pub const TULU_TIGALARI: &'static [(char, char)] = &[ + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏕'), + ('𑏗', '𑏘'), + ('\u{113e1}', '\u{113e2}'), +]; + +pub const UGARITIC: &'static [(char, char)] = &[('𐎀', '𐎝'), ('𐎟', '𐎟')]; + +pub const VAI: &'static [(char, char)] = &[('ꔀ', 'ꘫ')]; + +pub const VITHKUQI: &'static [(char, char)] = &[ + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), +]; + +pub const WANCHO: &'static [(char, char)] = &[('𞋀', '𞋹'), ('𞋿', '𞋿')]; + +pub const WARANG_CITI: &'static [(char, char)] = &[('𑢠', '𑣲'), ('𑣿', '𑣿')]; + +pub const YEZIDI: &'static [(char, char)] = + &[('𐺀', '𐺩'), ('\u{10eab}', '𐺭'), ('𐺰', '𐺱')]; + +pub const YI: &'static [(char, char)] = &[('ꀀ', 'ꒌ'), ('꒐', '꓆')]; + +pub const ZANABAZAR_SQUARE: &'static [(char, char)] = &[('𑨀', '\u{11a47}')]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/script_extension.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/script_extension.rs new file mode 100644 index 0000000000000000000000000000000000000000..e3f492e2d6bee1c147e4b4e0fceba9d312ae14d5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/script_extension.rs @@ -0,0 +1,1718 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate script-extension ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
+ +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("Adlam", ADLAM), + ("Ahom", AHOM), + ("Anatolian_Hieroglyphs", ANATOLIAN_HIEROGLYPHS), + ("Arabic", ARABIC), + ("Armenian", ARMENIAN), + ("Avestan", AVESTAN), + ("Balinese", BALINESE), + ("Bamum", BAMUM), + ("Bassa_Vah", BASSA_VAH), + ("Batak", BATAK), + ("Bengali", BENGALI), + ("Bhaiksuki", BHAIKSUKI), + ("Bopomofo", BOPOMOFO), + ("Brahmi", BRAHMI), + ("Braille", BRAILLE), + ("Buginese", BUGINESE), + ("Buhid", BUHID), + ("Canadian_Aboriginal", CANADIAN_ABORIGINAL), + ("Carian", CARIAN), + ("Caucasian_Albanian", CAUCASIAN_ALBANIAN), + ("Chakma", CHAKMA), + ("Cham", CHAM), + ("Cherokee", CHEROKEE), + ("Chorasmian", CHORASMIAN), + ("Common", COMMON), + ("Coptic", COPTIC), + ("Cuneiform", CUNEIFORM), + ("Cypriot", CYPRIOT), + ("Cypro_Minoan", CYPRO_MINOAN), + ("Cyrillic", CYRILLIC), + ("Deseret", DESERET), + ("Devanagari", DEVANAGARI), + ("Dives_Akuru", DIVES_AKURU), + ("Dogra", DOGRA), + ("Duployan", DUPLOYAN), + ("Egyptian_Hieroglyphs", EGYPTIAN_HIEROGLYPHS), + ("Elbasan", ELBASAN), + ("Elymaic", ELYMAIC), + ("Ethiopic", ETHIOPIC), + ("Garay", GARAY), + ("Georgian", GEORGIAN), + ("Glagolitic", GLAGOLITIC), + ("Gothic", GOTHIC), + ("Grantha", GRANTHA), + ("Greek", GREEK), + ("Gujarati", GUJARATI), + ("Gunjala_Gondi", GUNJALA_GONDI), + ("Gurmukhi", GURMUKHI), + ("Gurung_Khema", GURUNG_KHEMA), + ("Han", HAN), + ("Hangul", HANGUL), + ("Hanifi_Rohingya", HANIFI_ROHINGYA), + ("Hanunoo", HANUNOO), + ("Hatran", HATRAN), + ("Hebrew", HEBREW), + ("Hiragana", HIRAGANA), + ("Imperial_Aramaic", IMPERIAL_ARAMAIC), + ("Inherited", INHERITED), + ("Inscriptional_Pahlavi", INSCRIPTIONAL_PAHLAVI), + ("Inscriptional_Parthian", INSCRIPTIONAL_PARTHIAN), + ("Javanese", JAVANESE), + ("Kaithi", KAITHI), + ("Kannada", KANNADA), + ("Katakana", KATAKANA), + ("Kawi", KAWI), + ("Kayah_Li", KAYAH_LI), + ("Kharoshthi", KHAROSHTHI), + ("Khitan_Small_Script", KHITAN_SMALL_SCRIPT), + ("Khmer", KHMER), + ("Khojki", 
KHOJKI), + ("Khudawadi", KHUDAWADI), + ("Kirat_Rai", KIRAT_RAI), + ("Lao", LAO), + ("Latin", LATIN), + ("Lepcha", LEPCHA), + ("Limbu", LIMBU), + ("Linear_A", LINEAR_A), + ("Linear_B", LINEAR_B), + ("Lisu", LISU), + ("Lycian", LYCIAN), + ("Lydian", LYDIAN), + ("Mahajani", MAHAJANI), + ("Makasar", MAKASAR), + ("Malayalam", MALAYALAM), + ("Mandaic", MANDAIC), + ("Manichaean", MANICHAEAN), + ("Marchen", MARCHEN), + ("Masaram_Gondi", MASARAM_GONDI), + ("Medefaidrin", MEDEFAIDRIN), + ("Meetei_Mayek", MEETEI_MAYEK), + ("Mende_Kikakui", MENDE_KIKAKUI), + ("Meroitic_Cursive", MEROITIC_CURSIVE), + ("Meroitic_Hieroglyphs", MEROITIC_HIEROGLYPHS), + ("Miao", MIAO), + ("Modi", MODI), + ("Mongolian", MONGOLIAN), + ("Mro", MRO), + ("Multani", MULTANI), + ("Myanmar", MYANMAR), + ("Nabataean", NABATAEAN), + ("Nag_Mundari", NAG_MUNDARI), + ("Nandinagari", NANDINAGARI), + ("New_Tai_Lue", NEW_TAI_LUE), + ("Newa", NEWA), + ("Nko", NKO), + ("Nushu", NUSHU), + ("Nyiakeng_Puachue_Hmong", NYIAKENG_PUACHUE_HMONG), + ("Ogham", OGHAM), + ("Ol_Chiki", OL_CHIKI), + ("Ol_Onal", OL_ONAL), + ("Old_Hungarian", OLD_HUNGARIAN), + ("Old_Italic", OLD_ITALIC), + ("Old_North_Arabian", OLD_NORTH_ARABIAN), + ("Old_Permic", OLD_PERMIC), + ("Old_Persian", OLD_PERSIAN), + ("Old_Sogdian", OLD_SOGDIAN), + ("Old_South_Arabian", OLD_SOUTH_ARABIAN), + ("Old_Turkic", OLD_TURKIC), + ("Old_Uyghur", OLD_UYGHUR), + ("Oriya", ORIYA), + ("Osage", OSAGE), + ("Osmanya", OSMANYA), + ("Pahawh_Hmong", PAHAWH_HMONG), + ("Palmyrene", PALMYRENE), + ("Pau_Cin_Hau", PAU_CIN_HAU), + ("Phags_Pa", PHAGS_PA), + ("Phoenician", PHOENICIAN), + ("Psalter_Pahlavi", PSALTER_PAHLAVI), + ("Rejang", REJANG), + ("Runic", RUNIC), + ("Samaritan", SAMARITAN), + ("Saurashtra", SAURASHTRA), + ("Sharada", SHARADA), + ("Shavian", SHAVIAN), + ("Siddham", SIDDHAM), + ("SignWriting", SIGNWRITING), + ("Sinhala", SINHALA), + ("Sogdian", SOGDIAN), + ("Sora_Sompeng", SORA_SOMPENG), + ("Soyombo", SOYOMBO), + ("Sundanese", SUNDANESE), + ("Sunuwar", SUNUWAR), + 
("Syloti_Nagri", SYLOTI_NAGRI), + ("Syriac", SYRIAC), + ("Tagalog", TAGALOG), + ("Tagbanwa", TAGBANWA), + ("Tai_Le", TAI_LE), + ("Tai_Tham", TAI_THAM), + ("Tai_Viet", TAI_VIET), + ("Takri", TAKRI), + ("Tamil", TAMIL), + ("Tangsa", TANGSA), + ("Tangut", TANGUT), + ("Telugu", TELUGU), + ("Thaana", THAANA), + ("Thai", THAI), + ("Tibetan", TIBETAN), + ("Tifinagh", TIFINAGH), + ("Tirhuta", TIRHUTA), + ("Todhri", TODHRI), + ("Toto", TOTO), + ("Tulu_Tigalari", TULU_TIGALARI), + ("Ugaritic", UGARITIC), + ("Vai", VAI), + ("Vithkuqi", VITHKUQI), + ("Wancho", WANCHO), + ("Warang_Citi", WARANG_CITI), + ("Yezidi", YEZIDI), + ("Yi", YI), + ("Zanabazar_Square", ZANABAZAR_SQUARE), +]; + +pub const ADLAM: &'static [(char, char)] = &[ + ('؟', '؟'), + ('ـ', 'ـ'), + ('⁏', '⁏'), + ('⹁', '⹁'), + ('𞤀', '𞥋'), + ('𞥐', '𞥙'), + ('𞥞', '𞥟'), +]; + +pub const AHOM: &'static [(char, char)] = + &[('𑜀', '𑜚'), ('\u{1171d}', '\u{1172b}'), ('𑜰', '𑝆')]; + +pub const ANATOLIAN_HIEROGLYPHS: &'static [(char, char)] = &[('𔐀', '𔙆')]; + +pub const ARABIC: &'static [(char, char)] = &[ + ('\u{600}', '\u{604}'), + ('؆', '\u{6dc}'), + ('۞', 'ۿ'), + ('ݐ', 'ݿ'), + ('ࡰ', 'ࢎ'), + ('\u{890}', '\u{891}'), + ('\u{897}', '\u{8e1}'), + ('\u{8e3}', '\u{8ff}'), + ('⁏', '⁏'), + ('⹁', '⹁'), + ('ﭐ', '﯂'), + ('ﯓ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('﷏', '﷏'), + ('ﷰ', '﷿'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('\u{102e0}', '𐋻'), + ('𐹠', '𐹾'), + ('𐻂', '𐻄'), + ('\u{10efc}', '\u{10eff}'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𞻰', '𞻱'), +]; + +pub const ARMENIAN: &'static [(char, char)] = + &[('\u{308}', '\u{308}'), ('Ա', 'Ֆ'), 
('ՙ', '֊'), ('֍', '֏'), ('ﬓ', 'ﬗ')]; + +pub const AVESTAN: &'static [(char, char)] = + &[('·', '·'), ('⸰', '⸱'), ('𐬀', '𐬵'), ('𐬹', '𐬿')]; + +pub const BALINESE: &'static [(char, char)] = &[('\u{1b00}', 'ᭌ'), ('᭎', '᭿')]; + +pub const BAMUM: &'static [(char, char)] = &[('ꚠ', '꛷'), ('𖠀', '𖨸')]; + +pub const BASSA_VAH: &'static [(char, char)] = + &[('𖫐', '𖫭'), ('\u{16af0}', '𖫵')]; + +pub const BATAK: &'static [(char, char)] = &[('ᯀ', '\u{1bf3}'), ('᯼', '᯿')]; + +pub const BENGALI: &'static [(char, char)] = &[ + ('ʼ', 'ʼ'), + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('ঀ', 'ঃ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('\u{9bc}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', 'ৎ'), + ('\u{9d7}', '\u{9d7}'), + ('ড়', 'ঢ়'), + ('য়', '\u{9e3}'), + ('০', '\u{9fe}'), + ('\u{1cd0}', '\u{1cd0}'), + ('\u{1cd2}', '\u{1cd2}'), + ('\u{1cd5}', '\u{1cd6}'), + ('\u{1cd8}', '\u{1cd8}'), + ('᳡', '᳡'), + ('ᳪ', 'ᳪ'), + ('\u{1ced}', '\u{1ced}'), + ('ᳲ', 'ᳲ'), + ('ᳵ', '᳷'), + ('\u{a8f1}', '\u{a8f1}'), +]; + +pub const BHAIKSUKI: &'static [(char, char)] = + &[('𑰀', '𑰈'), ('𑰊', '\u{11c36}'), ('\u{11c38}', '𑱅'), ('𑱐', '𑱬')]; + +pub const BOPOMOFO: &'static [(char, char)] = &[ + ('ˇ', 'ˇ'), + ('ˉ', 'ˋ'), + ('˙', '˙'), + ('˪', '˫'), + ('、', '〃'), + ('〈', '】'), + ('〓', '〟'), + ('\u{302a}', '\u{302d}'), + ('〰', '〰'), + ('〷', '〷'), + ('・', '・'), + ('ㄅ', 'ㄯ'), + ('ㆠ', 'ㆿ'), + ('﹅', '﹆'), + ('。', '・'), +]; + +pub const BRAHMI: &'static [(char, char)] = + &[('𑀀', '𑁍'), ('𑁒', '𑁵'), ('\u{1107f}', '\u{1107f}')]; + +pub const BRAILLE: &'static [(char, char)] = &[('⠀', '⣿')]; + +pub const BUGINESE: &'static [(char, char)] = + &[('ᨀ', '\u{1a1b}'), ('᨞', '᨟'), ('ꧏ', 'ꧏ')]; + +pub const BUHID: &'static [(char, char)] = &[('᜵', '᜶'), ('ᝀ', '\u{1753}')]; + +pub const CANADIAN_ABORIGINAL: &'static [(char, char)] = + &[('᐀', 'ᙿ'), ('ᢰ', 'ᣵ'), ('𑪰', '𑪿')]; + +pub const CARIAN: &'static [(char, char)] = + &[('·', '·'), ('⁚', '⁚'), ('⁝', '⁝'), ('⸱', '⸱'), ('𐊠', '𐋐')]; + +pub const 
CAUCASIAN_ALBANIAN: &'static [(char, char)] = &[ + ('\u{304}', '\u{304}'), + ('\u{331}', '\u{331}'), + ('\u{35e}', '\u{35e}'), + ('𐔰', '𐕣'), + ('𐕯', '𐕯'), +]; + +pub const CHAKMA: &'static [(char, char)] = + &[('০', '৯'), ('၀', '၉'), ('\u{11100}', '\u{11134}'), ('𑄶', '𑅇')]; + +pub const CHAM: &'static [(char, char)] = + &[('ꨀ', '\u{aa36}'), ('ꩀ', 'ꩍ'), ('꩐', '꩙'), ('꩜', '꩟')]; + +pub const CHEROKEE: &'static [(char, char)] = &[ + ('\u{300}', '\u{302}'), + ('\u{304}', '\u{304}'), + ('\u{30b}', '\u{30c}'), + ('\u{323}', '\u{324}'), + ('\u{330}', '\u{331}'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ꭰ', 'ꮿ'), +]; + +pub const CHORASMIAN: &'static [(char, char)] = &[('𐾰', '𐿋')]; + +pub const COMMON: &'static [(char, char)] = &[ + ('\0', '@'), + ('[', '`'), + ('{', '©'), + ('«', '¶'), + ('¸', '¹'), + ('»', '¿'), + ('×', '×'), + ('÷', '÷'), + ('ʹ', 'ʻ'), + ('ʽ', 'ˆ'), + ('ˈ', 'ˈ'), + ('ˌ', 'ˌ'), + ('ˎ', '˖'), + ('˘', '˘'), + ('˚', '˟'), + ('˥', '˩'), + ('ˬ', '˿'), + (';', ';'), + ('΅', '΅'), + ('·', '·'), + ('\u{605}', '\u{605}'), + ('\u{6dd}', '\u{6dd}'), + ('\u{8e2}', '\u{8e2}'), + ('฿', '฿'), + ('࿕', '࿘'), + ('\u{2000}', '\u{200b}'), + ('\u{200e}', '\u{202e}'), + ('‰', '⁎'), + ('⁐', '⁙'), + ('⁛', '⁜'), + ('⁞', '\u{2064}'), + ('\u{2066}', '⁰'), + ('⁴', '⁾'), + ('₀', '₎'), + ('₠', '⃀'), + ('℀', '℥'), + ('℧', '℩'), + ('ℬ', 'ℱ'), + ('ℳ', '⅍'), + ('⅏', '⅟'), + ('↉', '↋'), + ('←', '␩'), + ('⑀', '⑊'), + ('①', '⟿'), + ('⤀', '⭳'), + ('⭶', '⮕'), + ('⮗', '⯿'), + ('⸀', '⸖'), + ('⸘', 'ⸯ'), + ('⸲', '⸻'), + ('⸽', '⹀'), + ('⹂', '⹂'), + ('⹄', '⹝'), + ('\u{3000}', '\u{3000}'), + ('〄', '〄'), + ('〒', '〒'), + ('〠', '〠'), + ('〶', '〶'), + ('㉈', '㉟'), + ('㉿', '㉿'), + ('㊱', '㊿'), + ('㋌', '㋏'), + ('㍱', '㍺'), + ('㎀', '㏟'), + ('㏿', '㏿'), + ('䷀', '䷿'), + ('꜈', '꜡'), + ('ꞈ', '꞊'), + ('꭛', '꭛'), + ('꭪', '꭫'), + ('︐', '︙'), + ('︰', '﹄'), + ('﹇', '﹒'), + ('﹔', '﹦'), + ('﹨', '﹫'), + ('\u{feff}', '\u{feff}'), + ('!', '@'), + ('[', '`'), + ('{', '⦆'), + ('¢', '₩'), + ('│', '○'), + ('\u{fff9}', '�'), + ('𐆐', '𐆜'), 
+ ('𐇐', '𐇼'), + ('𜰀', '𜳹'), + ('𜴀', '𜺳'), + ('𜽐', '𜿃'), + ('𝀀', '𝃵'), + ('𝄀', '𝄦'), + ('𝄩', '\u{1d166}'), + ('𝅪', '\u{1d17a}'), + ('𝆃', '𝆄'), + ('𝆌', '𝆩'), + ('𝆮', '𝇪'), + ('𝋀', '𝋓'), + ('𝋠', '𝋳'), + ('𝌀', '𝍖'), + ('𝍲', '𝍸'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝟋'), + ('𝟎', '𝟿'), + ('𞱱', '𞲴'), + ('𞴁', '𞴽'), + ('🀀', '🀫'), + ('🀰', '🂓'), + ('🂠', '🂮'), + ('🂱', '🂿'), + ('🃁', '🃏'), + ('🃑', '🃵'), + ('🄀', '🆭'), + ('🇦', '🇿'), + ('🈁', '🈂'), + ('🈐', '🈻'), + ('🉀', '🉈'), + ('🉠', '🉥'), + ('🌀', '🛗'), + ('🛜', '🛬'), + ('🛰', '🛼'), + ('🜀', '🝶'), + ('🝻', '🟙'), + ('🟠', '🟫'), + ('🟰', '🟰'), + ('🠀', '🠋'), + ('🠐', '🡇'), + ('🡐', '🡙'), + ('🡠', '🢇'), + ('🢐', '🢭'), + ('🢰', '🢻'), + ('🣀', '🣁'), + ('🤀', '🩓'), + ('🩠', '🩭'), + ('🩰', '🩼'), + ('🪀', '🪉'), + ('🪏', '🫆'), + ('🫎', '🫜'), + ('🫟', '🫩'), + ('🫰', '🫸'), + ('🬀', '🮒'), + ('🮔', '🯹'), + ('\u{e0001}', '\u{e0001}'), + ('\u{e0020}', '\u{e007f}'), +]; + +pub const COPTIC: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{300}', '\u{300}'), + ('\u{304}', '\u{305}'), + ('\u{307}', '\u{307}'), + ('ʹ', '͵'), + ('Ϣ', 'ϯ'), + ('Ⲁ', 'ⳳ'), + ('⳹', '⳿'), + ('⸗', '⸗'), + ('\u{102e0}', '𐋻'), +]; + +pub const CUNEIFORM: &'static [(char, char)] = + &[('𒀀', '𒎙'), ('𒐀', '𒑮'), ('𒑰', '𒑴'), ('𒒀', '𒕃')]; + +pub const CYPRIOT: &'static [(char, char)] = &[ + ('𐄀', '𐄂'), + ('𐄇', '𐄳'), + ('𐄷', '𐄿'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐠿'), +]; + +pub const CYPRO_MINOAN: &'static [(char, char)] = &[('𐄀', '𐄁'), ('𒾐', '𒿲')]; + +pub const CYRILLIC: &'static [(char, char)] = &[ + ('ʼ', 'ʼ'), + ('\u{300}', '\u{302}'), + ('\u{304}', '\u{304}'), + ('\u{306}', '\u{306}'), + ('\u{308}', '\u{308}'), + ('\u{30b}', '\u{30b}'), + ('\u{311}', '\u{311}'), + ('Ѐ', 'ԯ'), + ('ᲀ', 'ᲊ'), + ('ᴫ', 'ᴫ'), + ('ᵸ', 'ᵸ'), + 
('\u{1df8}', '\u{1df8}'), + ('\u{2de0}', '\u{2dff}'), + ('⹃', '⹃'), + ('Ꙁ', '\u{a69f}'), + ('\u{fe2e}', '\u{fe2f}'), + ('𞀰', '𞁭'), + ('\u{1e08f}', '\u{1e08f}'), +]; + +pub const DESERET: &'static [(char, char)] = &[('𐐀', '𐑏')]; + +pub const DEVANAGARI: &'static [(char, char)] = &[ + ('ʼ', 'ʼ'), + ('\u{900}', '\u{952}'), + ('\u{955}', 'ॿ'), + ('\u{1cd0}', 'ᳶ'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{20f0}', '\u{20f0}'), + ('꠰', '꠹'), + ('\u{a8e0}', '\u{a8ff}'), + ('𑬀', '𑬉'), +]; + +pub const DIVES_AKURU: &'static [(char, char)] = &[ + ('𑤀', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '𑥆'), + ('𑥐', '𑥙'), +]; + +pub const DOGRA: &'static [(char, char)] = + &[('।', '९'), ('꠰', '꠹'), ('𑠀', '𑠻')]; + +pub const DUPLOYAN: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{307}', '\u{308}'), + ('\u{30a}', '\u{30a}'), + ('\u{323}', '\u{324}'), + ('⸼', '⸼'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𛲜', '\u{1bca3}'), +]; + +pub const EGYPTIAN_HIEROGLYPHS: &'static [(char, char)] = + &[('𓀀', '\u{13455}'), ('𓑠', '𔏺')]; + +pub const ELBASAN: &'static [(char, char)] = + &[('·', '·'), ('\u{305}', '\u{305}'), ('𐔀', '𐔧')]; + +pub const ELYMAIC: &'static [(char, char)] = &[('𐿠', '𐿶')]; + +pub const ETHIOPIC: &'static [(char, char)] = &[ + ('\u{30e}', '\u{30e}'), + ('ሀ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('\u{135d}', '፼'), + ('ᎀ', '᎙'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), +]; + +pub const GARAY: &'static [(char, char)] = &[ + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('𐵀', '𐵥'), + ('\u{10d69}', '𐶅'), + ('𐶎', '𐶏'), +]; + +pub const 
GEORGIAN: &'static [(char, char)] = &[ + ('·', '·'), + ('։', '։'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჿ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('⁚', '⁚'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('⸱', '⸱'), +]; + +pub const GLAGOLITIC: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{303}', '\u{303}'), + ('\u{305}', '\u{305}'), + ('\u{484}', '\u{484}'), + ('\u{487}', '\u{487}'), + ('։', '։'), + ('჻', '჻'), + ('⁚', '⁚'), + ('Ⰰ', 'ⱟ'), + ('⹃', '⹃'), + ('\u{a66f}', '\u{a66f}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), +]; + +pub const GOTHIC: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{304}', '\u{305}'), + ('\u{308}', '\u{308}'), + ('\u{331}', '\u{331}'), + ('𐌰', '𐍊'), +]; + +pub const GRANTHA: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('௦', '௳'), + ('\u{1cd0}', '\u{1cd0}'), + ('\u{1cd2}', '᳓'), + ('ᳲ', '\u{1cf4}'), + ('\u{1cf8}', '\u{1cf9}'), + ('\u{20f0}', '\u{20f0}'), + ('\u{11300}', '𑌃'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('\u{1133b}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('𑍐', '𑍐'), + ('\u{11357}', '\u{11357}'), + ('𑍝', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('𑿐', '𑿑'), + ('𑿓', '𑿓'), +]; + +pub const GREEK: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{300}', '\u{301}'), + ('\u{304}', '\u{304}'), + ('\u{306}', '\u{306}'), + ('\u{308}', '\u{308}'), + ('\u{313}', '\u{313}'), + ('\u{342}', '\u{342}'), + ('\u{345}', '\u{345}'), + ('Ͱ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('΄', '΄'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϡ'), + ('ϰ', 'Ͽ'), + ('ᴦ', 'ᴪ'), + ('ᵝ', 'ᵡ'), + ('ᵦ', 'ᵪ'), + ('ᶿ', '\u{1dc1}'), + ('ἀ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ῄ'), + ('ῆ', 'ΐ'), + ('ῖ', 'Ί'), + ('῝', '`'), + 
('ῲ', 'ῴ'), + ('ῶ', '῾'), + ('⁝', '⁝'), + ('Ω', 'Ω'), + ('ꭥ', 'ꭥ'), + ('𐅀', '𐆎'), + ('𐆠', '𐆠'), + ('𝈀', '𝉅'), +]; + +pub const GUJARATI: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{a81}', 'ઃ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('\u{abc}', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('ૐ', 'ૐ'), + ('ૠ', '\u{ae3}'), + ('૦', '૱'), + ('ૹ', '\u{aff}'), + ('꠰', '꠹'), +]; + +pub const GUNJALA_GONDI: &'static [(char, char)] = &[ + ('·', '·'), + ('।', '॥'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '𑶘'), + ('𑶠', '𑶩'), +]; + +pub const GURMUKHI: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{a01}', 'ਃ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('੦', '੶'), + ('꠰', '꠹'), +]; + +pub const GURUNG_KHEMA: &'static [(char, char)] = &[('॥', '॥'), ('𖄀', '𖄹')]; + +pub const HAN: &'static [(char, char)] = &[ + ('·', '·'), + ('⺀', '⺙'), + ('⺛', '⻳'), + ('⼀', '⿕'), + ('⿰', '⿿'), + ('、', '〃'), + ('々', '】'), + ('〓', '〟'), + ('〡', '\u{302d}'), + ('〰', '〰'), + ('〷', '〿'), + ('・', '・'), + ('㆐', '㆟'), + ('㇀', '㇥'), + ('㇯', '㇯'), + ('㈠', '㉇'), + ('㊀', '㊰'), + ('㋀', '㋋'), + ('㋿', '㋿'), + ('㍘', '㍰'), + ('㍻', '㍿'), + ('㏠', '㏾'), + ('㐀', '䶿'), + ('一', '鿿'), + ('꜀', '꜇'), + ('豈', '舘'), + ('並', '龎'), + ('﹅', '﹆'), + ('。', '・'), + ('𖿢', '𖿣'), + ('\u{16ff0}', '\u{16ff1}'), + ('𝍠', '𝍱'), + ('🉐', '🉑'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const HANGUL: &'static [(char, char)] = &[ + ('ᄀ', 'ᇿ'), + ('、', '〃'), + ('〈', '】'), + ('〓', '〟'), + ('\u{302e}', '〰'), + ('〷', '〷'), + ('・', '・'), + ('ㄱ', 'ㆎ'), + ('㈀', '㈞'), + ('㉠', '㉾'), + ('ꥠ', 
'ꥼ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('﹅', '﹆'), + ('。', '・'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), +]; + +pub const HANIFI_ROHINGYA: &'static [(char, char)] = &[ + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('ـ', 'ـ'), + ('۔', '۔'), + ('𐴀', '\u{10d27}'), + ('𐴰', '𐴹'), +]; + +pub const HANUNOO: &'static [(char, char)] = &[('ᜠ', '᜶')]; + +pub const HATRAN: &'static [(char, char)] = + &[('𐣠', '𐣲'), ('𐣴', '𐣵'), ('𐣻', '𐣿')]; + +pub const HEBREW: &'static [(char, char)] = &[ + ('\u{307}', '\u{308}'), + ('\u{591}', '\u{5c7}'), + ('א', 'ת'), + ('ׯ', '״'), + ('יִ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﭏ'), +]; + +pub const HIRAGANA: &'static [(char, char)] = &[ + ('、', '〃'), + ('〈', '】'), + ('〓', '〟'), + ('〰', '〵'), + ('〷', '〷'), + ('〼', '〽'), + ('ぁ', 'ゖ'), + ('\u{3099}', '゠'), + ('・', 'ー'), + ('﹅', '﹆'), + ('。', '・'), + ('ー', 'ー'), + ('\u{ff9e}', '\u{ff9f}'), + ('𛀁', '𛄟'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('🈀', '🈀'), +]; + +pub const IMPERIAL_ARAMAIC: &'static [(char, char)] = + &[('𐡀', '𐡕'), ('𐡗', '𐡟')]; + +pub const INHERITED: &'static [(char, char)] = &[ + ('\u{30f}', '\u{30f}'), + ('\u{312}', '\u{312}'), + ('\u{314}', '\u{31f}'), + ('\u{321}', '\u{322}'), + ('\u{326}', '\u{32c}'), + ('\u{32f}', '\u{32f}'), + ('\u{332}', '\u{341}'), + ('\u{343}', '\u{344}'), + ('\u{346}', '\u{357}'), + ('\u{359}', '\u{35d}'), + ('\u{35f}', '\u{362}'), + ('\u{953}', '\u{954}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1dc2}', '\u{1df7}'), + ('\u{1df9}', '\u{1df9}'), + ('\u{1dfb}', '\u{1dff}'), + ('\u{200c}', '\u{200d}'), + ('\u{20d0}', '\u{20ef}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2d}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d167}', '\u{1d169}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const INSCRIPTIONAL_PAHLAVI: &'static [(char, char)] = + 
&[('𐭠', '𐭲'), ('𐭸', '𐭿')]; + +pub const INSCRIPTIONAL_PARTHIAN: &'static [(char, char)] = + &[('𐭀', '𐭕'), ('𐭘', '𐭟')]; + +pub const JAVANESE: &'static [(char, char)] = + &[('\u{a980}', '꧍'), ('ꧏ', '꧙'), ('꧞', '꧟')]; + +pub const KAITHI: &'static [(char, char)] = &[ + ('०', '९'), + ('⸱', '⸱'), + ('꠰', '꠹'), + ('\u{11080}', '\u{110c2}'), + ('\u{110cd}', '\u{110cd}'), +]; + +pub const KANNADA: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('ಀ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('\u{cbc}', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('ೝ', 'ೞ'), + ('ೠ', '\u{ce3}'), + ('೦', '೯'), + ('ೱ', 'ೳ'), + ('\u{1cd0}', '\u{1cd0}'), + ('\u{1cd2}', '᳓'), + ('\u{1cda}', '\u{1cda}'), + ('ᳲ', 'ᳲ'), + ('\u{1cf4}', '\u{1cf4}'), + ('꠰', '꠵'), +]; + +pub const KATAKANA: &'static [(char, char)] = &[ + ('\u{305}', '\u{305}'), + ('\u{323}', '\u{323}'), + ('、', '〃'), + ('〈', '】'), + ('〓', '〟'), + ('〰', '〵'), + ('〷', '〷'), + ('〼', '〽'), + ('\u{3099}', '゜'), + ('゠', 'ヿ'), + ('ㇰ', 'ㇿ'), + ('㋐', '㋾'), + ('㌀', '㍗'), + ('﹅', '﹆'), + ('。', '\u{ff9f}'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛀀'), + ('𛄠', '𛄢'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), +]; + +pub const KAWI: &'static [(char, char)] = + &[('\u{11f00}', '𑼐'), ('𑼒', '\u{11f3a}'), ('𑼾', '\u{11f5a}')]; + +pub const KAYAH_LI: &'static [(char, char)] = &[('꤀', '꤯')]; + +pub const KHAROSHTHI: &'static [(char, char)] = &[ + ('𐨀', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '𐩈'), + ('𐩐', '𐩘'), +]; + +pub const KHITAN_SMALL_SCRIPT: &'static [(char, char)] = + &[('\u{16fe4}', '\u{16fe4}'), ('𘬀', '𘳕'), ('𘳿', '𘳿')]; + +pub const KHMER: &'static [(char, char)] = + &[('ក', '\u{17dd}'), ('០', '៩'), ('៰', '៹'), ('᧠', '᧿')]; + +pub const KHOJKI: &'static [(char, char)] = + &[('૦', '૯'), ('꠰', '꠹'), ('𑈀', '𑈑'), ('𑈓', '\u{11241}')]; + +pub const KHUDAWADI: &'static 
[(char, char)] = + &[('।', '॥'), ('꠰', '꠹'), ('𑊰', '\u{112ea}'), ('𑋰', '𑋹')]; + +pub const KIRAT_RAI: &'static [(char, char)] = &[('𖵀', '𖵹')]; + +pub const LAO: &'static [(char, char)] = &[ + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('\u{ec8}', '\u{ece}'), + ('໐', '໙'), + ('ໜ', 'ໟ'), +]; + +pub const LATIN: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('·', '·'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', 'ʸ'), + ('ʼ', 'ʼ'), + ('ˇ', 'ˇ'), + ('ˉ', 'ˋ'), + ('ˍ', 'ˍ'), + ('˗', '˗'), + ('˙', '˙'), + ('ˠ', 'ˤ'), + ('\u{300}', '\u{30e}'), + ('\u{310}', '\u{311}'), + ('\u{313}', '\u{313}'), + ('\u{320}', '\u{320}'), + ('\u{323}', '\u{325}'), + ('\u{32d}', '\u{32e}'), + ('\u{330}', '\u{331}'), + ('\u{358}', '\u{358}'), + ('\u{35e}', '\u{35e}'), + ('\u{363}', '\u{36f}'), + ('\u{485}', '\u{486}'), + ('\u{951}', '\u{952}'), + ('჻', '჻'), + ('ᴀ', 'ᴥ'), + ('ᴬ', 'ᵜ'), + ('ᵢ', 'ᵥ'), + ('ᵫ', 'ᵷ'), + ('ᵹ', 'ᶾ'), + ('\u{1df8}', '\u{1df8}'), + ('Ḁ', 'ỿ'), + ('\u{202f}', '\u{202f}'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('\u{20f0}', '\u{20f0}'), + ('K', 'Å'), + ('Ⅎ', 'Ⅎ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⱡ', 'Ɀ'), + ('⸗', '⸗'), + ('꜀', '꜇'), + ('Ꜣ', 'ꞇ'), + ('Ꞌ', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꟿ'), + ('꤮', '꤮'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭤ'), + ('ꭦ', 'ꭩ'), + ('ff', 'st'), + ('A', 'Z'), + ('a', 'z'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), +]; + +pub const LEPCHA: &'static [(char, char)] = + &[('ᰀ', '\u{1c37}'), ('᰻', '᱉'), ('ᱍ', 'ᱏ')]; + +pub const LIMBU: &'static [(char, char)] = &[ + ('॥', '॥'), + ('ᤀ', 'ᤞ'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('᥀', '᥀'), + ('᥄', '᥏'), +]; + +pub const LINEAR_A: &'static [(char, char)] = + &[('𐄇', '𐄳'), ('𐘀', '𐜶'), ('𐝀', '𐝕'), ('𐝠', '𐝧')]; + +pub const LINEAR_B: &'static [(char, char)] = &[ + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + 
('𐂀', '𐃺'), + ('𐄀', '𐄂'), + ('𐄇', '𐄳'), + ('𐄷', '𐄿'), +]; + +pub const LISU: &'static [(char, char)] = + &[('ʼ', 'ʼ'), ('ˍ', 'ˍ'), ('《', '》'), ('ꓐ', '꓿'), ('𑾰', '𑾰')]; + +pub const LYCIAN: &'static [(char, char)] = &[('⁚', '⁚'), ('𐊀', '𐊜')]; + +pub const LYDIAN: &'static [(char, char)] = + &[('·', '·'), ('⸱', '⸱'), ('𐤠', '𐤹'), ('𐤿', '𐤿')]; + +pub const MAHAJANI: &'static [(char, char)] = + &[('·', '·'), ('।', '९'), ('꠰', '꠹'), ('𑅐', '𑅶')]; + +pub const MAKASAR: &'static [(char, char)] = &[('𑻠', '𑻸')]; + +pub const MALAYALAM: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{d00}', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', '൏'), + ('ൔ', '\u{d63}'), + ('൦', 'ൿ'), + ('\u{1cda}', '\u{1cda}'), + ('ᳲ', 'ᳲ'), + ('꠰', '꠲'), +]; + +pub const MANDAIC: &'static [(char, char)] = + &[('ـ', 'ـ'), ('ࡀ', '\u{85b}'), ('࡞', '࡞')]; + +pub const MANICHAEAN: &'static [(char, char)] = + &[('ـ', 'ـ'), ('𐫀', '\u{10ae6}'), ('𐫫', '𐫶')]; + +pub const MARCHEN: &'static [(char, char)] = + &[('𑱰', '𑲏'), ('\u{11c92}', '\u{11ca7}'), ('𑲩', '\u{11cb6}')]; + +pub const MASARAM_GONDI: &'static [(char, char)] = &[ + ('।', '॥'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d47}'), + ('𑵐', '𑵙'), +]; + +pub const MEDEFAIDRIN: &'static [(char, char)] = &[('𖹀', '𖺚')]; + +pub const MEETEI_MAYEK: &'static [(char, char)] = + &[('ꫠ', '\u{aaf6}'), ('ꯀ', '\u{abed}'), ('꯰', '꯹')]; + +pub const MENDE_KIKAKUI: &'static [(char, char)] = + &[('𞠀', '𞣄'), ('𞣇', '\u{1e8d6}')]; + +pub const MEROITIC_CURSIVE: &'static [(char, char)] = + &[('𐦠', '𐦷'), ('𐦼', '𐧏'), ('𐧒', '𐧿')]; + +pub const MEROITIC_HIEROGLYPHS: &'static [(char, char)] = + &[('⁝', '⁝'), ('𐦀', '𐦟')]; + +pub const MIAO: &'static [(char, char)] = + &[('𖼀', '𖽊'), ('\u{16f4f}', '𖾇'), ('\u{16f8f}', '𖾟')]; + +pub const MODI: &'static [(char, char)] = + &[('꠰', '꠹'), ('𑘀', '𑙄'), ('𑙐', '𑙙')]; + +pub const MONGOLIAN: &'static [(char, 
char)] = &[ + ('᠀', '᠙'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢪ'), + ('\u{202f}', '\u{202f}'), + ('、', '。'), + ('〈', '》'), + ('𑙠', '𑙬'), +]; + +pub const MRO: &'static [(char, char)] = &[('𖩀', '𖩞'), ('𖩠', '𖩩'), ('𖩮', '𖩯')]; + +pub const MULTANI: &'static [(char, char)] = + &[('੦', '੯'), ('𑊀', '𑊆'), ('𑊈', '𑊈'), ('𑊊', '𑊍'), ('𑊏', '𑊝'), ('𑊟', '𑊩')]; + +pub const MYANMAR: &'static [(char, char)] = + &[('က', '႟'), ('꤮', '꤮'), ('ꧠ', 'ꧾ'), ('ꩠ', 'ꩿ'), ('𑛐', '𑛣')]; + +pub const NABATAEAN: &'static [(char, char)] = &[('𐢀', '𐢞'), ('𐢧', '𐢯')]; + +pub const NAG_MUNDARI: &'static [(char, char)] = &[('𞓐', '𞓹')]; + +pub const NANDINAGARI: &'static [(char, char)] = &[ + ('।', '॥'), + ('೦', '೯'), + ('ᳩ', 'ᳩ'), + ('ᳲ', 'ᳲ'), + ('ᳺ', 'ᳺ'), + ('꠰', '꠵'), + ('𑦠', '𑦧'), + ('𑦪', '\u{119d7}'), + ('\u{119da}', '𑧤'), +]; + +pub const NEW_TAI_LUE: &'static [(char, char)] = + &[('ᦀ', 'ᦫ'), ('ᦰ', 'ᧉ'), ('᧐', '᧚'), ('᧞', '᧟')]; + +pub const NEWA: &'static [(char, char)] = &[('𑐀', '𑑛'), ('𑑝', '𑑡')]; + +pub const NKO: &'static [(char, char)] = &[ + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('߀', 'ߺ'), + ('\u{7fd}', '߿'), + ('﴾', '﴿'), +]; + +pub const NUSHU: &'static [(char, char)] = &[('𖿡', '𖿡'), ('𛅰', '𛋻')]; + +pub const NYIAKENG_PUACHUE_HMONG: &'static [(char, char)] = + &[('𞄀', '𞄬'), ('\u{1e130}', '𞄽'), ('𞅀', '𞅉'), ('𞅎', '𞅏')]; + +pub const OGHAM: &'static [(char, char)] = &[('\u{1680}', '᚜')]; + +pub const OL_CHIKI: &'static [(char, char)] = &[('᱐', '᱿')]; + +pub const OL_ONAL: &'static [(char, char)] = + &[('।', '॥'), ('𞗐', '𞗺'), ('𞗿', '𞗿')]; + +pub const OLD_HUNGARIAN: &'static [(char, char)] = &[ + ('⁚', '⁚'), + ('⁝', '⁝'), + ('⸱', '⸱'), + ('⹁', '⹁'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐳺', '𐳿'), +]; + +pub const OLD_ITALIC: &'static [(char, char)] = &[('𐌀', '𐌣'), ('𐌭', '𐌯')]; + +pub const OLD_NORTH_ARABIAN: &'static [(char, char)] = &[('𐪀', '𐪟')]; + +pub const OLD_PERMIC: &'static [(char, char)] = &[ + ('·', '·'), + ('\u{300}', '\u{300}'), + ('\u{306}', '\u{308}'), + ('\u{313}', '\u{313}'), + ('\u{483}', 
'\u{483}'), + ('𐍐', '\u{1037a}'), +]; + +pub const OLD_PERSIAN: &'static [(char, char)] = &[('𐎠', '𐏃'), ('𐏈', '𐏕')]; + +pub const OLD_SOGDIAN: &'static [(char, char)] = &[('𐼀', '𐼧')]; + +pub const OLD_SOUTH_ARABIAN: &'static [(char, char)] = &[('𐩠', '𐩿')]; + +pub const OLD_TURKIC: &'static [(char, char)] = + &[('⁚', '⁚'), ('⸰', '⸰'), ('𐰀', '𐱈')]; + +pub const OLD_UYGHUR: &'static [(char, char)] = + &[('ـ', 'ـ'), ('𐫲', '𐫲'), ('𐽰', '𐾉')]; + +pub const ORIYA: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{b01}', 'ଃ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('\u{b3c}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', '\u{b63}'), + ('୦', '୷'), + ('\u{1cda}', '\u{1cda}'), + ('ᳲ', 'ᳲ'), +]; + +pub const OSAGE: &'static [(char, char)] = &[ + ('\u{301}', '\u{301}'), + ('\u{304}', '\u{304}'), + ('\u{30b}', '\u{30b}'), + ('\u{358}', '\u{358}'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), +]; + +pub const OSMANYA: &'static [(char, char)] = &[('𐒀', '𐒝'), ('𐒠', '𐒩')]; + +pub const PAHAWH_HMONG: &'static [(char, char)] = + &[('𖬀', '𖭅'), ('𖭐', '𖭙'), ('𖭛', '𖭡'), ('𖭣', '𖭷'), ('𖭽', '𖮏')]; + +pub const PALMYRENE: &'static [(char, char)] = &[('𐡠', '𐡿')]; + +pub const PAU_CIN_HAU: &'static [(char, char)] = &[('𑫀', '𑫸')]; + +pub const PHAGS_PA: &'static [(char, char)] = &[ + ('᠂', '᠃'), + ('᠅', '᠅'), + ('\u{202f}', '\u{202f}'), + ('。', '。'), + ('ꡀ', '꡷'), +]; + +pub const PHOENICIAN: &'static [(char, char)] = &[('𐤀', '𐤛'), ('𐤟', '𐤟')]; + +pub const PSALTER_PAHLAVI: &'static [(char, char)] = + &[('ـ', 'ـ'), ('𐮀', '𐮑'), ('𐮙', '𐮜'), ('𐮩', '𐮯')]; + +pub const REJANG: &'static [(char, char)] = &[('ꤰ', '\u{a953}'), ('꥟', '꥟')]; + +pub const RUNIC: &'static [(char, char)] = &[('ᚠ', 'ᛸ')]; + +pub const SAMARITAN: &'static [(char, char)] = + &[('ࠀ', '\u{82d}'), ('࠰', '࠾'), ('⸱', '⸱')]; + +pub const SAURASHTRA: &'static [(char, char)] = + &[('ꢀ', '\u{a8c5}'), ('꣎', '꣙')]; + +pub const 
SHARADA: &'static [(char, char)] = &[ + ('\u{951}', '\u{951}'), + ('\u{1cd7}', '\u{1cd7}'), + ('\u{1cd9}', '\u{1cd9}'), + ('\u{1cdc}', '\u{1cdd}'), + ('\u{1ce0}', '\u{1ce0}'), + ('꠰', '꠵'), + ('꠸', '꠸'), + ('\u{11180}', '𑇟'), +]; + +pub const SHAVIAN: &'static [(char, char)] = &[('·', '·'), ('𐑐', '𐑿')]; + +pub const SIDDHAM: &'static [(char, char)] = + &[('𑖀', '\u{115b5}'), ('𑖸', '\u{115dd}')]; + +pub const SIGNWRITING: &'static [(char, char)] = + &[('𝠀', '𝪋'), ('\u{1da9b}', '\u{1da9f}'), ('\u{1daa1}', '\u{1daaf}')]; + +pub const SINHALA: &'static [(char, char)] = &[ + ('।', '॥'), + ('\u{d81}', 'ඃ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('෦', '෯'), + ('ෲ', '෴'), + ('ᳲ', 'ᳲ'), + ('𑇡', '𑇴'), +]; + +pub const SOGDIAN: &'static [(char, char)] = &[('ـ', 'ـ'), ('𐼰', '𐽙')]; + +pub const SORA_SOMPENG: &'static [(char, char)] = &[('𑃐', '𑃨'), ('𑃰', '𑃹')]; + +pub const SOYOMBO: &'static [(char, char)] = &[('𑩐', '𑪢')]; + +pub const SUNDANESE: &'static [(char, char)] = + &[('\u{1b80}', 'ᮿ'), ('᳀', '᳇')]; + +pub const SUNUWAR: &'static [(char, char)] = &[ + ('\u{300}', '\u{301}'), + ('\u{303}', '\u{303}'), + ('\u{30d}', '\u{30d}'), + ('\u{310}', '\u{310}'), + ('\u{32d}', '\u{32d}'), + ('\u{331}', '\u{331}'), + ('𑯀', '𑯡'), + ('𑯰', '𑯹'), +]; + +pub const SYLOTI_NAGRI: &'static [(char, char)] = + &[('।', '॥'), ('০', '৯'), ('ꠀ', '\u{a82c}')]; + +pub const SYRIAC: &'static [(char, char)] = &[ + ('\u{303}', '\u{304}'), + ('\u{307}', '\u{308}'), + ('\u{30a}', '\u{30a}'), + ('\u{320}', '\u{320}'), + ('\u{323}', '\u{325}'), + ('\u{32d}', '\u{32e}'), + ('\u{330}', '\u{330}'), + ('،', '،'), + ('؛', '\u{61c}'), + ('؟', '؟'), + ('ـ', 'ـ'), + ('\u{64b}', '\u{655}'), + ('\u{670}', '\u{670}'), + ('܀', '܍'), + ('\u{70f}', '\u{74a}'), + ('ݍ', 'ݏ'), + ('ࡠ', 'ࡪ'), + ('\u{1df8}', '\u{1df8}'), + ('\u{1dfa}', '\u{1dfa}'), +]; + +pub const TAGALOG: &'static [(char, char)] 
= + &[('ᜀ', '\u{1715}'), ('ᜟ', 'ᜟ'), ('᜵', '᜶')]; + +pub const TAGBANWA: &'static [(char, char)] = + &[('᜵', '᜶'), ('ᝠ', 'ᝬ'), ('ᝮ', 'ᝰ'), ('\u{1772}', '\u{1773}')]; + +pub const TAI_LE: &'static [(char, char)] = &[ + ('\u{300}', '\u{301}'), + ('\u{307}', '\u{308}'), + ('\u{30c}', '\u{30c}'), + ('၀', '၉'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), +]; + +pub const TAI_THAM: &'static [(char, char)] = &[ + ('ᨠ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '᪉'), + ('᪐', '᪙'), + ('᪠', '᪭'), +]; + +pub const TAI_VIET: &'static [(char, char)] = &[('ꪀ', 'ꫂ'), ('ꫛ', '꫟')]; + +pub const TAKRI: &'static [(char, char)] = + &[('।', '॥'), ('꠰', '꠹'), ('𑚀', '𑚹'), ('𑛀', '𑛉')]; + +pub const TAMIL: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{b82}', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('ௐ', 'ௐ'), + ('\u{bd7}', '\u{bd7}'), + ('௦', '௺'), + ('\u{1cda}', '\u{1cda}'), + ('ꣳ', 'ꣳ'), + ('\u{11301}', '\u{11301}'), + ('𑌃', '𑌃'), + ('\u{1133b}', '\u{1133c}'), + ('𑿀', '𑿱'), + ('𑿿', '𑿿'), +]; + +pub const TANGSA: &'static [(char, char)] = &[('𖩰', '𖪾'), ('𖫀', '𖫉')]; + +pub const TANGUT: &'static [(char, char)] = &[ + ('⿰', '⿿'), + ('㇯', '㇯'), + ('𖿠', '𖿠'), + ('𗀀', '𘟷'), + ('𘠀', '𘫿'), + ('𘴀', '𘴈'), +]; + +pub const TELUGU: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('\u{c00}', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('\u{c3c}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', '\u{c63}'), + ('౦', '౯'), + ('౷', '౿'), + ('\u{1cda}', '\u{1cda}'), + ('ᳲ', 'ᳲ'), +]; + +pub const THAANA: &'static [(char, char)] = &[ + ('،', '،'), + ('؛', '\u{61c}'), + ('؟', '؟'), + ('٠', '٩'), + ('ހ', 'ޱ'), + ('ﷲ', 'ﷲ'), + ('﷽', '﷽'), +]; + +pub const THAI: &'static [(char, char)] = &[ + ('ʼ', 'ʼ'), + ('˗', '˗'), + ('\u{303}', 
'\u{303}'), + ('\u{331}', '\u{331}'), + ('ก', '\u{e3a}'), + ('เ', '๛'), +]; + +pub const TIBETAN: &'static [(char, char)] = &[ + ('ༀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('\u{f71}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('྾', '࿌'), + ('࿎', '࿔'), + ('࿙', '࿚'), + ('〈', '》'), +]; + +pub const TIFINAGH: &'static [(char, char)] = &[ + ('\u{302}', '\u{302}'), + ('\u{304}', '\u{304}'), + ('\u{307}', '\u{307}'), + ('\u{309}', '\u{309}'), + ('ⴰ', 'ⵧ'), + ('ⵯ', '⵰'), + ('\u{2d7f}', '\u{2d7f}'), +]; + +pub const TIRHUTA: &'static [(char, char)] = &[ + ('\u{951}', '\u{952}'), + ('।', '॥'), + ('ᳲ', 'ᳲ'), + ('꠰', '꠹'), + ('𑒀', '𑓇'), + ('𑓐', '𑓙'), +]; + +pub const TODHRI: &'static [(char, char)] = &[ + ('\u{301}', '\u{301}'), + ('\u{304}', '\u{304}'), + ('\u{307}', '\u{307}'), + ('\u{311}', '\u{311}'), + ('\u{313}', '\u{313}'), + ('\u{35e}', '\u{35e}'), + ('𐗀', '𐗳'), +]; + +pub const TOTO: &'static [(char, char)] = &[('ʼ', 'ʼ'), ('𞊐', '\u{1e2ae}')]; + +pub const TULU_TIGALARI: &'static [(char, char)] = &[ + ('೦', '೯'), + ('ᳲ', 'ᳲ'), + ('\u{1cf4}', '\u{1cf4}'), + ('꠰', '꠵'), + ('\u{a8f1}', '\u{a8f1}'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '𑏕'), + ('𑏗', '𑏘'), + ('\u{113e1}', '\u{113e2}'), +]; + +pub const UGARITIC: &'static [(char, char)] = &[('𐎀', '𐎝'), ('𐎟', '𐎟')]; + +pub const VAI: &'static [(char, char)] = &[('ꔀ', 'ꘫ')]; + +pub const VITHKUQI: &'static [(char, char)] = &[ + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), +]; + +pub const WANCHO: &'static [(char, char)] = &[('𞋀', '𞋹'), ('𞋿', '𞋿')]; + +pub const WARANG_CITI: &'static [(char, char)] = &[('𑢠', '𑣲'), ('𑣿', '𑣿')]; + +pub const YEZIDI: &'static [(char, char)] = &[ + ('،', '،'), + ('؛', '؛'), + ('؟', '؟'), + ('٠', '٩'), + ('𐺀', '𐺩'), + ('\u{10eab}', '𐺭'), + ('𐺰', '𐺱'), +]; + +pub const YI: &'static [(char, char)] = &[ + ('、', '。'), + ('〈', 
'】'), + ('〔', '〛'), + ('・', '・'), + ('ꀀ', 'ꒌ'), + ('꒐', '꓆'), + ('。', '・'), +]; + +pub const ZANABAZAR_SQUARE: &'static [(char, char)] = &[('𑨀', '\u{11a47}')]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/sentence_break.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/sentence_break.rs new file mode 100644 index 0000000000000000000000000000000000000000..af1c5bea91b6d870f3dc86675c7c0e053b2df33f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/sentence_break.rs @@ -0,0 +1,2530 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate sentence-break ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. + +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("ATerm", ATERM), + ("CR", CR), + ("Close", CLOSE), + ("Extend", EXTEND), + ("Format", FORMAT), + ("LF", LF), + ("Lower", LOWER), + ("Numeric", NUMERIC), + ("OLetter", OLETTER), + ("SContinue", SCONTINUE), + ("STerm", STERM), + ("Sep", SEP), + ("Sp", SP), + ("Upper", UPPER), +]; + +pub const ATERM: &'static [(char, char)] = + &[('.', '.'), ('․', '․'), ('﹒', '﹒'), ('.', '.')]; + +pub const CR: &'static [(char, char)] = &[('\r', '\r')]; + +pub const CLOSE: &'static [(char, char)] = &[ + ('"', '"'), + ('\'', ')'), + ('[', '['), + (']', ']'), + ('{', '{'), + ('}', '}'), + ('«', '«'), + ('»', '»'), + ('༺', '༽'), + ('᚛', '᚜'), + ('‘', '‟'), + ('‹', '›'), + ('⁅', '⁆'), + ('⁽', '⁾'), + ('₍', '₎'), + ('⌈', '⌋'), + ('〈', '〉'), + ('❛', '❠'), + ('❨', '❵'), + ('⟅', '⟆'), + ('⟦', '⟯'), + ('⦃', '⦘'), + ('⧘', '⧛'), + ('⧼', '⧽'), + ('⸀', '⸍'), + ('⸜', '⸝'), + ('⸠', '⸩'), + ('⹂', '⹂'), + ('⹕', '⹜'), + ('〈', '】'), + ('〔', '〛'), + ('〝', '〟'), + ('﴾', '﴿'), + ('︗', '︘'), + ('︵', '﹄'), + ('﹇', '﹈'), + ('﹙', '﹞'), + ('(', ')'), + ('[', '['), + (']', ']'), + ('{', '{'), + 
('}', '}'), + ('⦅', '⦆'), + ('「', '」'), + ('🙶', '🙸'), +]; + +pub const EXTEND: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', 'ः'), + ('\u{93a}', '\u{93c}'), + ('ा', 'ॏ'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', 'ঃ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', '\u{9c4}'), + ('ে', 'ৈ'), + ('ো', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('\u{abc}', '\u{abc}'), + ('ા', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3e}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', 'ಃ'), + ('\u{cbc}', '\u{cbc}'), + ('ಾ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', 
'\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('ೳ', 'ೳ'), + ('\u{d00}', 'ഃ'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d3e}', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', 'ඃ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', '༿'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('ါ', '\u{103e}'), + ('ၖ', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('ၢ', 'ၤ'), + ('ၧ', 'ၭ'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{108d}'), + ('ႏ', 'ႏ'), + ('ႚ', '\u{109d}'), + ('\u{135d}', '\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('\u{1a17}', '\u{1a1b}'), + ('ᩕ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', 'ᬄ'), + ('\u{1b34}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', 'ᮂ'), + ('ᮡ', '\u{1bad}'), + ('\u{1be6}', '\u{1bf3}'), + ('ᰤ', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('᳷', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200c}', '\u{200d}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), 
+ ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('ꠣ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꢀ', 'ꢁ'), + ('ꢴ', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a953}'), + ('\u{a980}', 'ꦃ'), + ('\u{a9b3}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', 'ꩍ'), + ('ꩻ', 'ꩽ'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('ꫫ', 'ꫯ'), + ('ꫵ', '\u{aaf6}'), + ('ꯣ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + ('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('𑀀', '𑀂'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '𑂂'), + ('𑂰', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{11134}'), + ('𑅅', '𑅆'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '𑆂'), + ('𑆳', '\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '\u{111cf}'), + ('𑈬', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112ea}'), + ('\u{11300}', '𑌃'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('𑍢', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', 
'\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('𑐵', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114c3}'), + ('\u{115af}', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('𑘰', '\u{11640}'), + ('\u{116ab}', '\u{116b7}'), + ('\u{1171d}', '\u{1172b}'), + ('𑠬', '\u{1183a}'), + ('\u{11930}', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{1193e}'), + ('𑥀', '𑥀'), + ('𑥂', '\u{11943}'), + ('𑧑', '\u{119d7}'), + ('\u{119da}', '\u{119e0}'), + ('𑧤', '𑧤'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '𑨹'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a99}'), + ('𑰯', '\u{11c36}'), + ('\u{11c38}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + ('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('𑶊', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '\u{11d97}'), + ('\u{11ef3}', '𑻶'), + ('\u{11f00}', '\u{11f01}'), + ('𑼃', '𑼃'), + ('𑼴', '\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('𖽑', '𖾇'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', 
'\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const FORMAT: &'static [(char, char)] = &[ + ('\u{ad}', '\u{ad}'), + ('\u{61c}', '\u{61c}'), + ('\u{70f}', '\u{70f}'), + ('\u{180e}', '\u{180e}'), + ('\u{200b}', '\u{200b}'), + ('\u{200e}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{2064}'), + ('\u{2066}', '\u{206f}'), + ('\u{feff}', '\u{feff}'), + ('\u{fff9}', '\u{fffb}'), + ('\u{13430}', '\u{1343f}'), + ('\u{1bca0}', '\u{1bca3}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{e0001}', '\u{e0001}'), +]; + +pub const LF: &'static [(char, char)] = &[('\n', '\n')]; + +pub const LOWER: &'static [(char, char)] = &[ + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('ß', 'ö'), + ('ø', 'ÿ'), + ('ā', 'ā'), + ('ă', 'ă'), + ('ą', 'ą'), + ('ć', 'ć'), + ('ĉ', 'ĉ'), + ('ċ', 'ċ'), + ('č', 'č'), + ('ď', 'ď'), + ('đ', 'đ'), + ('ē', 'ē'), + ('ĕ', 'ĕ'), + ('ė', 'ė'), + ('ę', 'ę'), + ('ě', 'ě'), + ('ĝ', 'ĝ'), + ('ğ', 'ğ'), + ('ġ', 'ġ'), + ('ģ', 'ģ'), + ('ĥ', 'ĥ'), + ('ħ', 'ħ'), + ('ĩ', 'ĩ'), + ('ī', 'ī'), + ('ĭ', 'ĭ'), + ('į', 'į'), + ('ı', 'ı'), + ('ij', 'ij'), + ('ĵ', 'ĵ'), + ('ķ', 'ĸ'), + ('ĺ', 'ĺ'), + ('ļ', 'ļ'), + ('ľ', 'ľ'), + ('ŀ', 'ŀ'), + ('ł', 'ł'), + ('ń', 'ń'), + ('ņ', 'ņ'), + ('ň', 'ʼn'), + ('ŋ', 'ŋ'), + ('ō', 'ō'), + ('ŏ', 'ŏ'), + ('ő', 'ő'), + ('œ', 'œ'), + ('ŕ', 'ŕ'), + ('ŗ', 'ŗ'), + ('ř', 'ř'), + ('ś', 'ś'), + ('ŝ', 'ŝ'), + ('ş', 'ş'), + ('š', 'š'), + ('ţ', 'ţ'), + ('ť', 'ť'), + ('ŧ', 'ŧ'), + ('ũ', 'ũ'), + ('ū', 'ū'), + ('ŭ', 'ŭ'), + ('ů', 'ů'), + ('ű', 'ű'), + ('ų', 'ų'), + ('ŵ', 'ŵ'), + ('ŷ', 'ŷ'), + ('ź', 'ź'), + ('ż', 'ż'), + ('ž', 'ƀ'), + ('ƃ', 'ƃ'), + ('ƅ', 
'ƅ'), + ('ƈ', 'ƈ'), + ('ƌ', 'ƍ'), + ('ƒ', 'ƒ'), + ('ƕ', 'ƕ'), + ('ƙ', 'ƛ'), + ('ƞ', 'ƞ'), + ('ơ', 'ơ'), + ('ƣ', 'ƣ'), + ('ƥ', 'ƥ'), + ('ƨ', 'ƨ'), + ('ƪ', 'ƫ'), + ('ƭ', 'ƭ'), + ('ư', 'ư'), + ('ƴ', 'ƴ'), + ('ƶ', 'ƶ'), + ('ƹ', 'ƺ'), + ('ƽ', 'ƿ'), + ('dž', 'dž'), + ('lj', 'lj'), + ('nj', 'nj'), + ('ǎ', 'ǎ'), + ('ǐ', 'ǐ'), + ('ǒ', 'ǒ'), + ('ǔ', 'ǔ'), + ('ǖ', 'ǖ'), + ('ǘ', 'ǘ'), + ('ǚ', 'ǚ'), + ('ǜ', 'ǝ'), + ('ǟ', 'ǟ'), + ('ǡ', 'ǡ'), + ('ǣ', 'ǣ'), + ('ǥ', 'ǥ'), + ('ǧ', 'ǧ'), + ('ǩ', 'ǩ'), + ('ǫ', 'ǫ'), + ('ǭ', 'ǭ'), + ('ǯ', 'ǰ'), + ('dz', 'dz'), + ('ǵ', 'ǵ'), + ('ǹ', 'ǹ'), + ('ǻ', 'ǻ'), + ('ǽ', 'ǽ'), + ('ǿ', 'ǿ'), + ('ȁ', 'ȁ'), + ('ȃ', 'ȃ'), + ('ȅ', 'ȅ'), + ('ȇ', 'ȇ'), + ('ȉ', 'ȉ'), + ('ȋ', 'ȋ'), + ('ȍ', 'ȍ'), + ('ȏ', 'ȏ'), + ('ȑ', 'ȑ'), + ('ȓ', 'ȓ'), + ('ȕ', 'ȕ'), + ('ȗ', 'ȗ'), + ('ș', 'ș'), + ('ț', 'ț'), + ('ȝ', 'ȝ'), + ('ȟ', 'ȟ'), + ('ȡ', 'ȡ'), + ('ȣ', 'ȣ'), + ('ȥ', 'ȥ'), + ('ȧ', 'ȧ'), + ('ȩ', 'ȩ'), + ('ȫ', 'ȫ'), + ('ȭ', 'ȭ'), + ('ȯ', 'ȯ'), + ('ȱ', 'ȱ'), + ('ȳ', 'ȹ'), + ('ȼ', 'ȼ'), + ('ȿ', 'ɀ'), + ('ɂ', 'ɂ'), + ('ɇ', 'ɇ'), + ('ɉ', 'ɉ'), + ('ɋ', 'ɋ'), + ('ɍ', 'ɍ'), + ('ɏ', 'ʓ'), + ('ʕ', 'ʸ'), + ('ˀ', 'ˁ'), + ('ˠ', 'ˤ'), + ('ͱ', 'ͱ'), + ('ͳ', 'ͳ'), + ('ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('ΐ', 'ΐ'), + ('ά', 'ώ'), + ('ϐ', 'ϑ'), + ('ϕ', 'ϗ'), + ('ϙ', 'ϙ'), + ('ϛ', 'ϛ'), + ('ϝ', 'ϝ'), + ('ϟ', 'ϟ'), + ('ϡ', 'ϡ'), + ('ϣ', 'ϣ'), + ('ϥ', 'ϥ'), + ('ϧ', 'ϧ'), + ('ϩ', 'ϩ'), + ('ϫ', 'ϫ'), + ('ϭ', 'ϭ'), + ('ϯ', 'ϳ'), + ('ϵ', 'ϵ'), + ('ϸ', 'ϸ'), + ('ϻ', 'ϼ'), + ('а', 'џ'), + ('ѡ', 'ѡ'), + ('ѣ', 'ѣ'), + ('ѥ', 'ѥ'), + ('ѧ', 'ѧ'), + ('ѩ', 'ѩ'), + ('ѫ', 'ѫ'), + ('ѭ', 'ѭ'), + ('ѯ', 'ѯ'), + ('ѱ', 'ѱ'), + ('ѳ', 'ѳ'), + ('ѵ', 'ѵ'), + ('ѷ', 'ѷ'), + ('ѹ', 'ѹ'), + ('ѻ', 'ѻ'), + ('ѽ', 'ѽ'), + ('ѿ', 'ѿ'), + ('ҁ', 'ҁ'), + ('ҋ', 'ҋ'), + ('ҍ', 'ҍ'), + ('ҏ', 'ҏ'), + ('ґ', 'ґ'), + ('ғ', 'ғ'), + ('ҕ', 'ҕ'), + ('җ', 'җ'), + ('ҙ', 'ҙ'), + ('қ', 'қ'), + ('ҝ', 'ҝ'), + ('ҟ', 'ҟ'), + ('ҡ', 'ҡ'), + ('ң', 'ң'), + ('ҥ', 'ҥ'), + ('ҧ', 'ҧ'), + ('ҩ', 'ҩ'), + ('ҫ', 'ҫ'), + ('ҭ', 'ҭ'), + ('ү', 'ү'), + ('ұ', 'ұ'), + ('ҳ', 
'ҳ'), + ('ҵ', 'ҵ'), + ('ҷ', 'ҷ'), + ('ҹ', 'ҹ'), + ('һ', 'һ'), + ('ҽ', 'ҽ'), + ('ҿ', 'ҿ'), + ('ӂ', 'ӂ'), + ('ӄ', 'ӄ'), + ('ӆ', 'ӆ'), + ('ӈ', 'ӈ'), + ('ӊ', 'ӊ'), + ('ӌ', 'ӌ'), + ('ӎ', 'ӏ'), + ('ӑ', 'ӑ'), + ('ӓ', 'ӓ'), + ('ӕ', 'ӕ'), + ('ӗ', 'ӗ'), + ('ә', 'ә'), + ('ӛ', 'ӛ'), + ('ӝ', 'ӝ'), + ('ӟ', 'ӟ'), + ('ӡ', 'ӡ'), + ('ӣ', 'ӣ'), + ('ӥ', 'ӥ'), + ('ӧ', 'ӧ'), + ('ө', 'ө'), + ('ӫ', 'ӫ'), + ('ӭ', 'ӭ'), + ('ӯ', 'ӯ'), + ('ӱ', 'ӱ'), + ('ӳ', 'ӳ'), + ('ӵ', 'ӵ'), + ('ӷ', 'ӷ'), + ('ӹ', 'ӹ'), + ('ӻ', 'ӻ'), + ('ӽ', 'ӽ'), + ('ӿ', 'ӿ'), + ('ԁ', 'ԁ'), + ('ԃ', 'ԃ'), + ('ԅ', 'ԅ'), + ('ԇ', 'ԇ'), + ('ԉ', 'ԉ'), + ('ԋ', 'ԋ'), + ('ԍ', 'ԍ'), + ('ԏ', 'ԏ'), + ('ԑ', 'ԑ'), + ('ԓ', 'ԓ'), + ('ԕ', 'ԕ'), + ('ԗ', 'ԗ'), + ('ԙ', 'ԙ'), + ('ԛ', 'ԛ'), + ('ԝ', 'ԝ'), + ('ԟ', 'ԟ'), + ('ԡ', 'ԡ'), + ('ԣ', 'ԣ'), + ('ԥ', 'ԥ'), + ('ԧ', 'ԧ'), + ('ԩ', 'ԩ'), + ('ԫ', 'ԫ'), + ('ԭ', 'ԭ'), + ('ԯ', 'ԯ'), + ('ՠ', 'ֈ'), + ('ჼ', 'ჼ'), + ('ᏸ', 'ᏽ'), + ('ᲀ', 'ᲈ'), + ('ᲊ', 'ᲊ'), + ('ᴀ', 'ᶿ'), + ('ḁ', 'ḁ'), + ('ḃ', 'ḃ'), + ('ḅ', 'ḅ'), + ('ḇ', 'ḇ'), + ('ḉ', 'ḉ'), + ('ḋ', 'ḋ'), + ('ḍ', 'ḍ'), + ('ḏ', 'ḏ'), + ('ḑ', 'ḑ'), + ('ḓ', 'ḓ'), + ('ḕ', 'ḕ'), + ('ḗ', 'ḗ'), + ('ḙ', 'ḙ'), + ('ḛ', 'ḛ'), + ('ḝ', 'ḝ'), + ('ḟ', 'ḟ'), + ('ḡ', 'ḡ'), + ('ḣ', 'ḣ'), + ('ḥ', 'ḥ'), + ('ḧ', 'ḧ'), + ('ḩ', 'ḩ'), + ('ḫ', 'ḫ'), + ('ḭ', 'ḭ'), + ('ḯ', 'ḯ'), + ('ḱ', 'ḱ'), + ('ḳ', 'ḳ'), + ('ḵ', 'ḵ'), + ('ḷ', 'ḷ'), + ('ḹ', 'ḹ'), + ('ḻ', 'ḻ'), + ('ḽ', 'ḽ'), + ('ḿ', 'ḿ'), + ('ṁ', 'ṁ'), + ('ṃ', 'ṃ'), + ('ṅ', 'ṅ'), + ('ṇ', 'ṇ'), + ('ṉ', 'ṉ'), + ('ṋ', 'ṋ'), + ('ṍ', 'ṍ'), + ('ṏ', 'ṏ'), + ('ṑ', 'ṑ'), + ('ṓ', 'ṓ'), + ('ṕ', 'ṕ'), + ('ṗ', 'ṗ'), + ('ṙ', 'ṙ'), + ('ṛ', 'ṛ'), + ('ṝ', 'ṝ'), + ('ṟ', 'ṟ'), + ('ṡ', 'ṡ'), + ('ṣ', 'ṣ'), + ('ṥ', 'ṥ'), + ('ṧ', 'ṧ'), + ('ṩ', 'ṩ'), + ('ṫ', 'ṫ'), + ('ṭ', 'ṭ'), + ('ṯ', 'ṯ'), + ('ṱ', 'ṱ'), + ('ṳ', 'ṳ'), + ('ṵ', 'ṵ'), + ('ṷ', 'ṷ'), + ('ṹ', 'ṹ'), + ('ṻ', 'ṻ'), + ('ṽ', 'ṽ'), + ('ṿ', 'ṿ'), + ('ẁ', 'ẁ'), + ('ẃ', 'ẃ'), + ('ẅ', 'ẅ'), + ('ẇ', 'ẇ'), + ('ẉ', 'ẉ'), + ('ẋ', 'ẋ'), + ('ẍ', 'ẍ'), + ('ẏ', 'ẏ'), + ('ẑ', 'ẑ'), + ('ẓ', 'ẓ'), + ('ẕ', 'ẝ'), + 
('ẟ', 'ẟ'), + ('ạ', 'ạ'), + ('ả', 'ả'), + ('ấ', 'ấ'), + ('ầ', 'ầ'), + ('ẩ', 'ẩ'), + ('ẫ', 'ẫ'), + ('ậ', 'ậ'), + ('ắ', 'ắ'), + ('ằ', 'ằ'), + ('ẳ', 'ẳ'), + ('ẵ', 'ẵ'), + ('ặ', 'ặ'), + ('ẹ', 'ẹ'), + ('ẻ', 'ẻ'), + ('ẽ', 'ẽ'), + ('ế', 'ế'), + ('ề', 'ề'), + ('ể', 'ể'), + ('ễ', 'ễ'), + ('ệ', 'ệ'), + ('ỉ', 'ỉ'), + ('ị', 'ị'), + ('ọ', 'ọ'), + ('ỏ', 'ỏ'), + ('ố', 'ố'), + ('ồ', 'ồ'), + ('ổ', 'ổ'), + ('ỗ', 'ỗ'), + ('ộ', 'ộ'), + ('ớ', 'ớ'), + ('ờ', 'ờ'), + ('ở', 'ở'), + ('ỡ', 'ỡ'), + ('ợ', 'ợ'), + ('ụ', 'ụ'), + ('ủ', 'ủ'), + ('ứ', 'ứ'), + ('ừ', 'ừ'), + ('ử', 'ử'), + ('ữ', 'ữ'), + ('ự', 'ự'), + ('ỳ', 'ỳ'), + ('ỵ', 'ỵ'), + ('ỷ', 'ỷ'), + ('ỹ', 'ỹ'), + ('ỻ', 'ỻ'), + ('ỽ', 'ỽ'), + ('ỿ', 'ἇ'), + ('ἐ', 'ἕ'), + ('ἠ', 'ἧ'), + ('ἰ', 'ἷ'), + ('ὀ', 'ὅ'), + ('ὐ', 'ὗ'), + ('ὠ', 'ὧ'), + ('ὰ', 'ώ'), + ('ᾀ', 'ᾇ'), + ('ᾐ', 'ᾗ'), + ('ᾠ', 'ᾧ'), + ('ᾰ', 'ᾴ'), + ('ᾶ', 'ᾷ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῇ'), + ('ῐ', 'ΐ'), + ('ῖ', 'ῗ'), + ('ῠ', 'ῧ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῷ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℊ', 'ℊ'), + ('ℎ', 'ℏ'), + ('ℓ', 'ℓ'), + ('ℯ', 'ℯ'), + ('ℴ', 'ℴ'), + ('ℹ', 'ℹ'), + ('ℼ', 'ℽ'), + ('ⅆ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('ⅰ', 'ⅿ'), + ('ↄ', 'ↄ'), + ('ⓐ', 'ⓩ'), + ('ⰰ', 'ⱟ'), + ('ⱡ', 'ⱡ'), + ('ⱥ', 'ⱦ'), + ('ⱨ', 'ⱨ'), + ('ⱪ', 'ⱪ'), + ('ⱬ', 'ⱬ'), + ('ⱱ', 'ⱱ'), + ('ⱳ', 'ⱴ'), + ('ⱶ', 'ⱽ'), + ('ⲁ', 'ⲁ'), + ('ⲃ', 'ⲃ'), + ('ⲅ', 'ⲅ'), + ('ⲇ', 'ⲇ'), + ('ⲉ', 'ⲉ'), + ('ⲋ', 'ⲋ'), + ('ⲍ', 'ⲍ'), + ('ⲏ', 'ⲏ'), + ('ⲑ', 'ⲑ'), + ('ⲓ', 'ⲓ'), + ('ⲕ', 'ⲕ'), + ('ⲗ', 'ⲗ'), + ('ⲙ', 'ⲙ'), + ('ⲛ', 'ⲛ'), + ('ⲝ', 'ⲝ'), + ('ⲟ', 'ⲟ'), + ('ⲡ', 'ⲡ'), + ('ⲣ', 'ⲣ'), + ('ⲥ', 'ⲥ'), + ('ⲧ', 'ⲧ'), + ('ⲩ', 'ⲩ'), + ('ⲫ', 'ⲫ'), + ('ⲭ', 'ⲭ'), + ('ⲯ', 'ⲯ'), + ('ⲱ', 'ⲱ'), + ('ⲳ', 'ⲳ'), + ('ⲵ', 'ⲵ'), + ('ⲷ', 'ⲷ'), + ('ⲹ', 'ⲹ'), + ('ⲻ', 'ⲻ'), + ('ⲽ', 'ⲽ'), + ('ⲿ', 'ⲿ'), + ('ⳁ', 'ⳁ'), + ('ⳃ', 'ⳃ'), + ('ⳅ', 'ⳅ'), + ('ⳇ', 'ⳇ'), + ('ⳉ', 'ⳉ'), + ('ⳋ', 'ⳋ'), + ('ⳍ', 'ⳍ'), + ('ⳏ', 'ⳏ'), + ('ⳑ', 'ⳑ'), + ('ⳓ', 'ⳓ'), + ('ⳕ', 'ⳕ'), + ('ⳗ', 'ⳗ'), + ('ⳙ', 'ⳙ'), + ('ⳛ', 'ⳛ'), + ('ⳝ', 'ⳝ'), + ('ⳟ', 'ⳟ'), + ('ⳡ', 'ⳡ'), + ('ⳣ', 'ⳤ'), 
+ ('ⳬ', 'ⳬ'), + ('ⳮ', 'ⳮ'), + ('ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), + ('ⴭ', 'ⴭ'), + ('ꙁ', 'ꙁ'), + ('ꙃ', 'ꙃ'), + ('ꙅ', 'ꙅ'), + ('ꙇ', 'ꙇ'), + ('ꙉ', 'ꙉ'), + ('ꙋ', 'ꙋ'), + ('ꙍ', 'ꙍ'), + ('ꙏ', 'ꙏ'), + ('ꙑ', 'ꙑ'), + ('ꙓ', 'ꙓ'), + ('ꙕ', 'ꙕ'), + ('ꙗ', 'ꙗ'), + ('ꙙ', 'ꙙ'), + ('ꙛ', 'ꙛ'), + ('ꙝ', 'ꙝ'), + ('ꙟ', 'ꙟ'), + ('ꙡ', 'ꙡ'), + ('ꙣ', 'ꙣ'), + ('ꙥ', 'ꙥ'), + ('ꙧ', 'ꙧ'), + ('ꙩ', 'ꙩ'), + ('ꙫ', 'ꙫ'), + ('ꙭ', 'ꙭ'), + ('ꚁ', 'ꚁ'), + ('ꚃ', 'ꚃ'), + ('ꚅ', 'ꚅ'), + ('ꚇ', 'ꚇ'), + ('ꚉ', 'ꚉ'), + ('ꚋ', 'ꚋ'), + ('ꚍ', 'ꚍ'), + ('ꚏ', 'ꚏ'), + ('ꚑ', 'ꚑ'), + ('ꚓ', 'ꚓ'), + ('ꚕ', 'ꚕ'), + ('ꚗ', 'ꚗ'), + ('ꚙ', 'ꚙ'), + ('ꚛ', 'ꚝ'), + ('ꜣ', 'ꜣ'), + ('ꜥ', 'ꜥ'), + ('ꜧ', 'ꜧ'), + ('ꜩ', 'ꜩ'), + ('ꜫ', 'ꜫ'), + ('ꜭ', 'ꜭ'), + ('ꜯ', 'ꜱ'), + ('ꜳ', 'ꜳ'), + ('ꜵ', 'ꜵ'), + ('ꜷ', 'ꜷ'), + ('ꜹ', 'ꜹ'), + ('ꜻ', 'ꜻ'), + ('ꜽ', 'ꜽ'), + ('ꜿ', 'ꜿ'), + ('ꝁ', 'ꝁ'), + ('ꝃ', 'ꝃ'), + ('ꝅ', 'ꝅ'), + ('ꝇ', 'ꝇ'), + ('ꝉ', 'ꝉ'), + ('ꝋ', 'ꝋ'), + ('ꝍ', 'ꝍ'), + ('ꝏ', 'ꝏ'), + ('ꝑ', 'ꝑ'), + ('ꝓ', 'ꝓ'), + ('ꝕ', 'ꝕ'), + ('ꝗ', 'ꝗ'), + ('ꝙ', 'ꝙ'), + ('ꝛ', 'ꝛ'), + ('ꝝ', 'ꝝ'), + ('ꝟ', 'ꝟ'), + ('ꝡ', 'ꝡ'), + ('ꝣ', 'ꝣ'), + ('ꝥ', 'ꝥ'), + ('ꝧ', 'ꝧ'), + ('ꝩ', 'ꝩ'), + ('ꝫ', 'ꝫ'), + ('ꝭ', 'ꝭ'), + ('ꝯ', 'ꝸ'), + ('ꝺ', 'ꝺ'), + ('ꝼ', 'ꝼ'), + ('ꝿ', 'ꝿ'), + ('ꞁ', 'ꞁ'), + ('ꞃ', 'ꞃ'), + ('ꞅ', 'ꞅ'), + ('ꞇ', 'ꞇ'), + ('ꞌ', 'ꞌ'), + ('ꞎ', 'ꞎ'), + ('ꞑ', 'ꞑ'), + ('ꞓ', 'ꞕ'), + ('ꞗ', 'ꞗ'), + ('ꞙ', 'ꞙ'), + ('ꞛ', 'ꞛ'), + ('ꞝ', 'ꞝ'), + ('ꞟ', 'ꞟ'), + ('ꞡ', 'ꞡ'), + ('ꞣ', 'ꞣ'), + ('ꞥ', 'ꞥ'), + ('ꞧ', 'ꞧ'), + ('ꞩ', 'ꞩ'), + ('ꞯ', 'ꞯ'), + ('ꞵ', 'ꞵ'), + ('ꞷ', 'ꞷ'), + ('ꞹ', 'ꞹ'), + ('ꞻ', 'ꞻ'), + ('ꞽ', 'ꞽ'), + ('ꞿ', 'ꞿ'), + ('ꟁ', 'ꟁ'), + ('ꟃ', 'ꟃ'), + ('ꟈ', 'ꟈ'), + ('ꟊ', 'ꟊ'), + ('ꟍ', 'ꟍ'), + ('ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'ꟕ'), + ('ꟗ', 'ꟗ'), + ('ꟙ', 'ꟙ'), + ('ꟛ', 'ꟛ'), + ('ꟲ', 'ꟴ'), + ('ꟶ', 'ꟶ'), + ('ꟸ', 'ꟺ'), + ('ꬰ', 'ꭚ'), + ('ꭜ', 'ꭩ'), + ('ꭰ', 'ꮿ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('a', 'z'), + ('𐐨', '𐑏'), + ('𐓘', '𐓻'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐞀', '𐞀'), + ('𐞃', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐳀', '𐳲'), + ('𐵰', '𐶅'), + ('𑣀', '𑣟'), + ('𖹠', 
'𖹿'), + ('𝐚', '𝐳'), + ('𝑎', '𝑔'), + ('𝑖', '𝑧'), + ('𝒂', '𝒛'), + ('𝒶', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝓏'), + ('𝓪', '𝔃'), + ('𝔞', '𝔷'), + ('𝕒', '𝕫'), + ('𝖆', '𝖟'), + ('𝖺', '𝗓'), + ('𝗮', '𝘇'), + ('𝘢', '𝘻'), + ('𝙖', '𝙯'), + ('𝚊', '𝚥'), + ('𝛂', '𝛚'), + ('𝛜', '𝛡'), + ('𝛼', '𝜔'), + ('𝜖', '𝜛'), + ('𝜶', '𝝎'), + ('𝝐', '𝝕'), + ('𝝰', '𝞈'), + ('𝞊', '𝞏'), + ('𝞪', '𝟂'), + ('𝟄', '𝟉'), + ('𝟋', '𝟋'), + ('𝼀', '𝼉'), + ('𝼋', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞤢', '𞥃'), +]; + +pub const NUMERIC: &'static [(char, char)] = &[ + ('0', '9'), + ('\u{600}', '\u{605}'), + ('٠', '٩'), + ('٫', '٬'), + ('\u{6dd}', '\u{6dd}'), + ('۰', '۹'), + ('߀', '߉'), + ('\u{890}', '\u{891}'), + ('\u{8e2}', '\u{8e2}'), + ('०', '९'), + ('০', '৯'), + ('੦', '੯'), + ('૦', '૯'), + ('୦', '୯'), + ('௦', '௯'), + ('౦', '౯'), + ('೦', '೯'), + ('൦', '൯'), + ('෦', '෯'), + ('๐', '๙'), + ('໐', '໙'), + ('༠', '༩'), + ('၀', '၉'), + ('႐', '႙'), + ('០', '៩'), + ('᠐', '᠙'), + ('᥆', '᥏'), + ('᧐', '᧚'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᭐', '᭙'), + ('᮰', '᮹'), + ('᱀', '᱉'), + ('᱐', '᱙'), + ('꘠', '꘩'), + ('꣐', '꣙'), + ('꤀', '꤉'), + ('꧐', '꧙'), + ('꧰', '꧹'), + ('꩐', '꩙'), + ('꯰', '꯹'), + ('0', '9'), + ('𐒠', '𐒩'), + ('𐴰', '𐴹'), + ('𐵀', '𐵉'), + ('𑁦', '𑁯'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110cd}', '\u{110cd}'), + ('𑃰', '𑃹'), + ('𑄶', '𑄿'), + ('𑇐', '𑇙'), + ('𑋰', '𑋹'), + ('𑑐', '𑑙'), + ('𑓐', '𑓙'), + ('𑙐', '𑙙'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜰', '𑜹'), + ('𑣠', '𑣩'), + ('𑥐', '𑥙'), + ('𑯰', '𑯹'), + ('𑱐', '𑱙'), + ('𑵐', '𑵙'), + ('𑶠', '𑶩'), + ('𑽐', '𑽙'), + ('𖄰', '𖄹'), + ('𖩠', '𖩩'), + ('𖫀', '𖫉'), + ('𖭐', '𖭙'), + ('𖵰', '𖵹'), + ('𜳰', '𜳹'), + ('𝟎', '𝟿'), + ('𞅀', '𞅉'), + ('𞋰', '𞋹'), + ('𞓰', '𞓹'), + ('𞗱', '𞗺'), + ('𞥐', '𞥙'), + ('🯰', '🯹'), +]; + +pub const OLETTER: &'static [(char, char)] = &[ + ('ƻ', 'ƻ'), + ('ǀ', 'ǃ'), + ('ʔ', 'ʔ'), + ('ʹ', 'ʿ'), + ('ˆ', 'ˑ'), + ('ˬ', 'ˬ'), + ('ˮ', 'ˮ'), + ('ʹ', 'ʹ'), + ('ՙ', 'ՙ'), + ('א', 'ת'), + ('ׯ', '׳'), + ('ؠ', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۥ', 'ۦ'), + ('ۮ', 'ۯ'), + ('ۺ', 
'ۼ'), + ('ۿ', 'ۿ'), + ('ܐ', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॱ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + ('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ก', 'ะ'), + ('า', 'ำ'), + ('เ', 'ๆ'), + ('ກ', 'ຂ'), + ('ຄ', 'ຄ'), + ('ຆ', 'ຊ'), + ('ຌ', 'ຣ'), + ('ລ', 'ລ'), + ('ວ', 'ະ'), + ('າ', 'ຳ'), + ('ຽ', 'ຽ'), + ('ເ', 'ໄ'), + ('ໆ', 'ໆ'), + ('ໜ', 'ໟ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('က', 'ဪ'), + ('ဿ', 'ဿ'), + ('ၐ', 'ၕ'), + ('ၚ', 'ၝ'), + ('ၡ', 'ၡ'), + ('ၥ', 'ၦ'), + ('ၮ', 'ၰ'), + ('ၵ', 'ႁ'), + ('ႎ', 'ႎ'), + ('ა', 'ჺ'), + ('ჽ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 
'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ក', 'ឳ'), + ('ៗ', 'ៗ'), + ('ៜ', 'ៜ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢄ'), + ('ᢇ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᥐ', 'ᥭ'), + ('ᥰ', 'ᥴ'), + ('ᦀ', 'ᦫ'), + ('ᦰ', 'ᧉ'), + ('ᨀ', 'ᨖ'), + ('ᨠ', 'ᩔ'), + ('ᪧ', 'ᪧ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), + ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ℵ', 'ℸ'), + ('ↀ', 'ↂ'), + ('ↅ', 'ↈ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('ⸯ', 'ⸯ'), + ('々', '〇'), + ('〡', '〩'), + ('〱', '〵'), + ('〸', '〼'), + ('ぁ', 'ゖ'), + ('ゝ', 'ゟ'), + ('ァ', 'ヺ'), + ('ー', 'ヿ'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ㇰ', 'ㇿ'), + ('㐀', '䶿'), + ('一', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('ꙮ', 'ꙮ'), + ('ꙿ', 'ꙿ'), + ('ꚠ', 'ꛯ'), + ('ꜗ', 'ꜟ'), + ('ꞈ', 'ꞈ'), + ('ꞏ', 'ꞏ'), + ('ꟷ', 'ꟷ'), + ('ꟻ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧏ', 'ꧏ'), + ('ꧠ', 'ꧤ'), + ('ꧦ', 'ꧯ'), + ('ꧺ', 'ꧾ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꩠ', 'ꩶ'), + ('ꩺ', 'ꩺ'), + ('ꩾ', 'ꪯ'), + ('ꪱ', 'ꪱ'), + ('ꪵ', 'ꪶ'), + ('ꪹ', 'ꪽ'), + ('ꫀ', 'ꫀ'), + ('ꫂ', 'ꫂ'), + ('ꫛ', 'ꫝ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫴ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꯀ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('豈', '舘'), + ('並', '龎'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + 
('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('ヲ', 'ン'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐑐', '𐒝'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞁', '𐞂'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐴀', '𐴣'), + ('𐵊', '𐵏'), + ('𐵯', '𐵯'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), + ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + ('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑜀', '𑜚'), + ('𑝀', '𑝆'), + ('𑠀', '𑠫'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', '𑩐'), 
+ ('𑩜', '𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', '𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𗀀', '𘟷'), + ('𘠀', '𘳕'), + ('𘳿', '𘴈'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛄢'), + ('𛄲', '𛄲'), + ('𛅐', '𛅒'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), + ('𛅰', '𛋻'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝼊', '𝼊'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('𠀀', '𪛟'), + ('𪜀', '𫜹'), + ('𫝀', '𫠝'), + ('𫠠', '𬺡'), + ('𬺰', '𮯠'), + ('𮯰', '𮹝'), + ('丽', '𪘀'), + ('𰀀', '𱍊'), + ('𱍐', '𲎯'), +]; + +pub const SCONTINUE: &'static [(char, char)] = &[ + (',', '-'), + (':', ';'), + (';', ';'), + ('՝', '՝'), + ('،', '؍'), + ('߸', '߸'), + ('᠂', '᠂'), + ('᠈', '᠈'), + ('–', '—'), + ('、', '、'), + ('︐', '︑'), + ('︓', '︔'), + ('︱', '︲'), + ('﹐', '﹑'), + ('﹔', '﹕'), + ('﹘', '﹘'), + ('﹣', '﹣'), + (',', '-'), + (':', ';'), + ('、', '、'), +]; + +pub const STERM: 
&'static [(char, char)] = &[ + ('!', '!'), + ('?', '?'), + ('։', '։'), + ('؝', '؟'), + ('۔', '۔'), + ('܀', '܂'), + ('߹', '߹'), + ('࠷', '࠷'), + ('࠹', '࠹'), + ('࠽', '࠾'), + ('।', '॥'), + ('၊', '။'), + ('።', '።'), + ('፧', '፨'), + ('᙮', '᙮'), + ('᜵', '᜶'), + ('។', '៕'), + ('᠃', '᠃'), + ('᠉', '᠉'), + ('᥄', '᥅'), + ('᪨', '᪫'), + ('᭎', '᭏'), + ('᭚', '᭛'), + ('᭞', '᭟'), + ('᭽', '᭿'), + ('᰻', '᰼'), + ('᱾', '᱿'), + ('‼', '‽'), + ('⁇', '⁉'), + ('⳹', '⳻'), + ('⸮', '⸮'), + ('⸼', '⸼'), + ('⹓', '⹔'), + ('。', '。'), + ('꓿', '꓿'), + ('꘎', '꘏'), + ('꛳', '꛳'), + ('꛷', '꛷'), + ('꡶', '꡷'), + ('꣎', '꣏'), + ('꤯', '꤯'), + ('꧈', '꧉'), + ('꩝', '꩟'), + ('꫰', '꫱'), + ('꯫', '꯫'), + ('︒', '︒'), + ('︕', '︖'), + ('﹖', '﹗'), + ('!', '!'), + ('?', '?'), + ('。', '。'), + ('𐩖', '𐩗'), + ('𐽕', '𐽙'), + ('𐾆', '𐾉'), + ('𑁇', '𑁈'), + ('𑂾', '𑃁'), + ('𑅁', '𑅃'), + ('𑇅', '𑇆'), + ('𑇍', '𑇍'), + ('𑇞', '𑇟'), + ('𑈸', '𑈹'), + ('𑈻', '𑈼'), + ('𑊩', '𑊩'), + ('𑏔', '𑏕'), + ('𑑋', '𑑌'), + ('𑗂', '𑗃'), + ('𑗉', '𑗗'), + ('𑙁', '𑙂'), + ('𑜼', '𑜾'), + ('𑥄', '𑥄'), + ('𑥆', '𑥆'), + ('𑩂', '𑩃'), + ('𑪛', '𑪜'), + ('𑱁', '𑱂'), + ('𑻷', '𑻸'), + ('𑽃', '𑽄'), + ('𖩮', '𖩯'), + ('𖫵', '𖫵'), + ('𖬷', '𖬸'), + ('𖭄', '𖭄'), + ('𖵮', '𖵯'), + ('𖺘', '𖺘'), + ('𛲟', '𛲟'), + ('𝪈', '𝪈'), +]; + +pub const SEP: &'static [(char, char)] = + &[('\u{85}', '\u{85}'), ('\u{2028}', '\u{2029}')]; + +pub const SP: &'static [(char, char)] = &[ + ('\t', '\t'), + ('\u{b}', '\u{c}'), + (' ', ' '), + ('\u{a0}', '\u{a0}'), + ('\u{1680}', '\u{1680}'), + ('\u{2000}', '\u{200a}'), + ('\u{202f}', '\u{202f}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; + +pub const UPPER: &'static [(char, char)] = &[ + ('A', 'Z'), + ('À', 'Ö'), + ('Ø', 'Þ'), + ('Ā', 'Ā'), + ('Ă', 'Ă'), + ('Ą', 'Ą'), + ('Ć', 'Ć'), + ('Ĉ', 'Ĉ'), + ('Ċ', 'Ċ'), + ('Č', 'Č'), + ('Ď', 'Ď'), + ('Đ', 'Đ'), + ('Ē', 'Ē'), + ('Ĕ', 'Ĕ'), + ('Ė', 'Ė'), + ('Ę', 'Ę'), + ('Ě', 'Ě'), + ('Ĝ', 'Ĝ'), + ('Ğ', 'Ğ'), + ('Ġ', 'Ġ'), + ('Ģ', 'Ģ'), + ('Ĥ', 'Ĥ'), + ('Ħ', 'Ħ'), + ('Ĩ', 'Ĩ'), + ('Ī', 'Ī'), + ('Ĭ', 'Ĭ'), + ('Į', 'Į'), 
+ ('İ', 'İ'), + ('IJ', 'IJ'), + ('Ĵ', 'Ĵ'), + ('Ķ', 'Ķ'), + ('Ĺ', 'Ĺ'), + ('Ļ', 'Ļ'), + ('Ľ', 'Ľ'), + ('Ŀ', 'Ŀ'), + ('Ł', 'Ł'), + ('Ń', 'Ń'), + ('Ņ', 'Ņ'), + ('Ň', 'Ň'), + ('Ŋ', 'Ŋ'), + ('Ō', 'Ō'), + ('Ŏ', 'Ŏ'), + ('Ő', 'Ő'), + ('Œ', 'Œ'), + ('Ŕ', 'Ŕ'), + ('Ŗ', 'Ŗ'), + ('Ř', 'Ř'), + ('Ś', 'Ś'), + ('Ŝ', 'Ŝ'), + ('Ş', 'Ş'), + ('Š', 'Š'), + ('Ţ', 'Ţ'), + ('Ť', 'Ť'), + ('Ŧ', 'Ŧ'), + ('Ũ', 'Ũ'), + ('Ū', 'Ū'), + ('Ŭ', 'Ŭ'), + ('Ů', 'Ů'), + ('Ű', 'Ű'), + ('Ų', 'Ų'), + ('Ŵ', 'Ŵ'), + ('Ŷ', 'Ŷ'), + ('Ÿ', 'Ź'), + ('Ż', 'Ż'), + ('Ž', 'Ž'), + ('Ɓ', 'Ƃ'), + ('Ƅ', 'Ƅ'), + ('Ɔ', 'Ƈ'), + ('Ɖ', 'Ƌ'), + ('Ǝ', 'Ƒ'), + ('Ɠ', 'Ɣ'), + ('Ɩ', 'Ƙ'), + ('Ɯ', 'Ɲ'), + ('Ɵ', 'Ơ'), + ('Ƣ', 'Ƣ'), + ('Ƥ', 'Ƥ'), + ('Ʀ', 'Ƨ'), + ('Ʃ', 'Ʃ'), + ('Ƭ', 'Ƭ'), + ('Ʈ', 'Ư'), + ('Ʊ', 'Ƴ'), + ('Ƶ', 'Ƶ'), + ('Ʒ', 'Ƹ'), + ('Ƽ', 'Ƽ'), + ('DŽ', 'Dž'), + ('LJ', 'Lj'), + ('NJ', 'Nj'), + ('Ǎ', 'Ǎ'), + ('Ǐ', 'Ǐ'), + ('Ǒ', 'Ǒ'), + ('Ǔ', 'Ǔ'), + ('Ǖ', 'Ǖ'), + ('Ǘ', 'Ǘ'), + ('Ǚ', 'Ǚ'), + ('Ǜ', 'Ǜ'), + ('Ǟ', 'Ǟ'), + ('Ǡ', 'Ǡ'), + ('Ǣ', 'Ǣ'), + ('Ǥ', 'Ǥ'), + ('Ǧ', 'Ǧ'), + ('Ǩ', 'Ǩ'), + ('Ǫ', 'Ǫ'), + ('Ǭ', 'Ǭ'), + ('Ǯ', 'Ǯ'), + ('DZ', 'Dz'), + ('Ǵ', 'Ǵ'), + ('Ƕ', 'Ǹ'), + ('Ǻ', 'Ǻ'), + ('Ǽ', 'Ǽ'), + ('Ǿ', 'Ǿ'), + ('Ȁ', 'Ȁ'), + ('Ȃ', 'Ȃ'), + ('Ȅ', 'Ȅ'), + ('Ȇ', 'Ȇ'), + ('Ȉ', 'Ȉ'), + ('Ȋ', 'Ȋ'), + ('Ȍ', 'Ȍ'), + ('Ȏ', 'Ȏ'), + ('Ȑ', 'Ȑ'), + ('Ȓ', 'Ȓ'), + ('Ȕ', 'Ȕ'), + ('Ȗ', 'Ȗ'), + ('Ș', 'Ș'), + ('Ț', 'Ț'), + ('Ȝ', 'Ȝ'), + ('Ȟ', 'Ȟ'), + ('Ƞ', 'Ƞ'), + ('Ȣ', 'Ȣ'), + ('Ȥ', 'Ȥ'), + ('Ȧ', 'Ȧ'), + ('Ȩ', 'Ȩ'), + ('Ȫ', 'Ȫ'), + ('Ȭ', 'Ȭ'), + ('Ȯ', 'Ȯ'), + ('Ȱ', 'Ȱ'), + ('Ȳ', 'Ȳ'), + ('Ⱥ', 'Ȼ'), + ('Ƚ', 'Ⱦ'), + ('Ɂ', 'Ɂ'), + ('Ƀ', 'Ɇ'), + ('Ɉ', 'Ɉ'), + ('Ɋ', 'Ɋ'), + ('Ɍ', 'Ɍ'), + ('Ɏ', 'Ɏ'), + ('Ͱ', 'Ͱ'), + ('Ͳ', 'Ͳ'), + ('Ͷ', 'Ͷ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ώ'), + ('Α', 'Ρ'), + ('Σ', 'Ϋ'), + ('Ϗ', 'Ϗ'), + ('ϒ', 'ϔ'), + ('Ϙ', 'Ϙ'), + ('Ϛ', 'Ϛ'), + ('Ϝ', 'Ϝ'), + ('Ϟ', 'Ϟ'), + ('Ϡ', 'Ϡ'), + ('Ϣ', 'Ϣ'), + ('Ϥ', 'Ϥ'), + ('Ϧ', 'Ϧ'), + ('Ϩ', 'Ϩ'), + ('Ϫ', 'Ϫ'), + ('Ϭ', 'Ϭ'), + ('Ϯ', 'Ϯ'), + ('ϴ', 'ϴ'), + 
('Ϸ', 'Ϸ'), + ('Ϲ', 'Ϻ'), + ('Ͻ', 'Я'), + ('Ѡ', 'Ѡ'), + ('Ѣ', 'Ѣ'), + ('Ѥ', 'Ѥ'), + ('Ѧ', 'Ѧ'), + ('Ѩ', 'Ѩ'), + ('Ѫ', 'Ѫ'), + ('Ѭ', 'Ѭ'), + ('Ѯ', 'Ѯ'), + ('Ѱ', 'Ѱ'), + ('Ѳ', 'Ѳ'), + ('Ѵ', 'Ѵ'), + ('Ѷ', 'Ѷ'), + ('Ѹ', 'Ѹ'), + ('Ѻ', 'Ѻ'), + ('Ѽ', 'Ѽ'), + ('Ѿ', 'Ѿ'), + ('Ҁ', 'Ҁ'), + ('Ҋ', 'Ҋ'), + ('Ҍ', 'Ҍ'), + ('Ҏ', 'Ҏ'), + ('Ґ', 'Ґ'), + ('Ғ', 'Ғ'), + ('Ҕ', 'Ҕ'), + ('Җ', 'Җ'), + ('Ҙ', 'Ҙ'), + ('Қ', 'Қ'), + ('Ҝ', 'Ҝ'), + ('Ҟ', 'Ҟ'), + ('Ҡ', 'Ҡ'), + ('Ң', 'Ң'), + ('Ҥ', 'Ҥ'), + ('Ҧ', 'Ҧ'), + ('Ҩ', 'Ҩ'), + ('Ҫ', 'Ҫ'), + ('Ҭ', 'Ҭ'), + ('Ү', 'Ү'), + ('Ұ', 'Ұ'), + ('Ҳ', 'Ҳ'), + ('Ҵ', 'Ҵ'), + ('Ҷ', 'Ҷ'), + ('Ҹ', 'Ҹ'), + ('Һ', 'Һ'), + ('Ҽ', 'Ҽ'), + ('Ҿ', 'Ҿ'), + ('Ӏ', 'Ӂ'), + ('Ӄ', 'Ӄ'), + ('Ӆ', 'Ӆ'), + ('Ӈ', 'Ӈ'), + ('Ӊ', 'Ӊ'), + ('Ӌ', 'Ӌ'), + ('Ӎ', 'Ӎ'), + ('Ӑ', 'Ӑ'), + ('Ӓ', 'Ӓ'), + ('Ӕ', 'Ӕ'), + ('Ӗ', 'Ӗ'), + ('Ә', 'Ә'), + ('Ӛ', 'Ӛ'), + ('Ӝ', 'Ӝ'), + ('Ӟ', 'Ӟ'), + ('Ӡ', 'Ӡ'), + ('Ӣ', 'Ӣ'), + ('Ӥ', 'Ӥ'), + ('Ӧ', 'Ӧ'), + ('Ө', 'Ө'), + ('Ӫ', 'Ӫ'), + ('Ӭ', 'Ӭ'), + ('Ӯ', 'Ӯ'), + ('Ӱ', 'Ӱ'), + ('Ӳ', 'Ӳ'), + ('Ӵ', 'Ӵ'), + ('Ӷ', 'Ӷ'), + ('Ӹ', 'Ӹ'), + ('Ӻ', 'Ӻ'), + ('Ӽ', 'Ӽ'), + ('Ӿ', 'Ӿ'), + ('Ԁ', 'Ԁ'), + ('Ԃ', 'Ԃ'), + ('Ԅ', 'Ԅ'), + ('Ԇ', 'Ԇ'), + ('Ԉ', 'Ԉ'), + ('Ԋ', 'Ԋ'), + ('Ԍ', 'Ԍ'), + ('Ԏ', 'Ԏ'), + ('Ԑ', 'Ԑ'), + ('Ԓ', 'Ԓ'), + ('Ԕ', 'Ԕ'), + ('Ԗ', 'Ԗ'), + ('Ԙ', 'Ԙ'), + ('Ԛ', 'Ԛ'), + ('Ԝ', 'Ԝ'), + ('Ԟ', 'Ԟ'), + ('Ԡ', 'Ԡ'), + ('Ԣ', 'Ԣ'), + ('Ԥ', 'Ԥ'), + ('Ԧ', 'Ԧ'), + ('Ԩ', 'Ԩ'), + ('Ԫ', 'Ԫ'), + ('Ԭ', 'Ԭ'), + ('Ԯ', 'Ԯ'), + ('Ա', 'Ֆ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('Ꭰ', 'Ᏽ'), + ('Ᲊ', 'Ᲊ'), + ('Ḁ', 'Ḁ'), + ('Ḃ', 'Ḃ'), + ('Ḅ', 'Ḅ'), + ('Ḇ', 'Ḇ'), + ('Ḉ', 'Ḉ'), + ('Ḋ', 'Ḋ'), + ('Ḍ', 'Ḍ'), + ('Ḏ', 'Ḏ'), + ('Ḑ', 'Ḑ'), + ('Ḓ', 'Ḓ'), + ('Ḕ', 'Ḕ'), + ('Ḗ', 'Ḗ'), + ('Ḙ', 'Ḙ'), + ('Ḛ', 'Ḛ'), + ('Ḝ', 'Ḝ'), + ('Ḟ', 'Ḟ'), + ('Ḡ', 'Ḡ'), + ('Ḣ', 'Ḣ'), + ('Ḥ', 'Ḥ'), + ('Ḧ', 'Ḧ'), + ('Ḩ', 'Ḩ'), + ('Ḫ', 'Ḫ'), + ('Ḭ', 'Ḭ'), + ('Ḯ', 'Ḯ'), + ('Ḱ', 'Ḱ'), + ('Ḳ', 'Ḳ'), + ('Ḵ', 'Ḵ'), + ('Ḷ', 'Ḷ'), + ('Ḹ', 'Ḹ'), + ('Ḻ', 'Ḻ'), + ('Ḽ', 'Ḽ'), + ('Ḿ', 'Ḿ'), + ('Ṁ', 'Ṁ'), + ('Ṃ', 'Ṃ'), + ('Ṅ', 'Ṅ'), 
+ ('Ṇ', 'Ṇ'), + ('Ṉ', 'Ṉ'), + ('Ṋ', 'Ṋ'), + ('Ṍ', 'Ṍ'), + ('Ṏ', 'Ṏ'), + ('Ṑ', 'Ṑ'), + ('Ṓ', 'Ṓ'), + ('Ṕ', 'Ṕ'), + ('Ṗ', 'Ṗ'), + ('Ṙ', 'Ṙ'), + ('Ṛ', 'Ṛ'), + ('Ṝ', 'Ṝ'), + ('Ṟ', 'Ṟ'), + ('Ṡ', 'Ṡ'), + ('Ṣ', 'Ṣ'), + ('Ṥ', 'Ṥ'), + ('Ṧ', 'Ṧ'), + ('Ṩ', 'Ṩ'), + ('Ṫ', 'Ṫ'), + ('Ṭ', 'Ṭ'), + ('Ṯ', 'Ṯ'), + ('Ṱ', 'Ṱ'), + ('Ṳ', 'Ṳ'), + ('Ṵ', 'Ṵ'), + ('Ṷ', 'Ṷ'), + ('Ṹ', 'Ṹ'), + ('Ṻ', 'Ṻ'), + ('Ṽ', 'Ṽ'), + ('Ṿ', 'Ṿ'), + ('Ẁ', 'Ẁ'), + ('Ẃ', 'Ẃ'), + ('Ẅ', 'Ẅ'), + ('Ẇ', 'Ẇ'), + ('Ẉ', 'Ẉ'), + ('Ẋ', 'Ẋ'), + ('Ẍ', 'Ẍ'), + ('Ẏ', 'Ẏ'), + ('Ẑ', 'Ẑ'), + ('Ẓ', 'Ẓ'), + ('Ẕ', 'Ẕ'), + ('ẞ', 'ẞ'), + ('Ạ', 'Ạ'), + ('Ả', 'Ả'), + ('Ấ', 'Ấ'), + ('Ầ', 'Ầ'), + ('Ẩ', 'Ẩ'), + ('Ẫ', 'Ẫ'), + ('Ậ', 'Ậ'), + ('Ắ', 'Ắ'), + ('Ằ', 'Ằ'), + ('Ẳ', 'Ẳ'), + ('Ẵ', 'Ẵ'), + ('Ặ', 'Ặ'), + ('Ẹ', 'Ẹ'), + ('Ẻ', 'Ẻ'), + ('Ẽ', 'Ẽ'), + ('Ế', 'Ế'), + ('Ề', 'Ề'), + ('Ể', 'Ể'), + ('Ễ', 'Ễ'), + ('Ệ', 'Ệ'), + ('Ỉ', 'Ỉ'), + ('Ị', 'Ị'), + ('Ọ', 'Ọ'), + ('Ỏ', 'Ỏ'), + ('Ố', 'Ố'), + ('Ồ', 'Ồ'), + ('Ổ', 'Ổ'), + ('Ỗ', 'Ỗ'), + ('Ộ', 'Ộ'), + ('Ớ', 'Ớ'), + ('Ờ', 'Ờ'), + ('Ở', 'Ở'), + ('Ỡ', 'Ỡ'), + ('Ợ', 'Ợ'), + ('Ụ', 'Ụ'), + ('Ủ', 'Ủ'), + ('Ứ', 'Ứ'), + ('Ừ', 'Ừ'), + ('Ử', 'Ử'), + ('Ữ', 'Ữ'), + ('Ự', 'Ự'), + ('Ỳ', 'Ỳ'), + ('Ỵ', 'Ỵ'), + ('Ỷ', 'Ỷ'), + ('Ỹ', 'Ỹ'), + ('Ỻ', 'Ỻ'), + ('Ỽ', 'Ỽ'), + ('Ỿ', 'Ỿ'), + ('Ἀ', 'Ἇ'), + ('Ἐ', 'Ἕ'), + ('Ἠ', 'Ἧ'), + ('Ἰ', 'Ἷ'), + ('Ὀ', 'Ὅ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'Ὗ'), + ('Ὠ', 'Ὧ'), + ('ᾈ', 'ᾏ'), + ('ᾘ', 'ᾟ'), + ('ᾨ', 'ᾯ'), + ('Ᾰ', 'ᾼ'), + ('Ὲ', 'ῌ'), + ('Ῐ', 'Ί'), + ('Ῠ', 'Ῥ'), + ('Ὸ', 'ῼ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℋ', 'ℍ'), + ('ℐ', 'ℒ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℰ', 'ℳ'), + ('ℾ', 'ℿ'), + ('ⅅ', 'ⅅ'), + ('Ⅰ', 'Ⅿ'), + ('Ↄ', 'Ↄ'), + ('Ⓐ', 'Ⓩ'), + ('Ⰰ', 'Ⱟ'), + ('Ⱡ', 'Ⱡ'), + ('Ɫ', 'Ɽ'), + ('Ⱨ', 'Ⱨ'), + ('Ⱪ', 'Ⱪ'), + ('Ⱬ', 'Ⱬ'), + ('Ɑ', 'Ɒ'), + ('Ⱳ', 'Ⱳ'), + ('Ⱶ', 'Ⱶ'), + ('Ȿ', 'Ⲁ'), + ('Ⲃ', 'Ⲃ'), + ('Ⲅ', 'Ⲅ'), + ('Ⲇ', 'Ⲇ'), + ('Ⲉ', 'Ⲉ'), + ('Ⲋ', 'Ⲋ'), + ('Ⲍ', 'Ⲍ'), + ('Ⲏ', 'Ⲏ'), + ('Ⲑ', 'Ⲑ'), + ('Ⲓ', 'Ⲓ'), + ('Ⲕ', 
'Ⲕ'), + ('Ⲗ', 'Ⲗ'), + ('Ⲙ', 'Ⲙ'), + ('Ⲛ', 'Ⲛ'), + ('Ⲝ', 'Ⲝ'), + ('Ⲟ', 'Ⲟ'), + ('Ⲡ', 'Ⲡ'), + ('Ⲣ', 'Ⲣ'), + ('Ⲥ', 'Ⲥ'), + ('Ⲧ', 'Ⲧ'), + ('Ⲩ', 'Ⲩ'), + ('Ⲫ', 'Ⲫ'), + ('Ⲭ', 'Ⲭ'), + ('Ⲯ', 'Ⲯ'), + ('Ⲱ', 'Ⲱ'), + ('Ⲳ', 'Ⲳ'), + ('Ⲵ', 'Ⲵ'), + ('Ⲷ', 'Ⲷ'), + ('Ⲹ', 'Ⲹ'), + ('Ⲻ', 'Ⲻ'), + ('Ⲽ', 'Ⲽ'), + ('Ⲿ', 'Ⲿ'), + ('Ⳁ', 'Ⳁ'), + ('Ⳃ', 'Ⳃ'), + ('Ⳅ', 'Ⳅ'), + ('Ⳇ', 'Ⳇ'), + ('Ⳉ', 'Ⳉ'), + ('Ⳋ', 'Ⳋ'), + ('Ⳍ', 'Ⳍ'), + ('Ⳏ', 'Ⳏ'), + ('Ⳑ', 'Ⳑ'), + ('Ⳓ', 'Ⳓ'), + ('Ⳕ', 'Ⳕ'), + ('Ⳗ', 'Ⳗ'), + ('Ⳙ', 'Ⳙ'), + ('Ⳛ', 'Ⳛ'), + ('Ⳝ', 'Ⳝ'), + ('Ⳟ', 'Ⳟ'), + ('Ⳡ', 'Ⳡ'), + ('Ⳣ', 'Ⳣ'), + ('Ⳬ', 'Ⳬ'), + ('Ⳮ', 'Ⳮ'), + ('Ⳳ', 'Ⳳ'), + ('Ꙁ', 'Ꙁ'), + ('Ꙃ', 'Ꙃ'), + ('Ꙅ', 'Ꙅ'), + ('Ꙇ', 'Ꙇ'), + ('Ꙉ', 'Ꙉ'), + ('Ꙋ', 'Ꙋ'), + ('Ꙍ', 'Ꙍ'), + ('Ꙏ', 'Ꙏ'), + ('Ꙑ', 'Ꙑ'), + ('Ꙓ', 'Ꙓ'), + ('Ꙕ', 'Ꙕ'), + ('Ꙗ', 'Ꙗ'), + ('Ꙙ', 'Ꙙ'), + ('Ꙛ', 'Ꙛ'), + ('Ꙝ', 'Ꙝ'), + ('Ꙟ', 'Ꙟ'), + ('Ꙡ', 'Ꙡ'), + ('Ꙣ', 'Ꙣ'), + ('Ꙥ', 'Ꙥ'), + ('Ꙧ', 'Ꙧ'), + ('Ꙩ', 'Ꙩ'), + ('Ꙫ', 'Ꙫ'), + ('Ꙭ', 'Ꙭ'), + ('Ꚁ', 'Ꚁ'), + ('Ꚃ', 'Ꚃ'), + ('Ꚅ', 'Ꚅ'), + ('Ꚇ', 'Ꚇ'), + ('Ꚉ', 'Ꚉ'), + ('Ꚋ', 'Ꚋ'), + ('Ꚍ', 'Ꚍ'), + ('Ꚏ', 'Ꚏ'), + ('Ꚑ', 'Ꚑ'), + ('Ꚓ', 'Ꚓ'), + ('Ꚕ', 'Ꚕ'), + ('Ꚗ', 'Ꚗ'), + ('Ꚙ', 'Ꚙ'), + ('Ꚛ', 'Ꚛ'), + ('Ꜣ', 'Ꜣ'), + ('Ꜥ', 'Ꜥ'), + ('Ꜧ', 'Ꜧ'), + ('Ꜩ', 'Ꜩ'), + ('Ꜫ', 'Ꜫ'), + ('Ꜭ', 'Ꜭ'), + ('Ꜯ', 'Ꜯ'), + ('Ꜳ', 'Ꜳ'), + ('Ꜵ', 'Ꜵ'), + ('Ꜷ', 'Ꜷ'), + ('Ꜹ', 'Ꜹ'), + ('Ꜻ', 'Ꜻ'), + ('Ꜽ', 'Ꜽ'), + ('Ꜿ', 'Ꜿ'), + ('Ꝁ', 'Ꝁ'), + ('Ꝃ', 'Ꝃ'), + ('Ꝅ', 'Ꝅ'), + ('Ꝇ', 'Ꝇ'), + ('Ꝉ', 'Ꝉ'), + ('Ꝋ', 'Ꝋ'), + ('Ꝍ', 'Ꝍ'), + ('Ꝏ', 'Ꝏ'), + ('Ꝑ', 'Ꝑ'), + ('Ꝓ', 'Ꝓ'), + ('Ꝕ', 'Ꝕ'), + ('Ꝗ', 'Ꝗ'), + ('Ꝙ', 'Ꝙ'), + ('Ꝛ', 'Ꝛ'), + ('Ꝝ', 'Ꝝ'), + ('Ꝟ', 'Ꝟ'), + ('Ꝡ', 'Ꝡ'), + ('Ꝣ', 'Ꝣ'), + ('Ꝥ', 'Ꝥ'), + ('Ꝧ', 'Ꝧ'), + ('Ꝩ', 'Ꝩ'), + ('Ꝫ', 'Ꝫ'), + ('Ꝭ', 'Ꝭ'), + ('Ꝯ', 'Ꝯ'), + ('Ꝺ', 'Ꝺ'), + ('Ꝼ', 'Ꝼ'), + ('Ᵹ', 'Ꝿ'), + ('Ꞁ', 'Ꞁ'), + ('Ꞃ', 'Ꞃ'), + ('Ꞅ', 'Ꞅ'), + ('Ꞇ', 'Ꞇ'), + ('Ꞌ', 'Ꞌ'), + ('Ɥ', 'Ɥ'), + ('Ꞑ', 'Ꞑ'), + ('Ꞓ', 'Ꞓ'), + ('Ꞗ', 'Ꞗ'), + ('Ꞙ', 'Ꞙ'), + ('Ꞛ', 'Ꞛ'), + ('Ꞝ', 'Ꞝ'), + ('Ꞟ', 'Ꞟ'), + ('Ꞡ', 'Ꞡ'), + ('Ꞣ', 'Ꞣ'), + ('Ꞥ', 'Ꞥ'), + ('Ꞧ', 'Ꞧ'), + ('Ꞩ', 'Ꞩ'), + ('Ɦ', 'Ɪ'), + ('Ʞ', 'Ꞵ'), + ('Ꞷ', 'Ꞷ'), + ('Ꞹ', 'Ꞹ'), + 
('Ꞻ', 'Ꞻ'), + ('Ꞽ', 'Ꞽ'), + ('Ꞿ', 'Ꞿ'), + ('Ꟁ', 'Ꟁ'), + ('Ꟃ', 'Ꟃ'), + ('Ꞔ', 'Ꟈ'), + ('Ꟊ', 'Ꟊ'), + ('Ɤ', 'Ꟍ'), + ('Ꟑ', 'Ꟑ'), + ('Ꟗ', 'Ꟗ'), + ('Ꟙ', 'Ꟙ'), + ('Ꟛ', 'Ꟛ'), + ('Ƛ', 'Ƛ'), + ('Ꟶ', 'Ꟶ'), + ('A', 'Z'), + ('𐐀', '𐐧'), + ('𐒰', '𐓓'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐲀', '𐲲'), + ('𐵐', '𐵥'), + ('𑢠', '𑢿'), + ('𖹀', '𖹟'), + ('𝐀', '𝐙'), + ('𝐴', '𝑍'), + ('𝑨', '𝒁'), + ('𝒜', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒵'), + ('𝓐', '𝓩'), + ('𝔄', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔸', '𝔹'), + ('𝔻', '𝔾'), + ('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕬', '𝖅'), + ('𝖠', '𝖹'), + ('𝗔', '𝗭'), + ('𝘈', '𝘡'), + ('𝘼', '𝙕'), + ('𝙰', '𝚉'), + ('𝚨', '𝛀'), + ('𝛢', '𝛺'), + ('𝜜', '𝜴'), + ('𝝖', '𝝮'), + ('𝞐', '𝞨'), + ('𝟊', '𝟊'), + ('𞤀', '𞤡'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), +]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/word_break.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/word_break.rs new file mode 100644 index 0000000000000000000000000000000000000000..b764d34ac724513f540b874fcf6a31ec637a387c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/unicode_tables/word_break.rs @@ -0,0 +1,1152 @@ +// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: +// +// ucd-generate word-break ucd-16.0.0 --chars +// +// Unicode version: 16.0.0. +// +// ucd-generate 0.3.1 is available on crates.io. 
+ +pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] = &[ + ("ALetter", ALETTER), + ("CR", CR), + ("Double_Quote", DOUBLE_QUOTE), + ("Extend", EXTEND), + ("ExtendNumLet", EXTENDNUMLET), + ("Format", FORMAT), + ("Hebrew_Letter", HEBREW_LETTER), + ("Katakana", KATAKANA), + ("LF", LF), + ("MidLetter", MIDLETTER), + ("MidNum", MIDNUM), + ("MidNumLet", MIDNUMLET), + ("Newline", NEWLINE), + ("Numeric", NUMERIC), + ("Regional_Indicator", REGIONAL_INDICATOR), + ("Single_Quote", SINGLE_QUOTE), + ("WSegSpace", WSEGSPACE), + ("ZWJ", ZWJ), +]; + +pub const ALETTER: &'static [(char, char)] = &[ + ('A', 'Z'), + ('a', 'z'), + ('ª', 'ª'), + ('µ', 'µ'), + ('º', 'º'), + ('À', 'Ö'), + ('Ø', 'ö'), + ('ø', '˗'), + ('˞', '˿'), + ('Ͱ', 'ʹ'), + ('Ͷ', 'ͷ'), + ('ͺ', 'ͽ'), + ('Ϳ', 'Ϳ'), + ('Ά', 'Ά'), + ('Έ', 'Ί'), + ('Ό', 'Ό'), + ('Ύ', 'Ρ'), + ('Σ', 'ϵ'), + ('Ϸ', 'ҁ'), + ('Ҋ', 'ԯ'), + ('Ա', 'Ֆ'), + ('ՙ', '՜'), + ('՞', '՞'), + ('ՠ', 'ֈ'), + ('֊', '֊'), + ('׳', '׳'), + ('ؠ', 'ي'), + ('ٮ', 'ٯ'), + ('ٱ', 'ۓ'), + ('ە', 'ە'), + ('ۥ', 'ۦ'), + ('ۮ', 'ۯ'), + ('ۺ', 'ۼ'), + ('ۿ', 'ۿ'), + ('\u{70f}', 'ܐ'), + ('ܒ', 'ܯ'), + ('ݍ', 'ޥ'), + ('ޱ', 'ޱ'), + ('ߊ', 'ߪ'), + ('ߴ', 'ߵ'), + ('ߺ', 'ߺ'), + ('ࠀ', 'ࠕ'), + ('ࠚ', 'ࠚ'), + ('ࠤ', 'ࠤ'), + ('ࠨ', 'ࠨ'), + ('ࡀ', 'ࡘ'), + ('ࡠ', 'ࡪ'), + ('ࡰ', 'ࢇ'), + ('ࢉ', 'ࢎ'), + ('ࢠ', 'ࣉ'), + ('ऄ', 'ह'), + ('ऽ', 'ऽ'), + ('ॐ', 'ॐ'), + ('क़', 'ॡ'), + ('ॱ', 'ঀ'), + ('অ', 'ঌ'), + ('এ', 'ঐ'), + ('ও', 'ন'), + ('প', 'র'), + ('ল', 'ল'), + ('শ', 'হ'), + ('ঽ', 'ঽ'), + ('ৎ', 'ৎ'), + ('ড়', 'ঢ়'), + ('য়', 'ৡ'), + ('ৰ', 'ৱ'), + ('ৼ', 'ৼ'), + ('ਅ', 'ਊ'), + ('ਏ', 'ਐ'), + ('ਓ', 'ਨ'), + ('ਪ', 'ਰ'), + ('ਲ', 'ਲ਼'), + ('ਵ', 'ਸ਼'), + ('ਸ', 'ਹ'), + ('ਖ਼', 'ੜ'), + ('ਫ਼', 'ਫ਼'), + ('ੲ', 'ੴ'), + ('અ', 'ઍ'), + ('એ', 'ઑ'), + ('ઓ', 'ન'), + ('પ', 'ર'), + ('લ', 'ળ'), + ('વ', 'હ'), + ('ઽ', 'ઽ'), + ('ૐ', 'ૐ'), + ('ૠ', 'ૡ'), + ('ૹ', 'ૹ'), + ('ଅ', 'ଌ'), + ('ଏ', 'ଐ'), + ('ଓ', 'ନ'), + ('ପ', 'ର'), + ('ଲ', 'ଳ'), + ('ଵ', 'ହ'), + ('ଽ', 'ଽ'), + ('ଡ଼', 'ଢ଼'), + ('ୟ', 'ୡ'), + ('ୱ', 'ୱ'), + ('ஃ', 'ஃ'), + 
('அ', 'ஊ'), + ('எ', 'ஐ'), + ('ஒ', 'க'), + ('ங', 'ச'), + ('ஜ', 'ஜ'), + ('ஞ', 'ட'), + ('ண', 'த'), + ('ந', 'ப'), + ('ம', 'ஹ'), + ('ௐ', 'ௐ'), + ('అ', 'ఌ'), + ('ఎ', 'ఐ'), + ('ఒ', 'న'), + ('ప', 'హ'), + ('ఽ', 'ఽ'), + ('ౘ', 'ౚ'), + ('ౝ', 'ౝ'), + ('ౠ', 'ౡ'), + ('ಀ', 'ಀ'), + ('ಅ', 'ಌ'), + ('ಎ', 'ಐ'), + ('ಒ', 'ನ'), + ('ಪ', 'ಳ'), + ('ವ', 'ಹ'), + ('ಽ', 'ಽ'), + ('ೝ', 'ೞ'), + ('ೠ', 'ೡ'), + ('ೱ', 'ೲ'), + ('ഄ', 'ഌ'), + ('എ', 'ഐ'), + ('ഒ', 'ഺ'), + ('ഽ', 'ഽ'), + ('ൎ', 'ൎ'), + ('ൔ', 'ൖ'), + ('ൟ', 'ൡ'), + ('ൺ', 'ൿ'), + ('අ', 'ඖ'), + ('ක', 'න'), + ('ඳ', 'ර'), + ('ල', 'ල'), + ('ව', 'ෆ'), + ('ༀ', 'ༀ'), + ('ཀ', 'ཇ'), + ('ཉ', 'ཬ'), + ('ྈ', 'ྌ'), + ('Ⴀ', 'Ⴥ'), + ('Ⴧ', 'Ⴧ'), + ('Ⴭ', 'Ⴭ'), + ('ა', 'ჺ'), + ('ჼ', 'ቈ'), + ('ቊ', 'ቍ'), + ('ቐ', 'ቖ'), + ('ቘ', 'ቘ'), + ('ቚ', 'ቝ'), + ('በ', 'ኈ'), + ('ኊ', 'ኍ'), + ('ነ', 'ኰ'), + ('ኲ', 'ኵ'), + ('ኸ', 'ኾ'), + ('ዀ', 'ዀ'), + ('ዂ', 'ዅ'), + ('ወ', 'ዖ'), + ('ዘ', 'ጐ'), + ('ጒ', 'ጕ'), + ('ጘ', 'ፚ'), + ('ᎀ', 'ᎏ'), + ('Ꭰ', 'Ᏽ'), + ('ᏸ', 'ᏽ'), + ('ᐁ', 'ᙬ'), + ('ᙯ', 'ᙿ'), + ('ᚁ', 'ᚚ'), + ('ᚠ', 'ᛪ'), + ('ᛮ', 'ᛸ'), + ('ᜀ', 'ᜑ'), + ('ᜟ', 'ᜱ'), + ('ᝀ', 'ᝑ'), + ('ᝠ', 'ᝬ'), + ('ᝮ', 'ᝰ'), + ('ᠠ', 'ᡸ'), + ('ᢀ', 'ᢄ'), + ('ᢇ', 'ᢨ'), + ('ᢪ', 'ᢪ'), + ('ᢰ', 'ᣵ'), + ('ᤀ', 'ᤞ'), + ('ᨀ', 'ᨖ'), + ('ᬅ', 'ᬳ'), + ('ᭅ', 'ᭌ'), + ('ᮃ', 'ᮠ'), + ('ᮮ', 'ᮯ'), + ('ᮺ', 'ᯥ'), + ('ᰀ', 'ᰣ'), + ('ᱍ', 'ᱏ'), + ('ᱚ', 'ᱽ'), + ('ᲀ', 'ᲊ'), + ('Ა', 'Ჺ'), + ('Ჽ', 'Ჿ'), + ('ᳩ', 'ᳬ'), + ('ᳮ', 'ᳳ'), + ('ᳵ', 'ᳶ'), + ('ᳺ', 'ᳺ'), + ('ᴀ', 'ᶿ'), + ('Ḁ', 'ἕ'), + ('Ἐ', 'Ἕ'), + ('ἠ', 'ὅ'), + ('Ὀ', 'Ὅ'), + ('ὐ', 'ὗ'), + ('Ὑ', 'Ὑ'), + ('Ὓ', 'Ὓ'), + ('Ὕ', 'Ὕ'), + ('Ὗ', 'ώ'), + ('ᾀ', 'ᾴ'), + ('ᾶ', 'ᾼ'), + ('ι', 'ι'), + ('ῂ', 'ῄ'), + ('ῆ', 'ῌ'), + ('ῐ', 'ΐ'), + ('ῖ', 'Ί'), + ('ῠ', 'Ῥ'), + ('ῲ', 'ῴ'), + ('ῶ', 'ῼ'), + ('ⁱ', 'ⁱ'), + ('ⁿ', 'ⁿ'), + ('ₐ', 'ₜ'), + ('ℂ', 'ℂ'), + ('ℇ', 'ℇ'), + ('ℊ', 'ℓ'), + ('ℕ', 'ℕ'), + ('ℙ', 'ℝ'), + ('ℤ', 'ℤ'), + ('Ω', 'Ω'), + ('ℨ', 'ℨ'), + ('K', 'ℭ'), + ('ℯ', 'ℹ'), + ('ℼ', 'ℿ'), + ('ⅅ', 'ⅉ'), + ('ⅎ', 'ⅎ'), + ('Ⅰ', 'ↈ'), + ('Ⓐ', 'ⓩ'), + ('Ⰰ', 'ⳤ'), + ('Ⳬ', 'ⳮ'), + ('Ⳳ', 'ⳳ'), + ('ⴀ', 'ⴥ'), + ('ⴧ', 'ⴧ'), 
+ ('ⴭ', 'ⴭ'), + ('ⴰ', 'ⵧ'), + ('ⵯ', 'ⵯ'), + ('ⶀ', 'ⶖ'), + ('ⶠ', 'ⶦ'), + ('ⶨ', 'ⶮ'), + ('ⶰ', 'ⶶ'), + ('ⶸ', 'ⶾ'), + ('ⷀ', 'ⷆ'), + ('ⷈ', 'ⷎ'), + ('ⷐ', 'ⷖ'), + ('ⷘ', 'ⷞ'), + ('ⸯ', 'ⸯ'), + ('々', '々'), + ('〻', '〼'), + ('ㄅ', 'ㄯ'), + ('ㄱ', 'ㆎ'), + ('ㆠ', 'ㆿ'), + ('ꀀ', 'ꒌ'), + ('ꓐ', 'ꓽ'), + ('ꔀ', 'ꘌ'), + ('ꘐ', 'ꘟ'), + ('ꘪ', 'ꘫ'), + ('Ꙁ', 'ꙮ'), + ('ꙿ', 'ꚝ'), + ('ꚠ', 'ꛯ'), + ('꜈', 'ꟍ'), + ('Ꟑ', 'ꟑ'), + ('ꟓ', 'ꟓ'), + ('ꟕ', 'Ƛ'), + ('ꟲ', 'ꠁ'), + ('ꠃ', 'ꠅ'), + ('ꠇ', 'ꠊ'), + ('ꠌ', 'ꠢ'), + ('ꡀ', 'ꡳ'), + ('ꢂ', 'ꢳ'), + ('ꣲ', 'ꣷ'), + ('ꣻ', 'ꣻ'), + ('ꣽ', 'ꣾ'), + ('ꤊ', 'ꤥ'), + ('ꤰ', 'ꥆ'), + ('ꥠ', 'ꥼ'), + ('ꦄ', 'ꦲ'), + ('ꧏ', 'ꧏ'), + ('ꨀ', 'ꨨ'), + ('ꩀ', 'ꩂ'), + ('ꩄ', 'ꩋ'), + ('ꫠ', 'ꫪ'), + ('ꫲ', 'ꫴ'), + ('ꬁ', 'ꬆ'), + ('ꬉ', 'ꬎ'), + ('ꬑ', 'ꬖ'), + ('ꬠ', 'ꬦ'), + ('ꬨ', 'ꬮ'), + ('ꬰ', 'ꭩ'), + ('ꭰ', 'ꯢ'), + ('가', '힣'), + ('ힰ', 'ퟆ'), + ('ퟋ', 'ퟻ'), + ('ff', 'st'), + ('ﬓ', 'ﬗ'), + ('ﭐ', 'ﮱ'), + ('ﯓ', 'ﴽ'), + ('ﵐ', 'ﶏ'), + ('ﶒ', 'ﷇ'), + ('ﷰ', 'ﷻ'), + ('ﹰ', 'ﹴ'), + ('ﹶ', 'ﻼ'), + ('A', 'Z'), + ('a', 'z'), + ('ᅠ', 'ᄒ'), + ('ᅡ', 'ᅦ'), + ('ᅧ', 'ᅬ'), + ('ᅭ', 'ᅲ'), + ('ᅳ', 'ᅵ'), + ('𐀀', '𐀋'), + ('𐀍', '𐀦'), + ('𐀨', '𐀺'), + ('𐀼', '𐀽'), + ('𐀿', '𐁍'), + ('𐁐', '𐁝'), + ('𐂀', '𐃺'), + ('𐅀', '𐅴'), + ('𐊀', '𐊜'), + ('𐊠', '𐋐'), + ('𐌀', '𐌟'), + ('𐌭', '𐍊'), + ('𐍐', '𐍵'), + ('𐎀', '𐎝'), + ('𐎠', '𐏃'), + ('𐏈', '𐏏'), + ('𐏑', '𐏕'), + ('𐐀', '𐒝'), + ('𐒰', '𐓓'), + ('𐓘', '𐓻'), + ('𐔀', '𐔧'), + ('𐔰', '𐕣'), + ('𐕰', '𐕺'), + ('𐕼', '𐖊'), + ('𐖌', '𐖒'), + ('𐖔', '𐖕'), + ('𐖗', '𐖡'), + ('𐖣', '𐖱'), + ('𐖳', '𐖹'), + ('𐖻', '𐖼'), + ('𐗀', '𐗳'), + ('𐘀', '𐜶'), + ('𐝀', '𐝕'), + ('𐝠', '𐝧'), + ('𐞀', '𐞅'), + ('𐞇', '𐞰'), + ('𐞲', '𐞺'), + ('𐠀', '𐠅'), + ('𐠈', '𐠈'), + ('𐠊', '𐠵'), + ('𐠷', '𐠸'), + ('𐠼', '𐠼'), + ('𐠿', '𐡕'), + ('𐡠', '𐡶'), + ('𐢀', '𐢞'), + ('𐣠', '𐣲'), + ('𐣴', '𐣵'), + ('𐤀', '𐤕'), + ('𐤠', '𐤹'), + ('𐦀', '𐦷'), + ('𐦾', '𐦿'), + ('𐨀', '𐨀'), + ('𐨐', '𐨓'), + ('𐨕', '𐨗'), + ('𐨙', '𐨵'), + ('𐩠', '𐩼'), + ('𐪀', '𐪜'), + ('𐫀', '𐫇'), + ('𐫉', '𐫤'), + ('𐬀', '𐬵'), + ('𐭀', '𐭕'), + ('𐭠', '𐭲'), + ('𐮀', '𐮑'), + ('𐰀', '𐱈'), + ('𐲀', '𐲲'), + ('𐳀', '𐳲'), + ('𐴀', '𐴣'), + ('𐵊', 
'𐵥'), + ('𐵯', '𐶅'), + ('𐺀', '𐺩'), + ('𐺰', '𐺱'), + ('𐻂', '𐻄'), + ('𐼀', '𐼜'), + ('𐼧', '𐼧'), + ('𐼰', '𐽅'), + ('𐽰', '𐾁'), + ('𐾰', '𐿄'), + ('𐿠', '𐿶'), + ('𑀃', '𑀷'), + ('𑁱', '𑁲'), + ('𑁵', '𑁵'), + ('𑂃', '𑂯'), + ('𑃐', '𑃨'), + ('𑄃', '𑄦'), + ('𑅄', '𑅄'), + ('𑅇', '𑅇'), + ('𑅐', '𑅲'), + ('𑅶', '𑅶'), + ('𑆃', '𑆲'), + ('𑇁', '𑇄'), + ('𑇚', '𑇚'), + ('𑇜', '𑇜'), + ('𑈀', '𑈑'), + ('𑈓', '𑈫'), + ('𑈿', '𑉀'), + ('𑊀', '𑊆'), + ('𑊈', '𑊈'), + ('𑊊', '𑊍'), + ('𑊏', '𑊝'), + ('𑊟', '𑊨'), + ('𑊰', '𑋞'), + ('𑌅', '𑌌'), + ('𑌏', '𑌐'), + ('𑌓', '𑌨'), + ('𑌪', '𑌰'), + ('𑌲', '𑌳'), + ('𑌵', '𑌹'), + ('𑌽', '𑌽'), + ('𑍐', '𑍐'), + ('𑍝', '𑍡'), + ('𑎀', '𑎉'), + ('𑎋', '𑎋'), + ('𑎎', '𑎎'), + ('𑎐', '𑎵'), + ('𑎷', '𑎷'), + ('𑏑', '𑏑'), + ('𑏓', '𑏓'), + ('𑐀', '𑐴'), + ('𑑇', '𑑊'), + ('𑑟', '𑑡'), + ('𑒀', '𑒯'), + ('𑓄', '𑓅'), + ('𑓇', '𑓇'), + ('𑖀', '𑖮'), + ('𑗘', '𑗛'), + ('𑘀', '𑘯'), + ('𑙄', '𑙄'), + ('𑚀', '𑚪'), + ('𑚸', '𑚸'), + ('𑠀', '𑠫'), + ('𑢠', '𑣟'), + ('𑣿', '𑤆'), + ('𑤉', '𑤉'), + ('𑤌', '𑤓'), + ('𑤕', '𑤖'), + ('𑤘', '𑤯'), + ('𑤿', '𑤿'), + ('𑥁', '𑥁'), + ('𑦠', '𑦧'), + ('𑦪', '𑧐'), + ('𑧡', '𑧡'), + ('𑧣', '𑧣'), + ('𑨀', '𑨀'), + ('𑨋', '𑨲'), + ('𑨺', '𑨺'), + ('𑩐', '𑩐'), + ('𑩜', '𑪉'), + ('𑪝', '𑪝'), + ('𑪰', '𑫸'), + ('𑯀', '𑯠'), + ('𑰀', '𑰈'), + ('𑰊', '𑰮'), + ('𑱀', '𑱀'), + ('𑱲', '𑲏'), + ('𑴀', '𑴆'), + ('𑴈', '𑴉'), + ('𑴋', '𑴰'), + ('𑵆', '𑵆'), + ('𑵠', '𑵥'), + ('𑵧', '𑵨'), + ('𑵪', '𑶉'), + ('𑶘', '𑶘'), + ('𑻠', '𑻲'), + ('𑼂', '𑼂'), + ('𑼄', '𑼐'), + ('𑼒', '𑼳'), + ('𑾰', '𑾰'), + ('𒀀', '𒎙'), + ('𒐀', '𒑮'), + ('𒒀', '𒕃'), + ('𒾐', '𒿰'), + ('𓀀', '𓐯'), + ('𓑁', '𓑆'), + ('𓑠', '𔏺'), + ('𔐀', '𔙆'), + ('𖄀', '𖄝'), + ('𖠀', '𖨸'), + ('𖩀', '𖩞'), + ('𖩰', '𖪾'), + ('𖫐', '𖫭'), + ('𖬀', '𖬯'), + ('𖭀', '𖭃'), + ('𖭣', '𖭷'), + ('𖭽', '𖮏'), + ('𖵀', '𖵬'), + ('𖹀', '𖹿'), + ('𖼀', '𖽊'), + ('𖽐', '𖽐'), + ('𖾓', '𖾟'), + ('𖿠', '𖿡'), + ('𖿣', '𖿣'), + ('𛰀', '𛱪'), + ('𛱰', '𛱼'), + ('𛲀', '𛲈'), + ('𛲐', '𛲙'), + ('𝐀', '𝑔'), + ('𝑖', '𝒜'), + ('𝒞', '𝒟'), + ('𝒢', '𝒢'), + ('𝒥', '𝒦'), + ('𝒩', '𝒬'), + ('𝒮', '𝒹'), + ('𝒻', '𝒻'), + ('𝒽', '𝓃'), + ('𝓅', '𝔅'), + ('𝔇', '𝔊'), + ('𝔍', '𝔔'), + ('𝔖', '𝔜'), + ('𝔞', '𝔹'), + ('𝔻', '𝔾'), + 
('𝕀', '𝕄'), + ('𝕆', '𝕆'), + ('𝕊', '𝕐'), + ('𝕒', '𝚥'), + ('𝚨', '𝛀'), + ('𝛂', '𝛚'), + ('𝛜', '𝛺'), + ('𝛼', '𝜔'), + ('𝜖', '𝜴'), + ('𝜶', '𝝎'), + ('𝝐', '𝝮'), + ('𝝰', '𝞈'), + ('𝞊', '𝞨'), + ('𝞪', '𝟂'), + ('𝟄', '𝟋'), + ('𝼀', '𝼞'), + ('𝼥', '𝼪'), + ('𞀰', '𞁭'), + ('𞄀', '𞄬'), + ('𞄷', '𞄽'), + ('𞅎', '𞅎'), + ('𞊐', '𞊭'), + ('𞋀', '𞋫'), + ('𞓐', '𞓫'), + ('𞗐', '𞗭'), + ('𞗰', '𞗰'), + ('𞟠', '𞟦'), + ('𞟨', '𞟫'), + ('𞟭', '𞟮'), + ('𞟰', '𞟾'), + ('𞠀', '𞣄'), + ('𞤀', '𞥃'), + ('𞥋', '𞥋'), + ('𞸀', '𞸃'), + ('𞸅', '𞸟'), + ('𞸡', '𞸢'), + ('𞸤', '𞸤'), + ('𞸧', '𞸧'), + ('𞸩', '𞸲'), + ('𞸴', '𞸷'), + ('𞸹', '𞸹'), + ('𞸻', '𞸻'), + ('𞹂', '𞹂'), + ('𞹇', '𞹇'), + ('𞹉', '𞹉'), + ('𞹋', '𞹋'), + ('𞹍', '𞹏'), + ('𞹑', '𞹒'), + ('𞹔', '𞹔'), + ('𞹗', '𞹗'), + ('𞹙', '𞹙'), + ('𞹛', '𞹛'), + ('𞹝', '𞹝'), + ('𞹟', '𞹟'), + ('𞹡', '𞹢'), + ('𞹤', '𞹤'), + ('𞹧', '𞹪'), + ('𞹬', '𞹲'), + ('𞹴', '𞹷'), + ('𞹹', '𞹼'), + ('𞹾', '𞹾'), + ('𞺀', '𞺉'), + ('𞺋', '𞺛'), + ('𞺡', '𞺣'), + ('𞺥', '𞺩'), + ('𞺫', '𞺻'), + ('🄰', '🅉'), + ('🅐', '🅩'), + ('🅰', '🆉'), +]; + +pub const CR: &'static [(char, char)] = &[('\r', '\r')]; + +pub const DOUBLE_QUOTE: &'static [(char, char)] = &[('"', '"')]; + +pub const EXTEND: &'static [(char, char)] = &[ + ('\u{300}', '\u{36f}'), + ('\u{483}', '\u{489}'), + ('\u{591}', '\u{5bd}'), + ('\u{5bf}', '\u{5bf}'), + ('\u{5c1}', '\u{5c2}'), + ('\u{5c4}', '\u{5c5}'), + ('\u{5c7}', '\u{5c7}'), + ('\u{610}', '\u{61a}'), + ('\u{64b}', '\u{65f}'), + ('\u{670}', '\u{670}'), + ('\u{6d6}', '\u{6dc}'), + ('\u{6df}', '\u{6e4}'), + ('\u{6e7}', '\u{6e8}'), + ('\u{6ea}', '\u{6ed}'), + ('\u{711}', '\u{711}'), + ('\u{730}', '\u{74a}'), + ('\u{7a6}', '\u{7b0}'), + ('\u{7eb}', '\u{7f3}'), + ('\u{7fd}', '\u{7fd}'), + ('\u{816}', '\u{819}'), + ('\u{81b}', '\u{823}'), + ('\u{825}', '\u{827}'), + ('\u{829}', '\u{82d}'), + ('\u{859}', '\u{85b}'), + ('\u{897}', '\u{89f}'), + ('\u{8ca}', '\u{8e1}'), + ('\u{8e3}', 'ः'), + ('\u{93a}', '\u{93c}'), + ('ा', 'ॏ'), + ('\u{951}', '\u{957}'), + ('\u{962}', '\u{963}'), + ('\u{981}', 'ঃ'), + ('\u{9bc}', '\u{9bc}'), + ('\u{9be}', 
'\u{9c4}'), + ('ে', 'ৈ'), + ('ো', '\u{9cd}'), + ('\u{9d7}', '\u{9d7}'), + ('\u{9e2}', '\u{9e3}'), + ('\u{9fe}', '\u{9fe}'), + ('\u{a01}', 'ਃ'), + ('\u{a3c}', '\u{a3c}'), + ('ਾ', '\u{a42}'), + ('\u{a47}', '\u{a48}'), + ('\u{a4b}', '\u{a4d}'), + ('\u{a51}', '\u{a51}'), + ('\u{a70}', '\u{a71}'), + ('\u{a75}', '\u{a75}'), + ('\u{a81}', 'ઃ'), + ('\u{abc}', '\u{abc}'), + ('ા', '\u{ac5}'), + ('\u{ac7}', 'ૉ'), + ('ો', '\u{acd}'), + ('\u{ae2}', '\u{ae3}'), + ('\u{afa}', '\u{aff}'), + ('\u{b01}', 'ଃ'), + ('\u{b3c}', '\u{b3c}'), + ('\u{b3e}', '\u{b44}'), + ('େ', 'ୈ'), + ('ୋ', '\u{b4d}'), + ('\u{b55}', '\u{b57}'), + ('\u{b62}', '\u{b63}'), + ('\u{b82}', '\u{b82}'), + ('\u{bbe}', 'ூ'), + ('ெ', 'ை'), + ('ொ', '\u{bcd}'), + ('\u{bd7}', '\u{bd7}'), + ('\u{c00}', '\u{c04}'), + ('\u{c3c}', '\u{c3c}'), + ('\u{c3e}', 'ౄ'), + ('\u{c46}', '\u{c48}'), + ('\u{c4a}', '\u{c4d}'), + ('\u{c55}', '\u{c56}'), + ('\u{c62}', '\u{c63}'), + ('\u{c81}', 'ಃ'), + ('\u{cbc}', '\u{cbc}'), + ('ಾ', 'ೄ'), + ('\u{cc6}', '\u{cc8}'), + ('\u{cca}', '\u{ccd}'), + ('\u{cd5}', '\u{cd6}'), + ('\u{ce2}', '\u{ce3}'), + ('ೳ', 'ೳ'), + ('\u{d00}', 'ഃ'), + ('\u{d3b}', '\u{d3c}'), + ('\u{d3e}', '\u{d44}'), + ('െ', 'ൈ'), + ('ൊ', '\u{d4d}'), + ('\u{d57}', '\u{d57}'), + ('\u{d62}', '\u{d63}'), + ('\u{d81}', 'ඃ'), + ('\u{dca}', '\u{dca}'), + ('\u{dcf}', '\u{dd4}'), + ('\u{dd6}', '\u{dd6}'), + ('ෘ', '\u{ddf}'), + ('ෲ', 'ෳ'), + ('\u{e31}', '\u{e31}'), + ('\u{e34}', '\u{e3a}'), + ('\u{e47}', '\u{e4e}'), + ('\u{eb1}', '\u{eb1}'), + ('\u{eb4}', '\u{ebc}'), + ('\u{ec8}', '\u{ece}'), + ('\u{f18}', '\u{f19}'), + ('\u{f35}', '\u{f35}'), + ('\u{f37}', '\u{f37}'), + ('\u{f39}', '\u{f39}'), + ('༾', '༿'), + ('\u{f71}', '\u{f84}'), + ('\u{f86}', '\u{f87}'), + ('\u{f8d}', '\u{f97}'), + ('\u{f99}', '\u{fbc}'), + ('\u{fc6}', '\u{fc6}'), + ('ါ', '\u{103e}'), + ('ၖ', '\u{1059}'), + ('\u{105e}', '\u{1060}'), + ('ၢ', 'ၤ'), + ('ၧ', 'ၭ'), + ('\u{1071}', '\u{1074}'), + ('\u{1082}', '\u{108d}'), + ('ႏ', 'ႏ'), + ('ႚ', '\u{109d}'), + ('\u{135d}', 
'\u{135f}'), + ('\u{1712}', '\u{1715}'), + ('\u{1732}', '\u{1734}'), + ('\u{1752}', '\u{1753}'), + ('\u{1772}', '\u{1773}'), + ('\u{17b4}', '\u{17d3}'), + ('\u{17dd}', '\u{17dd}'), + ('\u{180b}', '\u{180d}'), + ('\u{180f}', '\u{180f}'), + ('\u{1885}', '\u{1886}'), + ('\u{18a9}', '\u{18a9}'), + ('\u{1920}', 'ᤫ'), + ('ᤰ', '\u{193b}'), + ('\u{1a17}', '\u{1a1b}'), + ('ᩕ', '\u{1a5e}'), + ('\u{1a60}', '\u{1a7c}'), + ('\u{1a7f}', '\u{1a7f}'), + ('\u{1ab0}', '\u{1ace}'), + ('\u{1b00}', 'ᬄ'), + ('\u{1b34}', '\u{1b44}'), + ('\u{1b6b}', '\u{1b73}'), + ('\u{1b80}', 'ᮂ'), + ('ᮡ', '\u{1bad}'), + ('\u{1be6}', '\u{1bf3}'), + ('ᰤ', '\u{1c37}'), + ('\u{1cd0}', '\u{1cd2}'), + ('\u{1cd4}', '\u{1ce8}'), + ('\u{1ced}', '\u{1ced}'), + ('\u{1cf4}', '\u{1cf4}'), + ('᳷', '\u{1cf9}'), + ('\u{1dc0}', '\u{1dff}'), + ('\u{200c}', '\u{200c}'), + ('\u{20d0}', '\u{20f0}'), + ('\u{2cef}', '\u{2cf1}'), + ('\u{2d7f}', '\u{2d7f}'), + ('\u{2de0}', '\u{2dff}'), + ('\u{302a}', '\u{302f}'), + ('\u{3099}', '\u{309a}'), + ('\u{a66f}', '\u{a672}'), + ('\u{a674}', '\u{a67d}'), + ('\u{a69e}', '\u{a69f}'), + ('\u{a6f0}', '\u{a6f1}'), + ('\u{a802}', '\u{a802}'), + ('\u{a806}', '\u{a806}'), + ('\u{a80b}', '\u{a80b}'), + ('ꠣ', 'ꠧ'), + ('\u{a82c}', '\u{a82c}'), + ('ꢀ', 'ꢁ'), + ('ꢴ', '\u{a8c5}'), + ('\u{a8e0}', '\u{a8f1}'), + ('\u{a8ff}', '\u{a8ff}'), + ('\u{a926}', '\u{a92d}'), + ('\u{a947}', '\u{a953}'), + ('\u{a980}', 'ꦃ'), + ('\u{a9b3}', '\u{a9c0}'), + ('\u{a9e5}', '\u{a9e5}'), + ('\u{aa29}', '\u{aa36}'), + ('\u{aa43}', '\u{aa43}'), + ('\u{aa4c}', 'ꩍ'), + ('ꩻ', 'ꩽ'), + ('\u{aab0}', '\u{aab0}'), + ('\u{aab2}', '\u{aab4}'), + ('\u{aab7}', '\u{aab8}'), + ('\u{aabe}', '\u{aabf}'), + ('\u{aac1}', '\u{aac1}'), + ('ꫫ', 'ꫯ'), + ('ꫵ', '\u{aaf6}'), + ('ꯣ', 'ꯪ'), + ('꯬', '\u{abed}'), + ('\u{fb1e}', '\u{fb1e}'), + ('\u{fe00}', '\u{fe0f}'), + ('\u{fe20}', '\u{fe2f}'), + ('\u{ff9e}', '\u{ff9f}'), + ('\u{101fd}', '\u{101fd}'), + ('\u{102e0}', '\u{102e0}'), + ('\u{10376}', '\u{1037a}'), + ('\u{10a01}', '\u{10a03}'), + 
('\u{10a05}', '\u{10a06}'), + ('\u{10a0c}', '\u{10a0f}'), + ('\u{10a38}', '\u{10a3a}'), + ('\u{10a3f}', '\u{10a3f}'), + ('\u{10ae5}', '\u{10ae6}'), + ('\u{10d24}', '\u{10d27}'), + ('\u{10d69}', '\u{10d6d}'), + ('\u{10eab}', '\u{10eac}'), + ('\u{10efc}', '\u{10eff}'), + ('\u{10f46}', '\u{10f50}'), + ('\u{10f82}', '\u{10f85}'), + ('𑀀', '𑀂'), + ('\u{11038}', '\u{11046}'), + ('\u{11070}', '\u{11070}'), + ('\u{11073}', '\u{11074}'), + ('\u{1107f}', '𑂂'), + ('𑂰', '\u{110ba}'), + ('\u{110c2}', '\u{110c2}'), + ('\u{11100}', '\u{11102}'), + ('\u{11127}', '\u{11134}'), + ('𑅅', '𑅆'), + ('\u{11173}', '\u{11173}'), + ('\u{11180}', '𑆂'), + ('𑆳', '\u{111c0}'), + ('\u{111c9}', '\u{111cc}'), + ('𑇎', '\u{111cf}'), + ('𑈬', '\u{11237}'), + ('\u{1123e}', '\u{1123e}'), + ('\u{11241}', '\u{11241}'), + ('\u{112df}', '\u{112ea}'), + ('\u{11300}', '𑌃'), + ('\u{1133b}', '\u{1133c}'), + ('\u{1133e}', '𑍄'), + ('𑍇', '𑍈'), + ('𑍋', '\u{1134d}'), + ('\u{11357}', '\u{11357}'), + ('𑍢', '𑍣'), + ('\u{11366}', '\u{1136c}'), + ('\u{11370}', '\u{11374}'), + ('\u{113b8}', '\u{113c0}'), + ('\u{113c2}', '\u{113c2}'), + ('\u{113c5}', '\u{113c5}'), + ('\u{113c7}', '𑏊'), + ('𑏌', '\u{113d0}'), + ('\u{113d2}', '\u{113d2}'), + ('\u{113e1}', '\u{113e2}'), + ('𑐵', '\u{11446}'), + ('\u{1145e}', '\u{1145e}'), + ('\u{114b0}', '\u{114c3}'), + ('\u{115af}', '\u{115b5}'), + ('𑖸', '\u{115c0}'), + ('\u{115dc}', '\u{115dd}'), + ('𑘰', '\u{11640}'), + ('\u{116ab}', '\u{116b7}'), + ('\u{1171d}', '\u{1172b}'), + ('𑠬', '\u{1183a}'), + ('\u{11930}', '𑤵'), + ('𑤷', '𑤸'), + ('\u{1193b}', '\u{1193e}'), + ('𑥀', '𑥀'), + ('𑥂', '\u{11943}'), + ('𑧑', '\u{119d7}'), + ('\u{119da}', '\u{119e0}'), + ('𑧤', '𑧤'), + ('\u{11a01}', '\u{11a0a}'), + ('\u{11a33}', '𑨹'), + ('\u{11a3b}', '\u{11a3e}'), + ('\u{11a47}', '\u{11a47}'), + ('\u{11a51}', '\u{11a5b}'), + ('\u{11a8a}', '\u{11a99}'), + ('𑰯', '\u{11c36}'), + ('\u{11c38}', '\u{11c3f}'), + ('\u{11c92}', '\u{11ca7}'), + ('𑲩', '\u{11cb6}'), + ('\u{11d31}', '\u{11d36}'), + ('\u{11d3a}', '\u{11d3a}'), + 
('\u{11d3c}', '\u{11d3d}'), + ('\u{11d3f}', '\u{11d45}'), + ('\u{11d47}', '\u{11d47}'), + ('𑶊', '𑶎'), + ('\u{11d90}', '\u{11d91}'), + ('𑶓', '\u{11d97}'), + ('\u{11ef3}', '𑻶'), + ('\u{11f00}', '\u{11f01}'), + ('𑼃', '𑼃'), + ('𑼴', '\u{11f3a}'), + ('𑼾', '\u{11f42}'), + ('\u{11f5a}', '\u{11f5a}'), + ('\u{13440}', '\u{13440}'), + ('\u{13447}', '\u{13455}'), + ('\u{1611e}', '\u{1612f}'), + ('\u{16af0}', '\u{16af4}'), + ('\u{16b30}', '\u{16b36}'), + ('\u{16f4f}', '\u{16f4f}'), + ('𖽑', '𖾇'), + ('\u{16f8f}', '\u{16f92}'), + ('\u{16fe4}', '\u{16fe4}'), + ('\u{16ff0}', '\u{16ff1}'), + ('\u{1bc9d}', '\u{1bc9e}'), + ('\u{1cf00}', '\u{1cf2d}'), + ('\u{1cf30}', '\u{1cf46}'), + ('\u{1d165}', '\u{1d169}'), + ('\u{1d16d}', '\u{1d172}'), + ('\u{1d17b}', '\u{1d182}'), + ('\u{1d185}', '\u{1d18b}'), + ('\u{1d1aa}', '\u{1d1ad}'), + ('\u{1d242}', '\u{1d244}'), + ('\u{1da00}', '\u{1da36}'), + ('\u{1da3b}', '\u{1da6c}'), + ('\u{1da75}', '\u{1da75}'), + ('\u{1da84}', '\u{1da84}'), + ('\u{1da9b}', '\u{1da9f}'), + ('\u{1daa1}', '\u{1daaf}'), + ('\u{1e000}', '\u{1e006}'), + ('\u{1e008}', '\u{1e018}'), + ('\u{1e01b}', '\u{1e021}'), + ('\u{1e023}', '\u{1e024}'), + ('\u{1e026}', '\u{1e02a}'), + ('\u{1e08f}', '\u{1e08f}'), + ('\u{1e130}', '\u{1e136}'), + ('\u{1e2ae}', '\u{1e2ae}'), + ('\u{1e2ec}', '\u{1e2ef}'), + ('\u{1e4ec}', '\u{1e4ef}'), + ('\u{1e5ee}', '\u{1e5ef}'), + ('\u{1e8d0}', '\u{1e8d6}'), + ('\u{1e944}', '\u{1e94a}'), + ('🏻', '🏿'), + ('\u{e0020}', '\u{e007f}'), + ('\u{e0100}', '\u{e01ef}'), +]; + +pub const EXTENDNUMLET: &'static [(char, char)] = &[ + ('_', '_'), + ('\u{202f}', '\u{202f}'), + ('‿', '⁀'), + ('⁔', '⁔'), + ('︳', '︴'), + ('﹍', '﹏'), + ('_', '_'), +]; + +pub const FORMAT: &'static [(char, char)] = &[ + ('\u{ad}', '\u{ad}'), + ('\u{61c}', '\u{61c}'), + ('\u{180e}', '\u{180e}'), + ('\u{200e}', '\u{200f}'), + ('\u{202a}', '\u{202e}'), + ('\u{2060}', '\u{2064}'), + ('\u{2066}', '\u{206f}'), + ('\u{feff}', '\u{feff}'), + ('\u{fff9}', '\u{fffb}'), + ('\u{13430}', '\u{1343f}'), + 
('\u{1bca0}', '\u{1bca3}'), + ('\u{1d173}', '\u{1d17a}'), + ('\u{e0001}', '\u{e0001}'), +]; + +pub const HEBREW_LETTER: &'static [(char, char)] = &[ + ('א', 'ת'), + ('ׯ', 'ײ'), + ('יִ', 'יִ'), + ('ײַ', 'ﬨ'), + ('שׁ', 'זּ'), + ('טּ', 'לּ'), + ('מּ', 'מּ'), + ('נּ', 'סּ'), + ('ףּ', 'פּ'), + ('צּ', 'ﭏ'), +]; + +pub const KATAKANA: &'static [(char, char)] = &[ + ('〱', '〵'), + ('゛', '゜'), + ('゠', 'ヺ'), + ('ー', 'ヿ'), + ('ㇰ', 'ㇿ'), + ('㋐', '㋾'), + ('㌀', '㍗'), + ('ヲ', 'ン'), + ('𚿰', '𚿳'), + ('𚿵', '𚿻'), + ('𚿽', '𚿾'), + ('𛀀', '𛀀'), + ('𛄠', '𛄢'), + ('𛅕', '𛅕'), + ('𛅤', '𛅧'), +]; + +pub const LF: &'static [(char, char)] = &[('\n', '\n')]; + +pub const MIDLETTER: &'static [(char, char)] = &[ + (':', ':'), + ('·', '·'), + ('·', '·'), + ('՟', '՟'), + ('״', '״'), + ('‧', '‧'), + ('︓', '︓'), + ('﹕', '﹕'), + (':', ':'), +]; + +pub const MIDNUM: &'static [(char, char)] = &[ + (',', ','), + (';', ';'), + (';', ';'), + ('։', '։'), + ('،', '؍'), + ('٬', '٬'), + ('߸', '߸'), + ('⁄', '⁄'), + ('﹐', '﹐'), + ('﹔', '﹔'), + (',', ','), + (';', ';'), +]; + +pub const MIDNUMLET: &'static [(char, char)] = &[ + ('.', '.'), + ('‘', '’'), + ('․', '․'), + ('﹒', '﹒'), + (''', '''), + ('.', '.'), +]; + +pub const NEWLINE: &'static [(char, char)] = + &[('\u{b}', '\u{c}'), ('\u{85}', '\u{85}'), ('\u{2028}', '\u{2029}')]; + +pub const NUMERIC: &'static [(char, char)] = &[ + ('0', '9'), + ('\u{600}', '\u{605}'), + ('٠', '٩'), + ('٫', '٫'), + ('\u{6dd}', '\u{6dd}'), + ('۰', '۹'), + ('߀', '߉'), + ('\u{890}', '\u{891}'), + ('\u{8e2}', '\u{8e2}'), + ('०', '९'), + ('০', '৯'), + ('੦', '੯'), + ('૦', '૯'), + ('୦', '୯'), + ('௦', '௯'), + ('౦', '౯'), + ('೦', '೯'), + ('൦', '൯'), + ('෦', '෯'), + ('๐', '๙'), + ('໐', '໙'), + ('༠', '༩'), + ('၀', '၉'), + ('႐', '႙'), + ('០', '៩'), + ('᠐', '᠙'), + ('᥆', '᥏'), + ('᧐', '᧚'), + ('᪀', '᪉'), + ('᪐', '᪙'), + ('᭐', '᭙'), + ('᮰', '᮹'), + ('᱀', '᱉'), + ('᱐', '᱙'), + ('꘠', '꘩'), + ('꣐', '꣙'), + ('꤀', '꤉'), + ('꧐', '꧙'), + ('꧰', '꧹'), + ('꩐', '꩙'), + ('꯰', '꯹'), + ('0', '9'), + ('𐒠', 
'𐒩'), + ('𐴰', '𐴹'), + ('𐵀', '𐵉'), + ('𑁦', '𑁯'), + ('\u{110bd}', '\u{110bd}'), + ('\u{110cd}', '\u{110cd}'), + ('𑃰', '𑃹'), + ('𑄶', '𑄿'), + ('𑇐', '𑇙'), + ('𑋰', '𑋹'), + ('𑑐', '𑑙'), + ('𑓐', '𑓙'), + ('𑙐', '𑙙'), + ('𑛀', '𑛉'), + ('𑛐', '𑛣'), + ('𑜰', '𑜹'), + ('𑣠', '𑣩'), + ('𑥐', '𑥙'), + ('𑯰', '𑯹'), + ('𑱐', '𑱙'), + ('𑵐', '𑵙'), + ('𑶠', '𑶩'), + ('𑽐', '𑽙'), + ('𖄰', '𖄹'), + ('𖩠', '𖩩'), + ('𖫀', '𖫉'), + ('𖭐', '𖭙'), + ('𖵰', '𖵹'), + ('𜳰', '𜳹'), + ('𝟎', '𝟿'), + ('𞅀', '𞅉'), + ('𞋰', '𞋹'), + ('𞓰', '𞓹'), + ('𞗱', '𞗺'), + ('𞥐', '𞥙'), + ('🯰', '🯹'), +]; + +pub const REGIONAL_INDICATOR: &'static [(char, char)] = &[('🇦', '🇿')]; + +pub const SINGLE_QUOTE: &'static [(char, char)] = &[('\'', '\'')]; + +pub const WSEGSPACE: &'static [(char, char)] = &[ + (' ', ' '), + ('\u{1680}', '\u{1680}'), + ('\u{2000}', '\u{2006}'), + ('\u{2008}', '\u{200a}'), + ('\u{205f}', '\u{205f}'), + ('\u{3000}', '\u{3000}'), +]; + +pub const ZWJ: &'static [(char, char)] = &[('\u{200d}', '\u{200d}')]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/utf8.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/utf8.rs new file mode 100644 index 0000000000000000000000000000000000000000..537035ed1d99b3f50c01078b885c25cacfb61ae7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/regex-syntax-0.8.9/src/utf8.rs @@ -0,0 +1,592 @@ +/*! +Converts ranges of Unicode scalar values to equivalent ranges of UTF-8 bytes. + +This is sub-module is useful for constructing byte based automatons that need +to embed UTF-8 decoding. The most common use of this module is in conjunction +with the [`hir::ClassUnicodeRange`](crate::hir::ClassUnicodeRange) type. + +See the documentation on the `Utf8Sequences` iterator for more details and +an example. + +# Wait, what is this? + +This is simplest to explain with an example. Let's say you wanted to test +whether a particular byte sequence was a Cyrillic character. One possible +scalar value range is `[0400-04FF]`. 
The set of allowed bytes for this +range can be expressed as a sequence of byte ranges: + +```text +[D0-D3][80-BF] +``` + +This is simple enough: simply encode the boundaries, `0400` encodes to +`D0 80` and `04FF` encodes to `D3 BF`, and create ranges from each +corresponding pair of bytes: `D0` to `D3` and `80` to `BF`. + +However, what if you wanted to add the Cyrillic Supplementary characters to +your range? Your range might then become `[0400-052F]`. The same procedure +as above doesn't quite work because `052F` encodes to `D4 AF`. The byte ranges +you'd get from the previous transformation would be `[D0-D4][80-AF]`. However, +this isn't quite correct because this range doesn't capture many characters, +for example, `04FF` (because its last byte, `BF` isn't in the range `80-AF`). + +Instead, you need multiple sequences of byte ranges: + +```text +[D0-D3][80-BF] # matches codepoints 0400-04FF +[D4][80-AF] # matches codepoints 0500-052F +``` + +This gets even more complicated if you want bigger ranges, particularly if +they naively contain surrogate codepoints. For example, the sequence of byte +ranges for the basic multilingual plane (`[0000-FFFF]`) look like this: + +```text +[0-7F] +[C2-DF][80-BF] +[E0][A0-BF][80-BF] +[E1-EC][80-BF][80-BF] +[ED][80-9F][80-BF] +[EE-EF][80-BF][80-BF] +``` + +Note that the byte ranges above will *not* match any erroneous encoding of +UTF-8, including encodings of surrogate codepoints. + +And, of course, for all of Unicode (`[000000-10FFFF]`): + +```text +[0-7F] +[C2-DF][80-BF] +[E0][A0-BF][80-BF] +[E1-EC][80-BF][80-BF] +[ED][80-9F][80-BF] +[EE-EF][80-BF][80-BF] +[F0][90-BF][80-BF][80-BF] +[F1-F3][80-BF][80-BF][80-BF] +[F4][80-8F][80-BF][80-BF] +``` + +This module automates the process of creating these byte ranges from ranges of +Unicode scalar values. 
+ +# Lineage + +I got the idea and general implementation strategy from Russ Cox in his +[article on regexps](https://web.archive.org/web/20160404141123/https://swtch.com/~rsc/regexp/regexp3.html) and RE2. +Russ Cox got it from Ken Thompson's `grep` (no source, folk lore?). +I also got the idea from +[Lucene](https://github.com/apache/lucene-solr/blob/ae93f4e7ac6a3908046391de35d4f50a0d3c59ca/lucene/core/src/java/org/apache/lucene/util/automaton/UTF32ToUTF8.java), +which uses it for executing automata on their term index. +*/ + +use core::{char, fmt, iter::FusedIterator, slice}; + +use alloc::{vec, vec::Vec}; + +const MAX_UTF8_BYTES: usize = 4; + +/// Utf8Sequence represents a sequence of byte ranges. +/// +/// To match a Utf8Sequence, a candidate byte sequence must match each +/// successive range. +/// +/// For example, if there are two ranges, `[C2-DF][80-BF]`, then the byte +/// sequence `\xDD\x61` would not match because `0x61 < 0x80`. +#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord)] +pub enum Utf8Sequence { + /// One byte range. + One(Utf8Range), + /// Two successive byte ranges. + Two([Utf8Range; 2]), + /// Three successive byte ranges. + Three([Utf8Range; 3]), + /// Four successive byte ranges. + Four([Utf8Range; 4]), +} + +impl Utf8Sequence { + /// Creates a new UTF-8 sequence from the encoded bytes of a scalar value + /// range. + /// + /// This assumes that `start` and `end` have the same length. 
+ fn from_encoded_range(start: &[u8], end: &[u8]) -> Self { + assert_eq!(start.len(), end.len()); + match start.len() { + 2 => Utf8Sequence::Two([ + Utf8Range::new(start[0], end[0]), + Utf8Range::new(start[1], end[1]), + ]), + 3 => Utf8Sequence::Three([ + Utf8Range::new(start[0], end[0]), + Utf8Range::new(start[1], end[1]), + Utf8Range::new(start[2], end[2]), + ]), + 4 => Utf8Sequence::Four([ + Utf8Range::new(start[0], end[0]), + Utf8Range::new(start[1], end[1]), + Utf8Range::new(start[2], end[2]), + Utf8Range::new(start[3], end[3]), + ]), + n => unreachable!("invalid encoded length: {n}"), + } + } + + /// Returns the underlying sequence of byte ranges as a slice. + pub fn as_slice(&self) -> &[Utf8Range] { + use self::Utf8Sequence::*; + match *self { + One(ref r) => slice::from_ref(r), + Two(ref r) => &r[..], + Three(ref r) => &r[..], + Four(ref r) => &r[..], + } + } + + /// Returns the number of byte ranges in this sequence. + /// + /// The length is guaranteed to be in the closed interval `[1, 4]`. + pub fn len(&self) -> usize { + self.as_slice().len() + } + + /// Reverses the ranges in this sequence. + /// + /// For example, if this corresponds to the following sequence: + /// + /// ```text + /// [D0-D3][80-BF] + /// ``` + /// + /// Then after reversal, it will be + /// + /// ```text + /// [80-BF][D0-D3] + /// ``` + /// + /// This is useful when one is constructing a UTF-8 automaton to match + /// character classes in reverse. + pub fn reverse(&mut self) { + match *self { + Utf8Sequence::One(_) => {} + Utf8Sequence::Two(ref mut x) => x.reverse(), + Utf8Sequence::Three(ref mut x) => x.reverse(), + Utf8Sequence::Four(ref mut x) => x.reverse(), + } + } + + /// Returns true if and only if a prefix of `bytes` matches this sequence + /// of byte ranges. 
+ pub fn matches(&self, bytes: &[u8]) -> bool { + if bytes.len() < self.len() { + return false; + } + for (&b, r) in bytes.iter().zip(self) { + if !r.matches(b) { + return false; + } + } + true + } +} + +impl<'a> IntoIterator for &'a Utf8Sequence { + type IntoIter = slice::Iter<'a, Utf8Range>; + type Item = &'a Utf8Range; + + fn into_iter(self) -> Self::IntoIter { + self.as_slice().iter() + } +} + +impl fmt::Debug for Utf8Sequence { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use self::Utf8Sequence::*; + match *self { + One(ref r) => write!(f, "{r:?}"), + Two(ref r) => write!(f, "{:?}{:?}", r[0], r[1]), + Three(ref r) => write!(f, "{:?}{:?}{:?}", r[0], r[1], r[2]), + Four(ref r) => { + write!(f, "{:?}{:?}{:?}{:?}", r[0], r[1], r[2], r[3]) + } + } + } +} + +/// A single inclusive range of UTF-8 bytes. +#[derive(Clone, Copy, Eq, PartialEq, PartialOrd, Ord)] +pub struct Utf8Range { + /// Start of byte range (inclusive). + pub start: u8, + /// End of byte range (inclusive). + pub end: u8, +} + +impl Utf8Range { + fn new(start: u8, end: u8) -> Self { + Utf8Range { start, end } + } + + /// Returns true if and only if the given byte is in this range. + pub fn matches(&self, b: u8) -> bool { + self.start <= b && b <= self.end + } +} + +impl fmt::Debug for Utf8Range { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.start == self.end { + write!(f, "[{:X}]", self.start) + } else { + write!(f, "[{:X}-{:X}]", self.start, self.end) + } + } +} + +/// An iterator over ranges of matching UTF-8 byte sequences. +/// +/// The iteration represents an alternation of comprehensive byte sequences +/// that match precisely the set of UTF-8 encoded scalar values. +/// +/// A byte sequence corresponds to one of the scalar values in the range given +/// if and only if it completely matches exactly one of the sequences of byte +/// ranges produced by this iterator. +/// +/// Each sequence of byte ranges matches a unique set of bytes. 
That is, no two +/// sequences will match the same bytes. +/// +/// # Example +/// +/// This shows how to match an arbitrary byte sequence against a range of +/// scalar values. +/// +/// ```rust +/// use regex_syntax::utf8::{Utf8Sequences, Utf8Sequence}; +/// +/// fn matches(seqs: &[Utf8Sequence], bytes: &[u8]) -> bool { +/// for range in seqs { +/// if range.matches(bytes) { +/// return true; +/// } +/// } +/// false +/// } +/// +/// // Test the basic multilingual plane. +/// let seqs: Vec<_> = Utf8Sequences::new('\u{0}', '\u{FFFF}').collect(); +/// +/// // UTF-8 encoding of 'a'. +/// assert!(matches(&seqs, &[0x61])); +/// // UTF-8 encoding of '☃' (`\u{2603}`). +/// assert!(matches(&seqs, &[0xE2, 0x98, 0x83])); +/// // UTF-8 encoding of `\u{10348}` (outside the BMP). +/// assert!(!matches(&seqs, &[0xF0, 0x90, 0x8D, 0x88])); +/// // Tries to match against a UTF-8 encoding of a surrogate codepoint, +/// // which is invalid UTF-8, and therefore fails, despite the fact that +/// // the corresponding codepoint (0xD800) falls in the range given. +/// assert!(!matches(&seqs, &[0xED, 0xA0, 0x80])); +/// // And fails against plain old invalid UTF-8. +/// assert!(!matches(&seqs, &[0xFF, 0xFF])); +/// ``` +/// +/// If this example seems circuitous, that's because it is! It's meant to be +/// illustrative. In practice, you could just try to decode your byte sequence +/// and compare it with the scalar value range directly. However, this is not +/// always possible (for example, in a byte based automaton). +#[derive(Debug)] +pub struct Utf8Sequences { + range_stack: Vec, +} + +impl Utf8Sequences { + /// Create a new iterator over UTF-8 byte ranges for the scalar value range + /// given. + pub fn new(start: char, end: char) -> Self { + let range = + ScalarRange { start: u32::from(start), end: u32::from(end) }; + Utf8Sequences { range_stack: vec![range] } + } + + /// reset resets the scalar value range. + /// Any existing state is cleared, but resources may be reused. 
+ /// + /// N.B. Benchmarks say that this method is dubious. + #[doc(hidden)] + pub fn reset(&mut self, start: char, end: char) { + self.range_stack.clear(); + self.push(u32::from(start), u32::from(end)); + } + + fn push(&mut self, start: u32, end: u32) { + self.range_stack.push(ScalarRange { start, end }); + } +} + +struct ScalarRange { + start: u32, + end: u32, +} + +impl fmt::Debug for ScalarRange { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ScalarRange({:X}, {:X})", self.start, self.end) + } +} + +impl Iterator for Utf8Sequences { + type Item = Utf8Sequence; + + fn next(&mut self) -> Option { + 'TOP: while let Some(mut r) = self.range_stack.pop() { + 'INNER: loop { + if let Some((r1, r2)) = r.split() { + self.push(r2.start, r2.end); + r.start = r1.start; + r.end = r1.end; + continue 'INNER; + } + if !r.is_valid() { + continue 'TOP; + } + for i in 1..MAX_UTF8_BYTES { + let max = max_scalar_value(i); + if r.start <= max && max < r.end { + self.push(max + 1, r.end); + r.end = max; + continue 'INNER; + } + } + if let Some(ascii_range) = r.as_ascii() { + return Some(Utf8Sequence::One(ascii_range)); + } + for i in 1..MAX_UTF8_BYTES { + let m = (1 << (6 * i)) - 1; + if (r.start & !m) != (r.end & !m) { + if (r.start & m) != 0 { + self.push((r.start | m) + 1, r.end); + r.end = r.start | m; + continue 'INNER; + } + if (r.end & m) != m { + self.push(r.end & !m, r.end); + r.end = (r.end & !m) - 1; + continue 'INNER; + } + } + } + let mut start = [0; MAX_UTF8_BYTES]; + let mut end = [0; MAX_UTF8_BYTES]; + let n = r.encode(&mut start, &mut end); + return Some(Utf8Sequence::from_encoded_range( + &start[0..n], + &end[0..n], + )); + } + } + None + } +} + +impl FusedIterator for Utf8Sequences {} + +impl ScalarRange { + /// split splits this range if it overlaps with a surrogate codepoint. + /// + /// Either or both ranges may be invalid. 
+ fn split(&self) -> Option<(ScalarRange, ScalarRange)> { + if self.start < 0xE000 && self.end > 0xD7FF { + Some(( + ScalarRange { start: self.start, end: 0xD7FF }, + ScalarRange { start: 0xE000, end: self.end }, + )) + } else { + None + } + } + + /// is_valid returns true if and only if start <= end. + fn is_valid(&self) -> bool { + self.start <= self.end + } + + /// as_ascii returns this range as a Utf8Range if and only if all scalar + /// values in this range can be encoded as a single byte. + fn as_ascii(&self) -> Option { + if self.is_ascii() { + let start = u8::try_from(self.start).unwrap(); + let end = u8::try_from(self.end).unwrap(); + Some(Utf8Range::new(start, end)) + } else { + None + } + } + + /// is_ascii returns true if the range is ASCII only (i.e., takes a single + /// byte to encode any scalar value). + fn is_ascii(&self) -> bool { + self.is_valid() && self.end <= 0x7f + } + + /// encode writes the UTF-8 encoding of the start and end of this range + /// to the corresponding destination slices, and returns the number of + /// bytes written. + /// + /// The slices should have room for at least `MAX_UTF8_BYTES`. 
+ fn encode(&self, start: &mut [u8], end: &mut [u8]) -> usize { + let cs = char::from_u32(self.start).unwrap(); + let ce = char::from_u32(self.end).unwrap(); + let ss = cs.encode_utf8(start); + let se = ce.encode_utf8(end); + assert_eq!(ss.len(), se.len()); + ss.len() + } +} + +fn max_scalar_value(nbytes: usize) -> u32 { + match nbytes { + 1 => 0x007F, + 2 => 0x07FF, + 3 => 0xFFFF, + 4 => 0x0010_FFFF, + _ => unreachable!("invalid UTF-8 byte sequence size"), + } +} + +#[cfg(test)] +mod tests { + use core::char; + + use alloc::{vec, vec::Vec}; + + use crate::utf8::{Utf8Range, Utf8Sequences}; + + fn rutf8(s: u8, e: u8) -> Utf8Range { + Utf8Range::new(s, e) + } + + fn never_accepts_surrogate_codepoints(start: char, end: char) { + for cp in 0xD800..0xE000 { + let buf = encode_surrogate(cp); + for r in Utf8Sequences::new(start, end) { + if r.matches(&buf) { + panic!( + "Sequence ({:X}, {:X}) contains range {:?}, \ + which matches surrogate code point {:X} \ + with encoded bytes {:?}", + u32::from(start), + u32::from(end), + r, + cp, + buf, + ); + } + } + } + } + + #[test] + fn codepoints_no_surrogates() { + never_accepts_surrogate_codepoints('\u{0}', '\u{FFFF}'); + never_accepts_surrogate_codepoints('\u{0}', '\u{10FFFF}'); + never_accepts_surrogate_codepoints('\u{0}', '\u{10FFFE}'); + never_accepts_surrogate_codepoints('\u{80}', '\u{10FFFF}'); + never_accepts_surrogate_codepoints('\u{D7FF}', '\u{E000}'); + } + + #[test] + fn single_codepoint_one_sequence() { + // Tests that every range of scalar values that contains a single + // scalar value is recognized by one sequence of byte ranges. 
+ for i in 0x0..=0x0010_FFFF { + let c = match char::from_u32(i) { + None => continue, + Some(c) => c, + }; + let seqs: Vec<_> = Utf8Sequences::new(c, c).collect(); + assert_eq!(seqs.len(), 1); + } + } + + #[test] + fn bmp() { + use crate::utf8::Utf8Sequence::*; + + let seqs = Utf8Sequences::new('\u{0}', '\u{FFFF}').collect::>(); + assert_eq!( + seqs, + vec![ + One(rutf8(0x0, 0x7F)), + Two([rutf8(0xC2, 0xDF), rutf8(0x80, 0xBF)]), + Three([ + rutf8(0xE0, 0xE0), + rutf8(0xA0, 0xBF), + rutf8(0x80, 0xBF) + ]), + Three([ + rutf8(0xE1, 0xEC), + rutf8(0x80, 0xBF), + rutf8(0x80, 0xBF) + ]), + Three([ + rutf8(0xED, 0xED), + rutf8(0x80, 0x9F), + rutf8(0x80, 0xBF) + ]), + Three([ + rutf8(0xEE, 0xEF), + rutf8(0x80, 0xBF), + rutf8(0x80, 0xBF) + ]), + ] + ); + } + + #[test] + fn reverse() { + use crate::utf8::Utf8Sequence::*; + + let mut s = One(rutf8(0xA, 0xB)); + s.reverse(); + assert_eq!(s.as_slice(), &[rutf8(0xA, 0xB)]); + + let mut s = Two([rutf8(0xA, 0xB), rutf8(0xB, 0xC)]); + s.reverse(); + assert_eq!(s.as_slice(), &[rutf8(0xB, 0xC), rutf8(0xA, 0xB)]); + + let mut s = Three([rutf8(0xA, 0xB), rutf8(0xB, 0xC), rutf8(0xC, 0xD)]); + s.reverse(); + assert_eq!( + s.as_slice(), + &[rutf8(0xC, 0xD), rutf8(0xB, 0xC), rutf8(0xA, 0xB)] + ); + + let mut s = Four([ + rutf8(0xA, 0xB), + rutf8(0xB, 0xC), + rutf8(0xC, 0xD), + rutf8(0xD, 0xE), + ]); + s.reverse(); + assert_eq!( + s.as_slice(), + &[ + rutf8(0xD, 0xE), + rutf8(0xC, 0xD), + rutf8(0xB, 0xC), + rutf8(0xA, 0xB) + ] + ); + } + + fn encode_surrogate(cp: u32) -> [u8; 3] { + const TAG_CONT: u8 = 0b1000_0000; + const TAG_THREE_B: u8 = 0b1110_0000; + + assert!(0xD800 <= cp && cp < 0xE000); + let mut dst = [0; 3]; + dst[0] = u8::try_from(cp >> 12 & 0x0F).unwrap() | TAG_THREE_B; + dst[1] = u8::try_from(cp >> 6 & 0x3F).unwrap() | TAG_CONT; + dst[2] = u8::try_from(cp & 0x3F).unwrap() | TAG_CONT; + dst + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/.cargo-ok 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..35e932d307c3f21ac86dd426b00501324cd8445f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "38327261924aa71059a4cdeeea2286ec88bd9146" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/CHANGES.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/CHANGES.md new file mode 100644 index 0000000000000000000000000000000000000000..a13b83606c08f9277652ef3d84546f3c4cc8c576 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/CHANGES.md @@ -0,0 +1,391 @@ +# Changes from 0.38.x to 1.x + +## Silent behavior changes + +[`rustix::pipe::fcntl_setpipe_size`] now returns the new size, which may be +greater than the requested size. + +[`rustix::pipe::fcntl_setpipe_size`]: https://docs.rs/rustix/1/rustix/pipe/fn.fcntl_setpipe_size.html + +When a `&mut Vec<_>` is passed to [`rustix::event::epoll::wait`], +[`rustix::event::kqueue::kevent`], or [`rustix::event::port::getn`], these +functions previously adjusted the length of the `Vec` to the number of elements +written, and now do not. A common alternative is to wrap the `&mut Vec<_>` +using [`spare_capacity`], and then to clear the `Vec` by iterating using +`.drain(..)` after each call. 
For an example of using `spare_capacity` in this +way, see [here]. + +[`rustix::event::epoll::wait`]: https://docs.rs/rustix/1/rustix/event/epoll/fn.wait.html +[`rustix::event::kqueue::kevent`]: https://docs.rs/rustix/1/x86_64-unknown-freebsd/rustix/event/kqueue/fn.kevent.html +[`rustix::event::port::getn`]: https://docs.rs/rustix/1/x86_64-unknown-illumos/rustix/event/port/fn.getn.html +[`spare_capacity`]: https://docs.rs/rustix/1/rustix/buffer/fn.spare_capacity.html +[here]: https://docs.rs/rustix/1/rustix/event/epoll/index.html#examples + +## API changes + +`rustix::thread::FutexOperation` and `rustix::thread::futex` are removed. Use +the functions in the [`rustix::thread::futex`] module instead. + +[`rustix::thread::futex`]: https://docs.rs/rustix/1/rustix/thread/futex/index.html + +[`rustix::process::waitpid`]'s return type changed from `WaitStatus` to +`(Pid, WaitStatus)`, to additionally return the pid of the child. + +[`rustix::process::waitpid`]: https://docs.rs/rustix/1/rustix/process/fn.waitpid.html + +[`terminating_signal`] and other functions in [`rustix::process::WaitStatus`] changed +from returning `u32` to returning `i32`, for better compatibility with the new +[`Signal`] type and [`exit`]. + +[`terminating_signal`]: https://docs.rs/rustix/1/rustix/process/struct.WaitStatus.html#method.terminating_signal +[`rustix::process::WaitStatus`]: https://docs.rs/rustix/1/rustix/process/struct.WaitStatus.html +[`Signal`]: https://docs.rs/rustix/1/rustix/process/struct.Signal.html +[`exit`]: std::process::exit + +The `SLAVE` flag in [`rustix::mount::MountPropagationFlags`] is renamed to +[`DOWNSTREAM`]. + +[`rustix::mount::MountPropagationFlags`]: https://docs.rs/rustix/1/rustix/mount/struct.MountPropagationFlags.html +[`DOWNSTREAM`]: https://docs.rs/rustix/1/rustix/mount/struct.MountPropagationFlags.html#associatedconstant.DOWNSTREAM + +The "cc" and "libc-extra-traits" features are removed. The "cc" feature hasn't +had any effect for several major releases. 
If you need the traits provided by +"libc-extra-traits", you should instead depend on libc directly and enable its +"extra_traits" feature. + +`rustix::net::Shutdown::ReadWrite` is renamed to +[`rustix::net::Shutdown::Both`] to [align with std]. + +[`rustix::net::Shutdown::Both`]: https://docs.rs/rustix/1/rustix/net/enum.Shutdown.html#variant.Both +[align with std]: https://doc.rust-lang.org/stable/std/net/enum.Shutdown.html#variant.Both + +The `rustix::io_uring::io_uring_register_files_skip` function is replaced with +a [`IORING_REGISTER_FILES_SKIP`] constant, similar to the [`rustix::fs::CWD`] +constant. + +[`IORING_REGISTER_FILES_SKIP`]: https://docs.rs/rustix/1/rustix/io_uring/constant.IORING_REGISTER_FILES_SKIP.html +[`rustix::fs::CWD`]: https://docs.rs/rustix/1/rustix/fs/constant.CWD.html + +Several structs in [`rustix::io_uring`] are now marked `#[non_exhaustive]` +because they contain padding or reserved fields. Instead of constructing +them with field values and `..Default::default()`, construct them with +`Default::default()` and separately assign the fields. + +[`rustix::io_uring`]: https://docs.rs/rustix/1/rustix/io_uring/index.html + +[`rustix::process::Resource`], [`rustix::thread::MembarrierCommand`], and +[`rustix::thread::Capability`] are now marked `#[non_exhaustive]` to ease +migration in case new constants are defined in the future. + +[`rustix::process::Resource`]: https://docs.rs/rustix/1/rustix/process/enum.Resource.html +[`rustix::thread::MembarrierCommand`]: https://docs.rs/rustix/1/rustix/thread/enum.MembarrierCommand.html +[`rustix::thread::Capability`]: https://docs.rs/rustix/1/rustix/thread/enum.Capability.html + +`rustix::process::WaitidOptions` and `rustix::process::WaitidStatus` are +renamed to +[`rustix::process::WaitIdOptions`] and [`rustix::process::WaitIdStatus`] (note +the capitalization), for consistency with [`rustix::process::WaitId`]. 
+ +[`rustix::process::WaitIdOptions`]: https://docs.rs/rustix/1/rustix/process/struct.WaitIdOptions.html +[`rustix::process::WaitIdStatus`]: https://docs.rs/rustix/1/rustix/process/struct.WaitIdStatus.html +[`rustix::process::WaitId`]: https://docs.rs/rustix/1/rustix/process/enum.WaitId.html + +The offsets in [`rustix::fs::SeekFrom::Hole`] and +[`rustix::fs::SeekFrom::Data`] are changed from `i64` to `u64`, to +[align with std], since they represent absolute offsets. + +[`rustix::fs::SeekFrom::Hole`]: https://docs.rs/rustix/1/rustix/fs/enum.SeekFrom.html#variant.Hole +[`rustix::fs::SeekFrom::Data`]: https://docs.rs/rustix/1/rustix/fs/enum.SeekFrom.html#variant.Data +[align with std]: https://doc.rust-lang.org/stable/std/io/enum.SeekFrom.html#variant.Start + +Functions in [`rustix::net::sockopt`] are renamed to remove the `get_` prefix, +to [align with Rust conventions]. + +[`rustix::net::sockopt`]: https://docs.rs/rustix/1/rustix/net/sockopt/index.html +[align with Rust conventions]: https://rust-lang.github.io/api-guidelines/naming.html#getter-names-follow-rust-convention-c-getter + +`rustix::process::sched_*` and `rustix::process::membarrier_*` are moved from +[`rustix::process`] to [`rustix::thread`], as they operate on the current +thread rather than the current process. + +[`rustix::process`]: https://docs.rs/rustix/1/rustix/process/index.html +[`rustix::thread`]: https://docs.rs/rustix/1/rustix/thread/index.html + +The `udata` in [`rustix::event::kqueue::Event`] is changed from `isize` to +`*mut c_void` to better propagate pointer provenance. To use arbitrary integer +values, convert using the [`without_provenance_mut`] and the [`.addr()`] +functions. 
+ +[`rustix::event::kqueue::Event`]: https://docs.rs/rustix/1/x86_64-unknown-freebsd/rustix/event/kqueue/struct.Event.html +[`without_provenance_mut`]: https://doc.rust-lang.org/stable/std/ptr/fn.without_provenance_mut.html +[`.addr()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.addr + +`rustix::mount::mount_recursive_bind` is renamed to +[`rustix::mount::mount_bind_recursive`]. See [this comment] for details. + +[`rustix::mount::mount_bind_recursive`]: https://docs.rs/rustix/1/rustix/mount/fn.mount_bind_recursive.html +[this comment]: https://github.com/bytecodealliance/rustix/pull/763#issuecomment-1662756184 + +The `rustix::procfs` is removed. This functionality is now available in the +[rustix-linux-procfs crate]. + +[rustix-linux-procfs crate]: https://crates.io/crates/rustix-linux-procfs + +`rustix::net::RecvMsgReturn` is renamed to [`rustix::net::RecvMsg`]. + +[`rustix::net::RecvMsg`]: https://docs.rs/rustix/1/rustix/net/struct.RecvMsg.html + +The `flags` field of [`rustix::net::RecvMsg`] changed type from [`RecvFlags`] +to a new [`ReturnFlags`], since it supports a different set of flags. + +[`rustix::net::RecvMsg`]: https://docs.rs/rustix/1/rustix/net/struct.RecvMsg.html +[`RecvFlags`]: https://docs.rs/rustix/1/rustix/net/struct.RecvFlags.html +[`ReturnFlags`]: https://docs.rs/rustix/1/rustix/net/struct.ReturnFlags.html + +[`rustix::event::poll`]'s and [`rustix::event::epoll`]'s `timeout` argument +changed from a `c_int` where `-1` means no timeout and non-negative numbers +mean a timeout in milliseconds to an `Option<&Timespec>`. The [`Timespec`]'s +fields are `tv_sec` which holds seconds and `tv_nsec` which holds nanoseconds. 
+ +[`rustix::event::poll`]: https://docs.rs/rustix/1/rustix/event/fn.poll.html +[`rustix::event::epoll`]: https://docs.rs/rustix/1/rustix/event/epoll/index.html +[`Timespec`]: https://docs.rs/rustix/1/rustix/time/struct.Timespec.html + +The timeout argument in [`rustix::thread::futex::wait`], +[`rustix::thread::futex::lock_pi`], [`rustix::thread::futex::wait_bitset`], +[`rustix::thread::futex::wait_requeue_pi`], and +[`rustix::thread::futex::lock_pi2`] changed from `Option` to +`Option<&Timespec>`, for consistency with the rest of rustix's API, and for +low-level efficiency, as it means the implementation doesn't need to make a +copy of the `Timespec` to take its address. An easy way to convert an +`Option` to an `Option<&Timespec> is to use [`Option::as_ref`]. + +[`rustix::thread::futex::wait`]: https://docs.rs/rustix/1/rustix/thread/futex/fn.wait.html +[`rustix::thread::futex::lock_pi`]: https://docs.rs/rustix/1/rustix/thread/futex/fn.lock_pi.html +[`rustix::thread::futex::wait_bitset`]: https://docs.rs/rustix/1/rustix/thread/futex/fn.wait_bitset.html +[`rustix::thread::futex::wait_requeue_pi`]: https://docs.rs/rustix/1/rustix/thread/futex/fn.wait_requeue_pi.html +[`rustix::thread::futex::lock_pi2`]: https://docs.rs/rustix/1/rustix/thread/futex/fn.lock_pi2.html +[`Option::as_ref`]: https://doc.rust-lang.org/stable/std/option/enum.Option.html#method.as_ref + +Functions in [`rustix::event::port`] are renamed to remove the redundant +`port_*` prefix. + +[`rustix::event::port`]: https://docs.rs/rustix/1/x86_64-unknown-illumos/rustix/event/port/index.html + +`rustix::fs::inotify::InotifyEvent` is renamed to +[`rustix::fs::inotify::Event`] to remove the redundant prefix. + +[`rustix::fs::inotify::Event`]: https://docs.rs/rustix/1/rustix/fs/inotify/struct.Event.html + +`rustix::fs::StatExt` is removed, and the timestamp fields `st_atime`, +`st_mtime`, and `st_ctime` of [`rustix::fs::Stat`] may now be accessed +directly. 
They are now signed instead of unsigned, so that they can represent +times before the epoch. + +[`rustix::fs::Stat`]: https://docs.rs/rustix/1/rustix/fs/struct.Stat.html + +`rustix::io::is_read_write` is removed, as it's higher-level functionality that +can be implemented in terms of lower-level rustix calls. + +[`rustix::net::recv`] and [`rustix::net::recvfrom`] now include +the number of received bytes in their return types, as this number may differ +from the number of bytes written to the buffer when +[`rustix::net::RecvFlags::TRUNC`] is used. + +[`rustix::net::recv`]: https://docs.rs/rustix/1/rustix/net/fn.recv.html +[`rustix::net::recvfrom`]: https://docs.rs/rustix/1/rustix/net/fn.recvfrom.html +[`rustix::net::RecvFlags::TRUNC`]: https://docs.rs/rustix/1/rustix/net/struct.RecvFlags.html#associatedconstant.TRUNC + +[`rustix::process::Signal`] constants are now upper-cased; for example, +`Signal::Int` is now named [`Signal::INT`]. Also, `Signal` is no longer +directly convertible to `i32`; use [`Signal::as_raw`] instead. + +[`rustix::process::Signal`]: https://docs.rs/rustix/1/rustix/process/struct.Signal.html +[`Signal::INT`]: https://docs.rs/rustix/1/rustix/process/struct.Signal.html#variant.Int +[`Signal::as_raw`]: https://docs.rs/rustix/1/rustix/process/struct.Signal.html#method.as_raw + +`Signal::from_raw` is renamed to [`Signal::from_named_raw`]. + +[`Signal::from_named_raw`]: https://docs.rs/rustix/1/rustix/process/struct.Signal.html#method.from_named_raw + +The associated constant `rustix::ioctl::Ioctl::OPCODE` is now replaced with an +associated method [`rustix::ioctl::Ioctl::opcode`], to support ioctls where the +opcode is computed rather than a constant. + +[`rustix::ioctl::Ioctl::opcode`]: https://docs.rs/rustix/1/rustix/ioctl/trait.Ioctl.html#tymethod.opcode + +The `ifindex` argument in +[`rustix::net::sockopt::set_ip_add_membership_with_ifindex`] and +[`rustix::net::sockopt::set_ip_drop_membership_with_ifindex`] +changed from `i32` to `u32`. 
+ +[`rustix::net::sockopt::set_ip_add_membership_with_ifindex`]: https://docs.rs/rustix/1/rustix/net/sockopt/fn.set_ip_add_membership_with_ifindex.html +[`rustix::net::sockopt::set_ip_drop_membership_with_ifindex`]: https://docs.rs/rustix/1/rustix/net/sockopt/fn.set_ip_drop_membership_with_ifindex.html + +The `list` argument in [`rustix::fs::listxattr`], [`rustix::fs::flistxattr`], +and [`rustix::fs::llistxattr`] changed from `[c_char]`, which is `[i8]` on some +architectures, to `[u8]`. + +[`rustix::fs::listxattr`]: https://docs.rs/rustix/1/rustix/fs/fn.listxattr.html +[`rustix::fs::flistxattr`]: https://docs.rs/rustix/1/rustix/fs/fn.flistxattr.html +[`rustix::fs::llistxattr`]: https://docs.rs/rustix/1/rustix/fs/fn.llistxattr.html + +On NetBSD, the nanoseconds fields of [`Stat`] have been renamed, for consistency +with other platforms: + +| Old name | New Name | +| -------------- | --------------- | +| `st_atimensec` | `st_atime_nsec` | +| `st_mtimensec` | `st_mtime_nsec` | +| `st_ctimensec` | `st_ctime_nsec` | +| `st_birthtimensec` | `st_birthtime_nsec` | + +[`Stat`]: https://docs.rs/rustix/1/x86_64-unknown-netbsd/rustix/fs/struct.Stat.html + +[`rustix::mount::mount`]'s `data` argument is now an `Option`, so it can now +be used in place of `mount2`, and `mount2` is now removed. + +[`rustix::mount::mount`]: https://docs.rs/rustix/1/rustix/mount/fn.mount.html + +The [`rustix::net`] functions ending with `_v4`, `_v6`, `_unix` and `_xdp` have +been merged into a single function that accepts any address type. 
+ +Specifically, the following functions are removed: + + * `bind_any`, `bind_unix`, `bind_v4`, `bind_v6`, `bind_xdp` in favor of + [`bind`], + * `connect_any`, `connect_unix`, `connect_v4`, `connect_v6` in favor of + [`connect`] (leaving address-less [`connect_unspec`]), + * `sendmsg_v4`, `sendmsg_v6`, `sendmsg_unix`, `sendmsg_xdp`, `sendmsg_any` in + favor of [`sendmsg_addr`] (leaving address-less [`sendmsg`]), + * `sendto_any`, `sendto_v4`, `sendto_v6`, `sendto_unix`, `sendto_xdp` in + favor of [`sendto`]. + +[`rustix::net`]: https://docs.rs/rustix/1/rustix/net/index.html +[`bind`]: https://docs.rs/rustix/1/rustix/net/fn.bind.html +[`connect`]: https://docs.rs/rustix/1/rustix/net/fn.connect.html +[`connect_unspec`]: https://docs.rs/rustix/1/rustix/net/fn.connect_unspec.html +[`sendmsg_addr`]: https://docs.rs/rustix/1/rustix/net/fn.sendmsg_addr.html +[`sendmsg`]: https://docs.rs/rustix/1/rustix/net/fn.sendmsg.html +[`sendto`]: https://docs.rs/rustix/1/rustix/net/fn.sendto.html + +The `SocketAddrAny` enum has changed to a [`SocketAddrAny`] struct which can +contain any kind of socket address. It can be converted to and from the more +specific socket types using `From`/`Into`/`TryFrom`/`TryInto` conversions. + +[`SocketAddrAny`]: https://docs.rs/rustix/1/rustix/net/struct.SocketAddrAny.html + +The `len` parameter to [`rustix::fs::fadvise`] has changed from `u64` to +`Option`, to reflect that zero is a special case meaning the +advice applies to the end of the file. To convert an arbitrary `u64` value to +`Option`, use `NonZeroU64::new`. + +[`rustix::fs::fadvise`]: https://docs.rs/rustix/1/rustix/fs/fn.fadvise.html + +[`rustix::io_uring::io_uring_enter`] no longer has `arg` and `size` arguments +providing a raw `*mut c_void` and `usize` describing the argument value. To +pass argumentts, there are now additional functions, `io_uring_enter_sigmask`, +and `io_uring_enter_arg`, which take a [`KernelSigSet`] or an +`io_uring_getevents_arg`, respectively. 
These are more ergonomic, and provide +a better path to adding `IORING_ENTER_EXT_ARG_REG` support in the future. + +[`rustix::io_uring::io_uring_enter`]: https://docs.rs/rustix/1/rustix/io_uring/fn.io_uring_enter.html +[`KernelSigSet`]: https://docs.rs/rustix/1/rustix/io_uring/struct.KernelSigSet.html + +The [`sigmask`] and [`ts`] fields of [`rustix::io_uring::getevents_arg`] +changed from `u64` to [`rustix::io_uring::io_uring_ptr`], to better preserve +pointer provenance. + +[`sigmask`]: https://docs.rs/rustix/1/rustix/io_uring/struct.io_uring_getevents_arg.html#structfield.sigmask +[`ts`]: https://docs.rs/rustix/1/rustix/io_uring/struct.io_uring_getevents_arg.html#structfield.ts +[`rustix::io_uring::getevents_arg`]: https://docs.rs/rustix/1/rustix/io_uring/struct.io_uring_getevents_arg.html +[`rustix::io_uring::io_uring_ptr`]: https://docs.rs/rustix/1/rustix/io_uring/struct.io_uring_ptr.html + +The aliases for [`fcntl_dupfd_cloexec`], [`fcntl_getfd`], and [`fcntl_setfd`] +in `rustix::fs` are removed; these functions are just available in +[`rustix::io`] now. + +[`fcntl_dupfd_cloexec`]: https://docs.rs/rustix/1/rustix/io/fn.fcntl_dupfd_cloexec.html +[`fcntl_getfd`]: https://docs.rs/rustix/1/rustix/io/fn.fcntl_getfd.html +[`fcntl_setfd`]: https://docs.rs/rustix/1/rustix/io/fn.fcntl_setfd.html +[`rustix::io`]: https://docs.rs/rustix/1/rustix/io/index.html + +[`SocketAddrXdp`] no longer has a shared UMEM field. A new +[`SocketAddrXdpWithSharedUmem`] is added for the purpose of calling `bind` and +passing it an XDP address with a shared UMEM fd. And `SockaddrXdpFlags` is +renamed to [`SocketAddrXdpFlags`]. 
+ +[`SocketAddrXdp`]: https://docs.rs/rustix/1/rustix/net/xdp/struct.SocketAddrXdp.html +[`SocketAddrXdpWithSharedUmem`]: https://docs.rs/rustix/1/rustix/net/xdp/struct.SocketAddrXdpWithSharedUmem.html +[`SocketAddrXdpFlags`]: https://docs.rs/rustix/1/rustix/net/xdp/struct.SocketAddrXdpFlags.html + +[`rustix::io_uring::io_uring_setup`] is now unsafe, due its `io_uring_params` +argument optionally containing a raw file descriptor. + +[`rustix::io_uring::io_uring_setup`]: https://docs.rs/rustix/1/rustix/io_uring/fn.io_uring_setup.html + +The buffer for [`SendAncillaryBuffer`] and [`RecvAncillaryBuffer`] is now +a `[MaybeUninit]` instead of a `[u8]`. + +[`SendAncillaryBuffer`]: https://docs.rs/rustix/1/rustix/net/struct.SendAncillaryBuffer.html +[`RecvAncillaryBuffer`]: https://docs.rs/rustix/1/rustix/net/struct.RecvAncillaryBuffer.html + +[`read`], [`pread`], [`recv`], [`recvfrom`], [`getrandom`], [`readlinkat_raw`], +[`epoll::wait`], [`kevent`], [`port::getn`], [`getxattr`], [`lgetxattr`], +[`fgetxattr`], [`listxattr`], [`llistxattr`], and [`flistxattr`] now use the +new [`Buffer` trait]. + +This replaces `read_uninit`, `pread_uninit`, `recv_uninit`, `recvfrom_uninit`, +and `getrandom_uninit`, as the `Buffer` trait supports reading into +uninitialized slices. + +`epoll::wait`, `kevent`, and `port::getn` previously took a `Vec` which they +implicitly cleared before results were appended. When passing a `Vec` to +`epoll::wait`, `kevent`, or `port::getn` using [`spare_capacity`], the `Vec` is +not cleared first. Consider clearing the vector before calling `epoll::wait`, +`kevent`, or `port::getn`, or consuming it using `.drain(..)` before reusing it. 
+ +[`read`]: https://docs.rs/rustix/1/rustix/io/fn.read.html +[`pread`]: https://docs.rs/rustix/1/rustix/io/fn.pread.html +[`recv`]: https://docs.rs/rustix/1/rustix/net/fn.recv.html +[`recvfrom`]: https://docs.rs/rustix/1/rustix/net/fn.recvfrom.html +[`getrandom`]: https://docs.rs/rustix/1/rustix/rand/fn.getrandom.html +[`readlinkat_raw`]: https://docs.rs/rustix/1/rustix/fs/fn.readlinkat_raw.html +[`epoll::wait`]: https://docs.rs/rustix/1/rustix/event/epoll/fn.wait.html +[`getxattr`]: https://docs.rs/rustix/1/rustix/fs/fn.getxattr.html +[`lgetxattr`]: https://docs.rs/rustix/1/rustix/fs/fn.lgetxattr.html +[`fgetxattr`]: https://docs.rs/rustix/1/rustix/fs/fn.fgetxattr.html +[`listxattr`]: https://docs.rs/rustix/1/rustix/fs/fn.listxattr.html +[`llistxattr`]: https://docs.rs/rustix/1/rustix/fs/fn.llistxattr.html +[`flistxattr`]: https://docs.rs/rustix/1/rustix/fs/fn.flistxattr.html +[`kevent`]: https://docs.rs/rustix/1/x86_64-unknown-freebsd/rustix/event/kqueue/fn.kevent.html +[`port::getn`]: https://docs.rs/rustix/1/x86_64-unknown-illumos/rustix/event/port/fn.getn.html +[`Buffer` trait]: https://docs.rs/rustix/1/rustix/buffer/trait.Buffer.html +[`spare_capacity`]: https://docs.rs/rustix/1/rustix/buffer/fn.spare_capacity.html + +The [`rustix::ioctl::Opcode`] type has changed from a struct to a raw integer +value, and the associated utilities are change to `const` functions. In place +of `ReadOpcode`, `WriteOpcode`, `ReadWriteOpcode`, and `NoneOpcode`, use the +`read`, `write`, `read_write`, and `none` const functions in the +[`ioctl::opcode`] module. For example, in place of this: +```rust +ioctl::Setter::, c_uint>::new(interface) +``` +use this: +```rust ++ ioctl::Setter::<{ ioctl::opcode::read::(b'U', 15) }, c_uint>::new(interface) +``` +. + +In place of `BadOpcode`, use the opcode value directly. 
+ +[`rustix::ioctl::Opcode`]: https://docs.rs/rustix/1/rustix/ioctl/type.Opcode.html +[`ioctl::opcode`]: https://docs.rs/rustix/1/rustix/ioctl/opcode/index.html + +[`rustix::event::port::getn`]'s `min_events` argument is now a `u32`, to +reflect the type in the underlying system API. + +[`rustix::event::port::getn`]: https://docs.rs/rustix/1/x86_64-unknown-illumos/rustix/event/port/fn.getn.html + +All explicitly deprecated functions and types have been removed. Their +deprecation messages will have identified alternatives. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/CODE_OF_CONDUCT.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..32ff0288e8e31cec4bd5a9df8fecd3e8fbb35090 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/CODE_OF_CONDUCT.md @@ -0,0 +1,49 @@ +# Contributor Covenant Code of Conduct + +*Note*: this Code of Conduct pertains to individuals' behavior. Please also see the [Organizational Code of Conduct][OCoC]. + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the Bytecode Alliance CoC team at [report@bytecodealliance.org](mailto:report@bytecodealliance.org). The CoC team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The CoC team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the Bytecode Alliance's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[OCoC]: https://github.com/bytecodealliance/rustix/blob/main/ORG_CODE_OF_CONDUCT.md +[homepage]: https://www.contributor-covenant.org +[version]: https://www.contributor-covenant.org/version/1/4/ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/CONTRIBUTING.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..8e9a64fc77e6d7960299d026d4d2ab30a87fa40d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contributing to rustix + +Rustix is a [Bytecode Alliance] project. It follows the Bytecode Alliance's +[Code of Conduct] and [Organizational Code of Conduct]. + +## Testing + +To keep compile times low, most features in rustix's API are behind cargo +features. A special feature, `all-apis` enables all APIs, which is useful +for testing. + +```console +cargo test --features=all-apis +``` + +And, rustix has two backends, linux_raw and libc, and only one is used in +any given build. 
To test on Linux with the libc backend explicitly, +additionally enable the `use-libc` feature: + +```console +cargo test --features=all-apis,use-libc +``` + +Beyond that, rustix's CI tests many targets and configurations. Asking for +help is always welcome, and it's especially encouraged when the issue is +getting all the `cfg`s lined up to get everything compiling on all the +configurations on CI. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/COPYRIGHT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/COPYRIGHT new file mode 100644 index 0000000000000000000000000000000000000000..a841525eff99fff45fe0e6fd36b95f11b8b82e4c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/COPYRIGHT @@ -0,0 +1,29 @@ +Short version for non-lawyers: + +`rustix` is triple-licensed under Apache 2.0 with the LLVM Exception, +Apache 2.0, and MIT terms. + + +Longer version: + +Copyrights in the `rustix` project are retained by their contributors. +No copyright assignment is required to contribute to the `rustix` +project. + +Some files include code derived from Rust's `libstd`; see the comments in +the code for details. + +Except as otherwise noted (below and/or in individual files), `rustix` +is licensed under: + + - the Apache License, Version 2.0, with the LLVM Exception + or + + - the Apache License, Version 2.0 + or + , + - or the MIT license + or + , + +at your option. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..0214a7e0f5d7693340c06054c617c580976f4d01 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/Cargo.lock @@ -0,0 +1,995 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +dependencies = [ + "compiler_builtins", + "rustc-std-workspace-core", +] + +[[package]] +name = "bumpalo" +version = "3.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cfg-if" +version = 
"1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clap" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +dependencies = [ + "bitflags 1.3.2", + "clap_lex", + "indexmap", + "textwrap", +] + +[[package]] +name = "clap_lex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +dependencies = [ + "os_str_bytes", +] + +[[package]] +name = "compiler_builtins" +version = "0.1.160" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6376049cfa92c0aa8b9ac95fae22184b981c658208d4ed8a1dc553cd83612895" + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "criterion" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" +dependencies = [ + "anes", + "atty", + "cast", + "ciborium", + "clap", + "criterion-plot", + "itertools", + "lazy_static", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "flate2" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", +] + +[[package]] +name = "half" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" +dependencies = [ + "cfg-if", + "crunchy", + "zerocopy", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + 
+[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010" + +[[package]] +name = "js-sys" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.178" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" +dependencies = [ + "rustc-std-workspace-core", +] + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + +[[package]] +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + 
+[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "regex" +version = "1.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + +[[package]] +name = "rustc-std-workspace-alloc" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d441c3b2ebf55cebf796bfdc265d67fa09db17b7bb6bd4be75c509e1e8fec3" + +[[package]] +name = "rustc-std-workspace-core" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9c45b374136f52f2d6311062c7146bff20fec063c3f5d46a410bd937746955" + +[[package]] +name = "rustix" +version = "1.1.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rustix" +version = "1.1.3" +dependencies = [ + "bitflags 2.9.1", + "criterion", + "errno", + "flate2", + "libc", + "linux-raw-sys", + "memoffset", + "once_cell", + "rustc-std-workspace-alloc", + "rustc-std-workspace-core", + "serial_test", + "static_assertions", + "tempfile", + "windows-sys", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.146" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "217ca874ae0207aac254aa02c957ded05585a90892cc8d87f9e5fa49669dadd8" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", + "serde_core", +] + +[[package]] +name = "serial_test" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" +dependencies = [ + "dashmap", + "futures", + "lazy_static", + "log", + "parking_lot", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "simd-adler32" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "syn" +version = "2.0.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +dependencies = [ + "fastrand", + "getrandom", + "once_cell", + "rustix 1.1.2", + "windows-sys", +] + +[[package]] +name = "textwrap" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.106" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = 
"windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "zerocopy" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..87f6607f61e5dc6990c0ad65668ce43f712177b0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/Cargo.toml @@ -0,0 +1,292 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.63" +name = "rustix" +version = "1.1.3" +authors = [ + "Dan Gohman ", + "Jakub Konka ", +] +build = "build.rs" +include = [ + "src", + "build.rs", + "Cargo.toml", + "COPYRIGHT", + "LICENSE*", + "/*.md", + "benches", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Safe Rust bindings to POSIX/Unix/Linux/Winsock-like syscalls" +documentation = "https://docs.rs/rustix" +readme = "README.md" +keywords = [ + "api", + "file", + "network", + "safe", + "syscall", +] +categories = [ + "os::unix-apis", + "date-and-time", + "filesystem", + "network-programming", +] +license = "Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT" +repository = "https://github.com/bytecodealliance/rustix" + +[package.metadata.docs.rs] +features = ["all-apis"] +targets = [ + "x86_64-unknown-linux-gnu", + "i686-unknown-linux-gnu", + "x86_64-apple-darwin", + "x86_64-pc-windows-msvc", + "x86_64-unknown-freebsd", + "x86_64-unknown-openbsd", + "x86_64-unknown-netbsd", + "x86_64-unknown-dragonfly", + "x86_64-unknown-illumos", + "x86_64-unknown-redox", + "x86_64-unknown-haiku", + "wasm32-unknown-emscripten", + "wasm32-wasip1", +] + +[features] +all-apis = [ + "event", + "fs", + "io_uring", + "mm", + "mount", + "net", + "param", + "pipe", + "process", + "pty", + "rand", + "runtime", + "shm", + "stdio", + "system", + "termios", + "thread", + "time", +] +alloc = [] +default = ["std"] +event = [] +fs = [] +io_uring = [ + "event", + "fs", + "net", + "thread", + "linux-raw-sys/io_uring", +] +linux_4_11 = [] +linux_5_1 = ["linux_4_11"] +linux_5_11 = ["linux_5_1"] +linux_latest = ["linux_5_11"] +mm = [] +mount = [] +net = [ + "linux-raw-sys/net", + "linux-raw-sys/netlink", + "linux-raw-sys/if_ether", + "linux-raw-sys/xdp", +] +param = [] +pipe = [] +process = ["linux-raw-sys/prctl"] +pty = ["fs"] +rand = [] +runtime = ["linux-raw-sys/prctl"] +rustc-dep-of-std = [ + "core", + 
"rustc-std-workspace-alloc", + "linux-raw-sys/rustc-dep-of-std", + "bitflags/rustc-dep-of-std", +] +shm = ["fs"] +std = [ + "bitflags/std", + "alloc", + "libc?/std", + "libc_errno?/std", +] +stdio = [] +system = ["linux-raw-sys/system"] +termios = [] +thread = ["linux-raw-sys/prctl"] +time = [] +try_close = [] +use-explicitly-provided-auxv = [] +use-libc = [ + "libc_errno", + "libc", +] +use-libc-auxv = [] + +[lib] +name = "rustix" +path = "src/lib.rs" + +[[bench]] +name = "mod" +path = "benches/mod.rs" +harness = false + +[dependencies.bitflags] +version = "2.4.0" +default-features = false + +[dependencies.core] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-core" + +[dependencies.rustc-std-workspace-alloc] +version = "1.0.0" +optional = true + +[dev-dependencies.flate2] +version = "1.0" + +[dev-dependencies.libc] +version = "0.2.171" + +[dev-dependencies.libc_errno] +version = "0.3.10" +default-features = false +package = "errno" + +[dev-dependencies.memoffset] +version = "0.9.0" + +[dev-dependencies.serial_test] +version = "2.0.0" + +[dev-dependencies.static_assertions] +version = "1.1.0" + +[dev-dependencies.tempfile] +version = "3.5.0" + +[target.'cfg(all(any(target_os = "linux", target_os = "android"), any(rustix_use_libc, miri, not(all(target_os = "linux", any(target_endian = "little", any(target_arch = "s390x", target_arch = "powerpc")), any(target_arch = "arm", all(target_arch = "aarch64", target_pointer_width = "64"), target_arch = "riscv64", all(rustix_use_experimental_asm, target_arch = "powerpc"), all(rustix_use_experimental_asm, target_arch = "powerpc64"), all(rustix_use_experimental_asm, target_arch = "s390x"), all(rustix_use_experimental_asm, target_arch = "mips"), all(rustix_use_experimental_asm, target_arch = "mips32r6"), all(rustix_use_experimental_asm, target_arch = "mips64"), all(rustix_use_experimental_asm, target_arch = "mips64r6"), target_arch = "x86", all(target_arch = "x86_64", target_pointer_width = 
"64")))))))'.dependencies.linux-raw-sys] +version = "0.11.0" +features = [ + "general", + "ioctl", + "no_std", +] +default-features = false + +[target.'cfg(all(criterion, not(any(target_os = "emscripten", target_os = "wasi"))))'.dev-dependencies.criterion] +version = "0.4" + +[target.'cfg(all(not(rustix_use_libc), not(miri), target_os = "linux", any(target_endian = "little", any(target_arch = "s390x", target_arch = "powerpc")), any(target_arch = "arm", all(target_arch = "aarch64", target_pointer_width = "64"), target_arch = "riscv64", all(rustix_use_experimental_asm, target_arch = "powerpc"), all(rustix_use_experimental_asm, target_arch = "powerpc64"), all(rustix_use_experimental_asm, target_arch = "s390x"), all(rustix_use_experimental_asm, target_arch = "mips"), all(rustix_use_experimental_asm, target_arch = "mips32r6"), all(rustix_use_experimental_asm, target_arch = "mips64"), all(rustix_use_experimental_asm, target_arch = "mips64r6"), target_arch = "x86", all(target_arch = "x86_64", target_pointer_width = "64"))))'.dependencies.libc] +version = "0.2.177" +optional = true +default-features = false + +[target.'cfg(all(not(rustix_use_libc), not(miri), target_os = "linux", any(target_endian = "little", any(target_arch = "s390x", target_arch = "powerpc")), any(target_arch = "arm", all(target_arch = "aarch64", target_pointer_width = "64"), target_arch = "riscv64", all(rustix_use_experimental_asm, target_arch = "powerpc"), all(rustix_use_experimental_asm, target_arch = "powerpc64"), all(rustix_use_experimental_asm, target_arch = "s390x"), all(rustix_use_experimental_asm, target_arch = "mips"), all(rustix_use_experimental_asm, target_arch = "mips32r6"), all(rustix_use_experimental_asm, target_arch = "mips64"), all(rustix_use_experimental_asm, target_arch = "mips64r6"), target_arch = "x86", all(target_arch = "x86_64", target_pointer_width = "64"))))'.dependencies.libc_errno] +version = "0.3.10" +optional = true +default-features = false +package = "errno" + 
+[target.'cfg(all(not(rustix_use_libc), not(miri), target_os = "linux", any(target_endian = "little", any(target_arch = "s390x", target_arch = "powerpc")), any(target_arch = "arm", all(target_arch = "aarch64", target_pointer_width = "64"), target_arch = "riscv64", all(rustix_use_experimental_asm, target_arch = "powerpc"), all(rustix_use_experimental_asm, target_arch = "powerpc64"), all(rustix_use_experimental_asm, target_arch = "s390x"), all(rustix_use_experimental_asm, target_arch = "mips"), all(rustix_use_experimental_asm, target_arch = "mips32r6"), all(rustix_use_experimental_asm, target_arch = "mips64"), all(rustix_use_experimental_asm, target_arch = "mips64r6"), target_arch = "x86", all(target_arch = "x86_64", target_pointer_width = "64"))))'.dependencies.linux-raw-sys] +version = "0.11.0" +features = [ + "auxvec", + "general", + "errno", + "ioctl", + "no_std", + "elf", +] +default-features = false + +[target.'cfg(all(not(windows), any(rustix_use_libc, miri, not(all(target_os = "linux", any(target_endian = "little", any(target_arch = "s390x", target_arch = "powerpc")), any(target_arch = "arm", all(target_arch = "aarch64", target_pointer_width = "64"), target_arch = "riscv64", all(rustix_use_experimental_asm, target_arch = "powerpc"), all(rustix_use_experimental_asm, target_arch = "powerpc64"), all(rustix_use_experimental_asm, target_arch = "s390x"), all(rustix_use_experimental_asm, target_arch = "mips"), all(rustix_use_experimental_asm, target_arch = "mips32r6"), all(rustix_use_experimental_asm, target_arch = "mips64"), all(rustix_use_experimental_asm, target_arch = "mips64r6"), target_arch = "x86", all(target_arch = "x86_64", target_pointer_width = "64")))))))'.dependencies.libc] +version = "0.2.177" +default-features = false + +[target.'cfg(all(not(windows), any(rustix_use_libc, miri, not(all(target_os = "linux", any(target_endian = "little", any(target_arch = "s390x", target_arch = "powerpc")), any(target_arch = "arm", all(target_arch = "aarch64", 
target_pointer_width = "64"), target_arch = "riscv64", all(rustix_use_experimental_asm, target_arch = "powerpc"), all(rustix_use_experimental_asm, target_arch = "powerpc64"), all(rustix_use_experimental_asm, target_arch = "s390x"), all(rustix_use_experimental_asm, target_arch = "mips"), all(rustix_use_experimental_asm, target_arch = "mips32r6"), all(rustix_use_experimental_asm, target_arch = "mips64"), all(rustix_use_experimental_asm, target_arch = "mips64r6"), target_arch = "x86", all(target_arch = "x86_64", target_pointer_width = "64")))))))'.dependencies.libc_errno] +version = "0.3.10" +default-features = false +package = "errno" + +[target."cfg(windows)".dependencies.libc_errno] +version = "0.3.10" +default-features = false +package = "errno" + +[target."cfg(windows)".dependencies.windows-sys] +version = ">=0.52, <0.62" +features = [ + "Win32_Foundation", + "Win32_Networking_WinSock", +] + +[target."cfg(windows)".dev-dependencies.once_cell] +version = "1.20.3" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = [ + "cfg(alloc_c_string)", + "cfg(alloc_ffi)", + "cfg(apple)", + "cfg(asm_experimental_arch)", + "cfg(bsd)", + "cfg(core_c_str)", + "cfg(core_ffi_c)", + "cfg(core_intrinsics)", + "cfg(criterion)", + "cfg(document_experimental_runtime_api)", + "cfg(error_in_core)", + "cfg(fix_y2038)", + "cfg(freebsdlike)", + "cfg(libc)", + "cfg(linux_kernel)", + "cfg(linux_like)", + "cfg(linux_raw)", + "cfg(linux_raw_dep)", + "cfg(lower_upper_exp_for_non_zero)", + "cfg(sanitize_memory)", + "cfg(netbsdlike)", + "cfg(rustc_attrs)", + "cfg(rustc_diagnostics)", + "cfg(solarish)", + "cfg(staged_api)", + "cfg(static_assertions)", + "cfg(thumb_mode)", + "cfg(wasi)", + "cfg(wasi_ext)", + "cfg(wasip2)", + 'cfg(target_arch, values("xtensa"))', + 'cfg(target_os, values("cygwin"))', +] + +[hints] +mostly-unused = true diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/Cargo.toml.orig 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..89a3fad33bfed20bf6704dbc25e5ec86b40c191e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/Cargo.toml.orig @@ -0,0 +1,278 @@ +[package] +name = "rustix" +version = "1.1.3" +authors = [ + "Dan Gohman ", + "Jakub Konka ", +] +description = "Safe Rust bindings to POSIX/Unix/Linux/Winsock-like syscalls" +documentation = "https://docs.rs/rustix" +license = "Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT" +repository = "https://github.com/bytecodealliance/rustix" +edition = "2021" +keywords = ["api", "file", "network", "safe", "syscall"] +categories = ["os::unix-apis", "date-and-time", "filesystem", "network-programming"] +include = ["src", "build.rs", "Cargo.toml", "COPYRIGHT", "LICENSE*", "/*.md", "benches"] +rust-version = "1.63" + +[hints] +# Most users use a fraction of the rustix API surface area, so this reduces compilation times +mostly-unused = true + +[dependencies] +bitflags = { version = "2.4.0", default-features = false } + +# Special dependencies used in rustc-dep-of-std mode. +core = { version = "1.0.0", optional = true, package = "rustc-std-workspace-core" } +rustc-std-workspace-alloc = { version = "1.0.0", optional = true } # not aliased here but in lib.rs because of name collision with the alloc feature + +# Dependencies for platforms where linux_raw is supported, in addition to libc: +# +# On Linux on selected architectures, the linux_raw backend is supported, in +# addition to the libc backend. The linux_raw backend is used by default. The +# libc backend can be selected via adding `--cfg=rustix_use_libc` to +# `RUSTFLAGS` or enabling the `use-libc` cargo feature. 
+[target.'cfg(all(not(rustix_use_libc), not(miri), target_os = "linux", any(target_endian = "little", any(target_arch = "s390x", target_arch = "powerpc")), any(target_arch = "arm", all(target_arch = "aarch64", target_pointer_width = "64"), target_arch = "riscv64", all(rustix_use_experimental_asm, target_arch = "powerpc"), all(rustix_use_experimental_asm, target_arch = "powerpc64"), all(rustix_use_experimental_asm, target_arch = "s390x"), all(rustix_use_experimental_asm, target_arch = "mips"), all(rustix_use_experimental_asm, target_arch = "mips32r6"), all(rustix_use_experimental_asm, target_arch = "mips64"), all(rustix_use_experimental_asm, target_arch = "mips64r6"), target_arch = "x86", all(target_arch = "x86_64", target_pointer_width = "64"))))'.dependencies] +linux-raw-sys = { version = "0.11.0", default-features = false, features = ["auxvec", "general", "errno", "ioctl", "no_std", "elf"] } +libc_errno = { package = "errno", version = "0.3.10", default-features = false, optional = true } +libc = { version = "0.2.177", default-features = false, optional = true } + +# Dependencies for platforms where only libc is supported: +# +# On all other Unix-family platforms, and under Miri, we always use the libc +# backend, so enable its dependencies unconditionally. 
+[target.'cfg(all(not(windows), any(rustix_use_libc, miri, not(all(target_os = "linux", any(target_endian = "little", any(target_arch = "s390x", target_arch = "powerpc")), any(target_arch = "arm", all(target_arch = "aarch64", target_pointer_width = "64"), target_arch = "riscv64", all(rustix_use_experimental_asm, target_arch = "powerpc"), all(rustix_use_experimental_asm, target_arch = "powerpc64"), all(rustix_use_experimental_asm, target_arch = "s390x"), all(rustix_use_experimental_asm, target_arch = "mips"), all(rustix_use_experimental_asm, target_arch = "mips32r6"), all(rustix_use_experimental_asm, target_arch = "mips64"), all(rustix_use_experimental_asm, target_arch = "mips64r6"), target_arch = "x86", all(target_arch = "x86_64", target_pointer_width = "64")))))))'.dependencies] +libc_errno = { package = "errno", version = "0.3.10", default-features = false } +libc = { version = "0.2.177", default-features = false } + +# Additional dependencies for Linux and Android with the libc backend: +# +# Some syscalls do not have libc wrappers, such as in `io_uring`. For these, +# the libc backend uses the linux-raw-sys ABI and `libc::syscall`. 
+[target.'cfg(all(any(target_os = "linux", target_os = "android"), any(rustix_use_libc, miri, not(all(target_os = "linux", any(target_endian = "little", any(target_arch = "s390x", target_arch = "powerpc")), any(target_arch = "arm", all(target_arch = "aarch64", target_pointer_width = "64"), target_arch = "riscv64", all(rustix_use_experimental_asm, target_arch = "powerpc"), all(rustix_use_experimental_asm, target_arch = "powerpc64"), all(rustix_use_experimental_asm, target_arch = "s390x"), all(rustix_use_experimental_asm, target_arch = "mips"), all(rustix_use_experimental_asm, target_arch = "mips32r6"), all(rustix_use_experimental_asm, target_arch = "mips64"), all(rustix_use_experimental_asm, target_arch = "mips64r6"), target_arch = "x86", all(target_arch = "x86_64", target_pointer_width = "64")))))))'.dependencies] +linux-raw-sys = { version = "0.11.0", default-features = false, features = ["general", "ioctl", "no_std"] } + +# For the libc backend on Windows, use the Winsock API in windows-sys. +[target.'cfg(windows)'.dependencies.windows-sys] +version = ">=0.52, <0.62" +features = [ + "Win32_Foundation", + "Win32_Networking_WinSock", +] + +# For the libc backend on Windows, also use the errno crate, which has Windows +# support. +[target.'cfg(windows)'.dependencies.libc_errno] +version = "0.3.10" +package = "errno" +default-features = false + +[dev-dependencies] +tempfile = "3.5.0" +libc = "0.2.171" +libc_errno = { package = "errno", version = "0.3.10", default-features = false } +serial_test = "2.0.0" +memoffset = "0.9.0" +flate2 = "1.0" +static_assertions = "1.1.0" + +# With Rust 1.70 this can be removed in favor of `std::sync::OnceLock`. 
+[target.'cfg(windows)'.dev-dependencies] +once_cell = "1.20.3" + +[target.'cfg(all(criterion, not(any(target_os = "emscripten", target_os = "wasi"))))'.dev-dependencies] +criterion = "0.4" + +# Add Criterion configuration, as described here: +# +[[bench]] +name = "mod" +harness = false + +[package.metadata.docs.rs] +features = ["all-apis"] +targets = [ + "x86_64-unknown-linux-gnu", + "i686-unknown-linux-gnu", + "x86_64-apple-darwin", + "x86_64-pc-windows-msvc", + "x86_64-unknown-freebsd", + "x86_64-unknown-openbsd", + "x86_64-unknown-netbsd", + "x86_64-unknown-dragonfly", + "x86_64-unknown-illumos", + "x86_64-unknown-redox", + "x86_64-unknown-haiku", + "wasm32-unknown-emscripten", + "wasm32-wasip1", +] + +[features] + +# By default, use `std`. +default = ["std"] + +# This enables use of std. Disabling this enables `#![no_std]`, and requires +# Rust 1.77 or newer. +std = ["bitflags/std", "alloc", "libc?/std", "libc_errno?/std"] + +# Enable this to request the libc backend. +use-libc = ["libc_errno", "libc"] + +# Enable `rustix::event::*`. +event = [] + +# Enable `rustix::fs::*`. +fs = [] + +# Enable `rustix::io_uring::*` (on platforms that support it). +io_uring = ["event", "fs", "net", "thread", "linux-raw-sys/io_uring"] + +# Enable `rustix::mount::*`. +mount = [] + +# Enable `rustix::net::*`. +net = ["linux-raw-sys/net", "linux-raw-sys/netlink", "linux-raw-sys/if_ether", "linux-raw-sys/xdp"] + +# Enable `rustix::thread::*`. +thread = ["linux-raw-sys/prctl"] + +# Enable `rustix::process::*`. +process = ["linux-raw-sys/prctl"] + +# Enable `rustix::shm::*`. +shm = ["fs"] + +# Enable `rustix::time::*`. +time = [] + +# Enable `rustix::param::*`. +param = [] + +# Enable `rustix::pty::*`. +pty = ["fs"] + +# Enable `rustix::termios::*`. +termios = [] + +# Enable `rustix::mm::*`. +mm = [] + +# Enable `rustix::pipe::*`. +pipe = [] + +# Enable `rustix::rand::*`. +rand = [] + +# Enable `rustix::stdio::*`. +stdio = [] + +# Enable `rustix::system::*`. 
+system = ["linux-raw-sys/system"] + +# Enable `rustix::runtime::*`. ⚠ This API is undocumented and unstable and +# experimental and not intended for general-purpose use. ⚠ +runtime = ["linux-raw-sys/prctl"] + +# Enable all API features. +# +# This is primarily intended for rustix developers. Users are encouraged to +# enable only those features they need. +all-apis = [ + "event", + "fs", + "io_uring", + "mm", + "mount", + "net", + "param", + "pipe", + "process", + "pty", + "rand", + "runtime", + "shm", + "stdio", + "system", + "termios", + "thread", + "time", +] + +# When using the linux_raw backend, should we use `getauxval` for reading aux +# vectors, instead of `PR_GET_AUXV` or "/proc/self/auxv"? +use-libc-auxv = [] + +# Enable "use-explicitly-provided-auxv" mode, with a public +# `rustix::param::init` function that must be called before anything else in +# rustix. This is unstable and experimental and not intended for +# general-purpose use. +use-explicitly-provided-auxv = [] + +# OS compatibility features + +# Specialize for Linux 4.11 or later +linux_4_11 = [] + +# Specialize for Linux 5.1 or later +linux_5_1 = ["linux_4_11"] + +# Specialize for Linux 5.11 or later +linux_5_11 = ["linux_5_1"] + +# Enable all specializations for the latest Linux versions. +linux_latest = ["linux_5_11"] + +# Enable features which depend on the Rust global allocator, such as functions +# that return owned strings or `Vec`s. +alloc = [] + +# This is used in the port of std to rustix. This is experimental and not meant +# for regular use. +rustc-dep-of-std = [ + "core", + "rustc-std-workspace-alloc", + "linux-raw-sys/rustc-dep-of-std", + "bitflags/rustc-dep-of-std", +] + +# Enable `rustix::io::try_close`. The rustix developers do not intend the +# existence of this feature to imply that anyone should use it. 
+try_close = [] + +[lints.rust.unexpected_cfgs] +level = "warn" +check-cfg = [ + 'cfg(alloc_c_string)', + 'cfg(alloc_ffi)', + 'cfg(apple)', + 'cfg(asm_experimental_arch)', + 'cfg(bsd)', + 'cfg(core_c_str)', + 'cfg(core_ffi_c)', + 'cfg(core_intrinsics)', + 'cfg(criterion)', + 'cfg(document_experimental_runtime_api)', + 'cfg(error_in_core)', + 'cfg(fix_y2038)', + 'cfg(freebsdlike)', + 'cfg(libc)', + 'cfg(linux_kernel)', + 'cfg(linux_like)', + 'cfg(linux_raw)', + 'cfg(linux_raw_dep)', + 'cfg(lower_upper_exp_for_non_zero)', + 'cfg(sanitize_memory)', + 'cfg(netbsdlike)', + 'cfg(rustc_attrs)', + 'cfg(rustc_diagnostics)', + 'cfg(solarish)', + 'cfg(staged_api)', + 'cfg(static_assertions)', + 'cfg(thumb_mode)', + 'cfg(wasi)', + 'cfg(wasi_ext)', + 'cfg(wasip2)', + 'cfg(target_arch, values("xtensa"))', + 'cfg(target_os, values("cygwin"))', +] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..16fe87b06e802f094b3fbb0894b137bca2b16ef1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/LICENSE-Apache-2.0_WITH_LLVM-exception b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/LICENSE-Apache-2.0_WITH_LLVM-exception new file mode 100644 index 0000000000000000000000000000000000000000..f9d81955f4bcb8f96a025e2ecc46f39ec536d465 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/LICENSE-Apache-2.0_WITH_LLVM-exception @@ -0,0 +1,220 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +--- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. 
+ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..31aa79387f27e730e33d871925e152e35e428031 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/ORG_CODE_OF_CONDUCT.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/ORG_CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..6f4fb3f537d154768878020bbbb7fc2897956066 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/ORG_CODE_OF_CONDUCT.md @@ -0,0 +1,143 @@ +# Bytecode Alliance Organizational Code of Conduct (OCoC) + +*Note*: this Code of Conduct pertains to organizations' behavior. Please also see the [Individual Code of Conduct](CODE_OF_CONDUCT.md). + +## Preamble + +The Bytecode Alliance (BA) welcomes involvement from organizations, +including commercial organizations. This document is an +*organizational* code of conduct, intended particularly to provide +guidance to commercial organizations. It is distinct from the +[Individual Code of Conduct (ICoC)](CODE_OF_CONDUCT.md), and does not +replace the ICoC. This OCoC applies to any group of people acting in +concert as a BA member or as a participant in BA activities, whether +or not that group is formally incorporated in some jurisdiction. + +The code of conduct described below is not a set of rigid rules, and +we did not write it to encompass every conceivable scenario that might +arise. For example, it is theoretically possible there would be times +when asserting patents is in the best interest of the BA community as +a whole. In such instances, consult with the BA, strive for +consensus, and interpret these rules with an intent that is generous +to the community the BA serves. 
+ +While we may revise these guidelines from time to time based on +real-world experience, overall they are based on a simple principle: + +*Bytecode Alliance members should observe the distinction between + public community functions and private functions — especially + commercial ones — and should ensure that the latter support, or at + least do not harm, the former.* + +## Guidelines + + * **Do not cause confusion about Wasm standards or interoperability.** + + Having an interoperable WebAssembly core is a high priority for + the BA, and members should strive to preserve that core. It is fine + to develop additional non-standard features or APIs, but they + should always be clearly distinguished from the core interoperable + Wasm. + + Treat the WebAssembly name and any BA-associated names with + respect, and follow BA trademark and branding guidelines. If you + distribute a customized version of software originally produced by + the BA, or if you build a product or service using BA-derived + software, use names that clearly distinguish your work from the + original. (You should still provide proper attribution to the + original, of course, wherever such attribution would normally be + given.) + + Further, do not use the WebAssembly name or BA-associated names in + other public namespaces in ways that could cause confusion, e.g., + in company names, names of commercial service offerings, domain + names, publicly-visible social media accounts or online service + accounts, etc. It may sometimes be reasonable, however, to + register such a name in a new namespace and then immediately donate + control of that account to the BA, because that would help the project + maintain its identity. + + For further guidance, see the BA Trademark and Branding Policy + [TODO: create policy, then insert link]. 
+ + * **Do not restrict contributors.** If your company requires + employees or contractors to sign non-compete agreements, those + agreements must not prevent people from participating in the BA or + contributing to related projects. + + This does not mean that all non-compete agreements are incompatible + with this code of conduct. For example, a company may restrict an + employee's ability to solicit the company's customers. However, an + agreement must not block any form of technical or social + participation in BA activities, including but not limited to the + implementation of particular features. + + The accumulation of experience and expertise in individual persons, + who are ultimately free to direct their energy and attention as + they decide, is one of the most important drivers of progress in + open source projects. A company that limits this freedom may hinder + the success of the BA's efforts. + + * **Do not use patents as offensive weapons.** If any BA participant + prevents the adoption or development of BA technologies by + asserting its patents, that undermines the purpose of the + coalition. The collaboration fostered by the BA cannot include + members who act to undermine its work. + + * **Practice responsible disclosure** for security vulnerabilities. + Use designated, non-public reporting channels to disclose technical + vulnerabilities, and give the project a reasonable period to + respond, remediate, and patch. [TODO: optionally include the + security vulnerability reporting URL here.] + + Vulnerability reporters may patch their company's own offerings, as + long as that patching does not significantly delay the reporting of + the vulnerability. Vulnerability information should never be used + for unilateral commercial advantage. 
Vendors may legitimately + compete on the speed and reliability with which they deploy + security fixes, but withholding vulnerability information damages + everyone in the long run by risking harm to the BA project's + reputation and to the security of all users. + + * **Respect the letter and spirit of open source practice.** While + there is not space to list here all possible aspects of standard + open source practice, some examples will help show what we mean: + + * Abide by all applicable open source license terms. Do not engage + in copyright violation or misattribution of any kind. + + * Do not claim others' ideas or designs as your own. + + * When others engage in publicly visible work (e.g., an upcoming + demo that is coordinated in a public issue tracker), do not + unilaterally announce early releases or early demonstrations of + that work ahead of their schedule in order to secure private + advantage (such as marketplace advantage) for yourself. + + The BA reserves the right to determine what constitutes good open + source practices and to take action as it deems appropriate to + encourage, and if necessary enforce, such practices. + +## Enforcement + +Instances of organizational behavior in violation of the OCoC may +be reported by contacting the Bytecode Alliance CoC team at +[report@bytecodealliance.org](mailto:report@bytecodealliance.org). The +CoC team will review and investigate all complaints, and will respond +in a way that it deems appropriate to the circumstances. The CoC team +is obligated to maintain confidentiality with regard to the reporter of +an incident. Further details of specific enforcement policies may be +posted separately. + +When the BA deems an organization in violation of this OCoC, the BA +will, at its sole discretion, determine what action to take. 
The BA +will decide what type, degree, and duration of corrective action is +needed, if any, before a violating organization can be considered for +membership (if it was not already a member) or can have its membership +reinstated (if it was a member and the BA canceled its membership due +to the violation). + +In practice, the BA's first approach will be to start a conversation, +with punitive enforcement used only as a last resort. Violations +often turn out to be unintentional and swiftly correctable with all +parties acting in good faith. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fa54bd6728b1185b8cdc8d495ab7c57d6cedfb86 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/README.md @@ -0,0 +1,208 @@ + + +`rustix` provides efficient memory-safe and [I/O-safe] wrappers to POSIX-like, +Unix-like, Linux, and Winsock syscall-like APIs, with configurable backends. It +uses Rust references, slices, and return values instead of raw pointers, and +[I/O safety types] instead of raw file descriptors, providing memory safety, +[I/O safety], and [provenance]. It uses `Result`s for reporting errors, +[`bitflags`] instead of bare integer flags, an [`Arg`] trait with optimizations +to efficiently accept any Rust string type, and several other efficient +conveniences. + +`rustix` is low-level and, and while the `net` API supports [Windows Sockets 2] +(Winsock), the rest of the APIs do not support Windows; for higher-level and +more portable APIs built on this functionality, see the [`cap-std`], [`memfd`], +[`timerfd`], and [`io-streams`] crates, for example. 
+ +`rustix` currently has two backends available: + + * linux_raw, which uses raw Linux system calls and vDSO calls, and is + supported on Linux on x86-64, x86, aarch64, riscv64gc, powerpc64le, + arm (v5 onwards), mipsel, and mips64el, with stable, nightly, and 1.63 Rust. + - By being implemented entirely in Rust, avoiding `libc`, `errno`, and pthread + cancellation, and employing some specialized optimizations, most functions + compile down to very efficient code, which can often be fully inlined into + user code. + - Most functions in `linux_raw` preserve memory, I/O safety, and pointer + provenance all the way down to the syscalls. + + * libc, which uses the [`libc`] crate which provides bindings to native `libc` + libraries on Unix-family platforms, and [`windows-sys`] for Winsock on + Windows, and is portable to many OS's. + +The linux_raw backend is enabled by default on platforms which support it. To +enable the libc backend instead, either enable the "use-libc" cargo feature, or +set the `RUSTFLAGS` environment variable to `--cfg=rustix_use_libc` when +building. + +## Cargo features + +The modules [`rustix::io`], [`rustix::buffer`], [`rustix::fd`], +[`rustix::ffi`], and [`rustix::ioctl`] are enabled by default. The rest of the +API modules are conditional with cargo feature flags. + +| Name | Description | +| ---------- | -------------------------------------------------------------- | +| `event` | [`rustix::event`]—Polling and event operations. | +| `fs` | [`rustix::fs`]—Filesystem operations. | +| `io_uring` | [`rustix::io_uring`]—Linux io_uring. | +| `mm` | [`rustix::mm`]—Memory map operations. | +| `mount` | [`rustix::mount`]—Linux mount API. | +| `net` | [`rustix::net`]—Network-related operations. | +| `param` | [`rustix::param`]—Process parameters. | +| `pipe` | [`rustix::pipe`]—Pipe operations. | +| `process` | [`rustix::process`]—Process-associated operations. | +| `pty` | [`rustix::pty`]—Pseudoterminal operations. 
| +| `rand` | [`rustix::rand`]—Random-related operations. | +| `shm` | [`rustix::shm`]—POSIX shared memory. | +| `stdio` | [`rustix::stdio`]—Stdio-related operations. | +| `system` | [`rustix::system`]—System-related operations. | +| `termios` | [`rustix::termios`]—Terminal I/O stream operations. | +| `thread` | [`rustix::thread`]—Thread-associated operations. | +| `time` | [`rustix::time`]—Time-related operations. | +| | | +| `use-libc` | Enable the libc backend. | +| | | +| `linux_4_11` | Enable optimizations that assume Linux ≥ 4.11 | +| `linux_5_1` | Enable optimizations that assume Linux ≥ 5.1 | +| `linux_5_11` | Enable optimizations that assume Linux ≥ 5.11 | +| `linux_latest` | Enable optimizations that assume the latest Linux release | +| | | +| `use-libc-auxv` | Use `getauxval` instead of `PR_GET_AUXV` or "/proc/self/auxv". | +| | | +| `std` | On by default; disable to activate `#![no_std]`. | +| `alloc` | On by default; enables features that depend on [`alloc`]. | + +[`rustix::buffer`]: https://docs.rs/rustix/*/rustix/buffer/index.html +[`rustix::event`]: https://docs.rs/rustix/*/rustix/event/index.html +[`rustix::fs`]: https://docs.rs/rustix/*/rustix/fs/index.html +[`rustix::io_uring`]: https://docs.rs/rustix/*/rustix/io_uring/index.html +[`rustix::mm`]: https://docs.rs/rustix/*/rustix/mm/index.html +[`rustix::mount`]: https://docs.rs/rustix/*/rustix/mount/index.html +[`rustix::net`]: https://docs.rs/rustix/*/rustix/net/index.html +[`rustix::param`]: https://docs.rs/rustix/*/rustix/param/index.html +[`rustix::pipe`]: https://docs.rs/rustix/*/rustix/pipe/index.html +[`rustix::process`]: https://docs.rs/rustix/*/rustix/process/index.html +[`rustix::pty`]: https://docs.rs/rustix/*/rustix/pty/index.html +[`rustix::rand`]: https://docs.rs/rustix/*/rustix/rand/index.html +[`rustix::shm`]: https://docs.rs/rustix/*/rustix/shm/index.html +[`rustix::stdio`]: https://docs.rs/rustix/*/rustix/stdio/index.html +[`rustix::system`]: 
https://docs.rs/rustix/*/rustix/system/index.html +[`rustix::termios`]: https://docs.rs/rustix/*/rustix/termios/index.html +[`rustix::thread`]: https://docs.rs/rustix/*/rustix/thread/index.html +[`rustix::time`]: https://docs.rs/rustix/*/rustix/time/index.html +[`rustix::io`]: https://docs.rs/rustix/*/rustix/io/index.html +[`rustix::fd`]: https://docs.rs/rustix/*/rustix/fd/index.html +[`rustix::ffi`]: https://docs.rs/rustix/*/rustix/ffi/index.html +[`rustix::ioctl`]: https://docs.rs/rustix/*/rustix/ffi/ioctl.html + +## 64-bit Large File Support (LFS) and Year 2038 (y2038) support + +`rustix` automatically uses 64-bit APIs when available, and avoids exposing +32-bit APIs that would have the year-2038 problem or fail to support large +files. For instance, `rustix::fstatvfs` calls `fstatvfs64`, and returns a +struct that's 64-bit even on 32-bit platforms. + +## Similar crates + +`rustix` is similar to [`nix`], [`simple_libc`], [`unix`], [`nc`], [`uapi`], +and [`rusl`]. `rustix` is architected for [I/O safety] with most APIs using +[`OwnedFd`] and [`AsFd`] to manipulate file descriptors rather than `File` or +even `c_int`, and supporting multiple backends so that it can use direct +syscalls while still being usable on all platforms `libc` supports. Like `nix`, +`rustix` has an optimized and flexible filename argument mechanism that allows +users to use a variety of string types, including non-UTF-8 string types. + +[`relibc`] is a similar project which aims to be a full "libc", including +C-compatible interfaces and higher-level C/POSIX standard-library +functionality; `rustix` just aims to provide safe and idiomatic Rust interfaces +to low-level syscalls. `relibc` also doesn't tend to support features not +supported on Redox, such as `*at` functions like `openat`, which are important +features for `rustix`. + +`rustix` has its own code for making direct syscalls, similar to the +[`syscall`], [`sc`], and [`scall`] crates, using the Rust `asm!` macro. 
+`rustix` can also use Linux's vDSO mechanism to optimize Linux `clock_gettime` +on all architectures, and all Linux system calls on x86. And `rustix`'s +syscalls report errors using an optimized `Errno` type. + +`rustix`'s `*at` functions are similar to the [`openat`] crate, but `rustix` +provides them as free functions rather than associated functions of a `Dir` +type. `rustix`'s `CWD` constant exposes the special `AT_FDCWD` value in a safe +way, so users don't need to open `.` to get a current-directory handle. + +`rustix`'s `openat2` function is similar to the [`openat2`] crate, but uses I/O +safety types rather than `RawFd`. `rustix` does not provide dynamic feature +detection, so users must handle the [`NOSYS`] error themselves. + +`rustix`'s `termios` module is similar to the [`termios`] crate, but uses I/O +safety types rather than `RawFd`, and the flags parameters to functions such as +`tcsetattr` are `enum`s rather than bare integers. And, rustix calls its +`tcgetattr` function `tcgetattr`, rather than `Termios::from_fd`. + +## Minimum Supported Rust Version (MSRV) + +This crate currently works on the version of [Rust on Debian stable], which is +currently [Rust 1.63]. This policy may change in the future, in minor version +releases, so users using a fixed version of Rust should pin to a specific +version of this crate. + +## Minimum Linux Version + +On Linux platforms, rustix requires at least Linux 3.2. This is at most the +oldest Linux version supported by: + - [any current Rust target], or + - [kernel.org] at the time of rustix's [MSRV] release. +The specifics of this policy may change in the future, but we intend it to +always reflect “very old” Linux versions. 
+ +[MSRV]: #minimum-supported-rust-version-msrv +[Rust 1.63]: https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html +[any current Rust target]: https://doc.rust-lang.org/nightly/rustc/platform-support.html +[kernel.org]: https://www.kernel.org/releases.html +[Rust on Debian stable]: https://packages.debian.org/stable/rust/rustc +[Windows Sockets 2]: https://learn.microsoft.com/en-us/windows/win32/winsock/windows-sockets-start-page-2 +[`nix`]: https://crates.io/crates/nix +[`unix`]: https://crates.io/crates/unix +[`nc`]: https://crates.io/crates/nc +[`simple_libc`]: https://crates.io/crates/simple_libc +[`uapi`]: https://crates.io/crates/uapi +[`rusl`]: https://lib.rs/crates/rusl +[`relibc`]: https://gitlab.redox-os.org/redox-os/relibc +[`syscall`]: https://crates.io/crates/syscall +[`sc`]: https://crates.io/crates/sc +[`scall`]: https://crates.io/crates/scall +[`openat`]: https://crates.io/crates/openat +[`openat2`]: https://crates.io/crates/openat2 +[I/O safety types]: https://doc.rust-lang.org/stable/std/os/fd/index.html#structs +[`termios`]: https://crates.io/crates/termios +[`libc`]: https://crates.io/crates/libc +[`windows-sys`]: https://crates.io/crates/windows-sys +[`cap-std`]: https://crates.io/crates/cap-std +[`memfd`]: https://crates.io/crates/memfd +[`timerfd`]: https://crates.io/crates/timerfd +[`io-streams`]: https://crates.io/crates/io-streams +[`bitflags`]: https://crates.io/crates/bitflags +[`Arg`]: https://docs.rs/rustix/*/rustix/path/trait.Arg.html +[I/O-safe]: https://github.com/rust-lang/rfcs/blob/master/text/3128-io-safety.md +[I/O safety]: https://github.com/rust-lang/rfcs/blob/master/text/3128-io-safety.md +[provenance]: https://github.com/rust-lang/rust/issues/95228 +[`OwnedFd`]: https://doc.rust-lang.org/stable/std/os/fd/struct.OwnedFd.html +[`AsFd`]: https://doc.rust-lang.org/stable/std/os/fd/trait.AsFd.html +[`NOSYS`]: https://docs.rs/rustix/*/rustix/io/struct.Errno.html#associatedconstant.NOSYS +[`alloc`]: 
https://doc.rust-lang.org/alloc/alloc/index.html diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/SECURITY.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/SECURITY.md new file mode 100644 index 0000000000000000000000000000000000000000..3513b9cb35734dd840996b5cded145e51c94ede0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/SECURITY.md @@ -0,0 +1,29 @@ +# Security Policy + +Building secure foundations for software development is at the core of what we do in the Bytecode Alliance. Contributions of external security researchers are a vital part of that. + +## Scope + +If you believe you've found a security issue in any website, service, or software owned or operated by the Bytecode Alliance, we encourage you to notify us. + +## How to Submit a Report + +To submit a vulnerability report to the Bytecode Alliance, please contact us at [security@bytecodealliance.org](mailto:security@bytecodealliance.org). Your submission will be reviewed and validated by a member of our security team. + +## Safe Harbor + +The Bytecode Alliance supports safe harbor for security researchers who: + +* Make a good faith effort to avoid privacy violations, destruction of data, and interruption or degradation of our services. +* Only interact with accounts you own or with explicit permission of the account holder. If you do encounter Personally Identifiable Information (PII) contact us immediately, do not proceed with access, and immediately purge any local information. +* Provide us with a reasonable amount of time to resolve vulnerabilities prior to any disclosure to the public or a third-party. + +We will consider activities conducted consistent with this policy to constitute "authorized" conduct and will not pursue civil action or initiate a complaint to law enforcement. We will help to the extent we can if legal action is initiated by a third party against you. 
+ +Please submit a report to us before engaging in conduct that may be inconsistent with or unaddressed by this policy. + +## Preferences + +* Please provide detailed reports with reproducible steps and a clearly defined impact. +* Submit one vulnerability per report. +* Social engineering (e.g. phishing, vishing, smishing) is prohibited. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/build.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..1677ece453350cc628aa19315fe0e44488aebb47 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustix-1.1.3/build.rs @@ -0,0 +1,286 @@ +use std::env::var; +use std::io::Write as _; +use std::path::PathBuf; + +/// The directory for inline asm. +const ASM_PATH: &str = "src/backend/linux_raw/arch"; + +fn main() { + // Don't rerun this on changes other than build.rs, as we only depend on + // the rustc version. + println!("cargo:rerun-if-changed=build.rs"); + + // Gather target information. + let arch = var("CARGO_CFG_TARGET_ARCH").unwrap(); + let env = var("CARGO_CFG_TARGET_ENV").unwrap(); + let abi = var("CARGO_CFG_TARGET_ABI"); + let inline_asm_name = format!("{}/{}.rs", ASM_PATH, arch); + let inline_asm_name_present = std::fs::metadata(inline_asm_name).is_ok(); + let os = var("CARGO_CFG_TARGET_OS").unwrap(); + let pointer_width = var("CARGO_CFG_TARGET_POINTER_WIDTH").unwrap(); + let endian = var("CARGO_CFG_TARGET_ENDIAN").unwrap(); + + // Check for special target variants. 
+ let is_x32 = arch == "x86_64" && pointer_width == "32"; + let is_arm64_ilp32 = arch == "aarch64" && pointer_width == "32"; + let is_powerpc64be = arch == "powerpc64" && endian == "big"; + let is_mipseb = (arch == "mips" || arch == "mips32r6") && endian == "big"; + let is_mips64eb = arch.contains("mips64") && endian == "big"; + let is_unsupported_abi = is_x32 || is_arm64_ilp32 || is_powerpc64be || is_mipseb || is_mips64eb; + + // Check for `--features=use-libc`. This allows crate users to enable the + // libc backend. + let feature_use_libc = var("CARGO_FEATURE_USE_LIBC").is_ok(); + + // Check for `RUSTFLAGS=--cfg=rustix_use_libc`. This allows end users to + // enable the libc backend even if rustix is depended on transitively. + let cfg_use_libc = var("CARGO_CFG_RUSTIX_USE_LIBC").is_ok(); + + // Check for `RUSTFLAGS=--cfg=rustix_no_linux_raw`. This allows Linux users to + // enable the libc backend without the linux raw dependency. + let cfg_no_linux_raw = var("CARGO_CFG_RUSTIX_NO_LINUX_RAW").is_ok(); + + // Check for `--features=rustc-dep-of-std`. + let rustc_dep_of_std = var("CARGO_FEATURE_RUSTC_DEP_OF_STD").is_ok(); + + // Check for eg. `RUSTFLAGS=--cfg=rustix_use_experimental_features`. This + // is a rustc flag rather than a cargo feature flag because it's + // experimental and not something we want accidentally enabled via + // `--all-features`. + let rustix_use_experimental_features = + var("CARGO_CFG_RUSTIX_USE_EXPERIMENTAL_FEATURES").is_ok(); + + // Check for eg. `RUSTFLAGS=--cfg=rustix_use_experimental_asm`. This is a + // rustc flag rather than a cargo feature flag because it's experimental + // and not something we want accidentally enabled via `--all-features`. + let rustix_use_experimental_asm = var("CARGO_CFG_RUSTIX_USE_EXPERIMENTAL_ASM").is_ok(); + + // Miri doesn't support inline asm, and has builtin support for recognizing + // libc FFI calls, so if we're running under miri, use the libc backend. 
+ let miri = var("CARGO_CFG_MIRI").is_ok(); + + // If experimental features are enabled, auto-detect and use available + // features. + if rustc_dep_of_std { + use_feature("rustc_attrs"); + use_feature("core_intrinsics"); + } else if rustix_use_experimental_features { + use_feature_or_nothing("rustc_attrs"); + use_feature_or_nothing("core_intrinsics"); + } + + // Features needed only in no-std configurations. + #[cfg(not(feature = "std"))] + { + use_feature_or_nothing("core_c_str"); + use_feature_or_nothing("core_ffi_c"); + use_feature_or_nothing("alloc_c_string"); + use_feature_or_nothing("alloc_ffi"); + use_feature_or_nothing("error_in_core"); + } + + // Feature needed for testing. + if use_static_assertions() { + use_feature("static_assertions"); + } + + // `LowerExp`/`UpperExp` for `NonZeroI32` etc. + if has_lower_upper_exp_for_non_zero() { + use_feature("lower_upper_exp_for_non_zero"); + } + + if can_compile("#[diagnostic::on_unimplemented()] trait Foo {}") { + use_feature("rustc_diagnostics") + } + + // WASI support can utilize wasi_ext if present. + if os == "wasi" { + use_feature_or_nothing("wasi_ext"); + use_feature_or_nothing("wasip2"); + } + + // If the libc backend is requested, or if we're not on a platform for + // which we have linux_raw support, use the libc backend. + // + // For now Android uses the libc backend; in theory it could use the + // linux_raw backend, but to do that we'll need to figure out how to + // install the toolchain for it. + let libc = feature_use_libc + || cfg_use_libc + || os != "linux" + || !inline_asm_name_present + || is_unsupported_abi + || miri + || ((arch == "powerpc" + || arch == "powerpc64" + || arch == "s390x" + || arch.starts_with("mips")) + && !rustix_use_experimental_asm); + if libc { + if (os == "linux" || os == "android") && !cfg_no_linux_raw { + use_feature("linux_raw_dep"); + } + + // Use the libc backend. + use_feature("libc"); + } else { + // Use the linux_raw backend. 
+ use_feature("linux_raw_dep"); + use_feature("linux_raw"); + if rustix_use_experimental_asm { + use_feature("asm_experimental_arch"); + } + } + + // Detect whether the compiler requires us to use thumb mode on ARM. + if arch == "arm" && use_thumb_mode() { + use_feature("thumb_mode"); + } + + // Rust's libc crate groups some OS's together which have similar APIs; + // create similarly-named features to make `cfg` tests more concise. + let freebsdlike = os == "freebsd" || os == "dragonfly"; + if freebsdlike { + use_feature("freebsdlike"); + } + let netbsdlike = os == "openbsd" || os == "netbsd"; + if netbsdlike { + use_feature("netbsdlike"); + } + let apple = os == "macos" || os == "ios" || os == "tvos" || os == "visionos" || os == "watchos"; + if apple { + use_feature("apple"); + } + if os == "linux" || os == "l4re" || os == "android" || os == "emscripten" { + use_feature("linux_like"); + } + if os == "solaris" || os == "illumos" { + use_feature("solarish"); + } + if apple || freebsdlike || netbsdlike { + use_feature("bsd"); + } + + // Add some additional common target combinations. + + // Android and "regular" Linux both use the Linux kernel. + if os == "android" || os == "linux" { + use_feature("linux_kernel"); + } + + // These platforms have a 32-bit `time_t`. 
+ if libc + && (arch == "arm" + || arch == "powerpc" + || arch == "mips" + || arch == "sparc" + || arch == "x86" + || (arch == "aarch64" && os == "linux" && abi == Ok("ilp32".to_string()))) + && (apple + || os == "android" + || (os == "freebsd" && arch == "x86") + || os == "haiku" + || env == "gnu" + || (env == "musl" && arch == "x86") + || (arch == "aarch64" && os == "linux" && abi == Ok("ilp32".to_string()))) + { + use_feature("fix_y2038"); + } + + println!("cargo:rerun-if-env-changed=CARGO_CFG_RUSTIX_USE_EXPERIMENTAL_ASM"); + println!("cargo:rerun-if-env-changed=CARGO_CFG_RUSTIX_USE_LIBC"); + + // Rerun this script if any of our features or configuration flags change, + // or if the toolchain we used for feature detection changes. + println!("cargo:rerun-if-env-changed=CARGO_FEATURE_USE_LIBC"); + println!("cargo:rerun-if-env-changed=CARGO_FEATURE_RUSTC_DEP_OF_STD"); + println!("cargo:rerun-if-env-changed=CARGO_CFG_MIRI"); +} + +fn use_static_assertions() -> bool { + // `offset_from` was made const in Rust 1.65. + can_compile("const unsafe fn foo(p: *const u8) -> isize { p.offset_from(p) }") +} + +fn use_thumb_mode() -> bool { + // In thumb mode, r7 is reserved. + !can_compile("pub unsafe fn f() { core::arch::asm!(\"udf #16\", in(\"r7\") 0); }") +} + +fn has_lower_upper_exp_for_non_zero() -> bool { + // LowerExp/UpperExp for NonZero* were added in Rust 1.84. + // + can_compile("fn a(x: &core::num::NonZeroI32, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { core::fmt::LowerExp::fmt(x, f) }") +} + +fn use_feature_or_nothing(feature: &str) { + if has_feature(feature) { + use_feature(feature); + } +} + +fn use_feature(feature: &str) { + println!("cargo:rustc-cfg={}", feature); +} + +/// Test whether the rustc at `var("RUSTC")` supports the given feature. +fn has_feature(feature: &str) -> bool { + can_compile(format!( + "#![allow(stable_features)]\n#![feature({})]", + feature + )) +} + +/// Test whether the rustc at `var("RUSTC")` can compile the given code. 
+fn can_compile>(test: T) -> bool { + use std::process::Stdio; + + let rustc = var("RUSTC").unwrap(); + let target = var("TARGET").unwrap(); + + // Use `RUSTC_WRAPPER` if it's set, unless it's set to an empty string, as + // documented [here]. + // [here]: https://doc.rust-lang.org/cargo/reference/environment-variables.html#environment-variables-cargo-reads + let wrapper = var("RUSTC_WRAPPER") + .ok() + .and_then(|w| if w.is_empty() { None } else { Some(w) }); + + let mut cmd = if let Some(wrapper) = wrapper { + let mut cmd = std::process::Command::new(wrapper); + // The wrapper's first argument is supposed to be the path to rustc. + cmd.arg(rustc); + cmd + } else { + std::process::Command::new(rustc) + }; + + let out_dir = var("OUT_DIR").unwrap(); + let out_file = PathBuf::from(out_dir).join("rustix_test_can_compile"); + cmd.arg("--crate-type=rlib") // Don't require `main`. + .arg("--emit=metadata") // Do as little as possible but still parse. + .arg("--target") + .arg(target) + .arg("-o") + .arg(out_file) + .stdout(Stdio::null()); // We don't care about the output (only whether it builds or not) + + // If Cargo wants to set RUSTFLAGS, use that. + if let Ok(rustflags) = var("CARGO_ENCODED_RUSTFLAGS") { + if !rustflags.is_empty() { + for arg in rustflags.split('\x1f') { + cmd.arg(arg); + } + } + } + + let mut child = cmd + .arg("-") // Read from stdin. + .stdin(Stdio::piped()) // Stdin is a pipe. + .stderr(Stdio::null()) // Errors from feature detection aren't interesting and can be confusing. 
+ .spawn() + .unwrap(); + + writeln!(child.stdin.take().unwrap(), "{}", test.as_ref()).unwrap(); + + child.wait().unwrap().success() +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..cb8a51f72fa0ff14d2ae0b8256fd35f2caa1d60a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "d1345fc39ad597e27e6355341d2b2b40c501625b" + }, + "path_in_vcs": "rustls" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..0ff087e37a52649fd122be2bfe61f8ad11c6126b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/Cargo.lock @@ -0,0 +1,721 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "aho-corasick" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" +dependencies = [ + "memchr", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bumpalo" +version = "3.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + "bitflags", + "textwrap", + "unicode-width", +] + +[[package]] +name = "criterion" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" +dependencies = [ + "atty", + "cast", + "clap", + "criterion-plot", + "csv", + "itertools", + "lazy_static", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_cbor", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + +[[package]] +name = 
"crossbeam-utils" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "csv" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +dependencies = [ + "memchr", +] + +[[package]] +name = "either" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" + +[[package]] +name = "env_logger" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "half" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hermit-abi" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" + +[[package]] +name = "js-sys" +version = "0.3.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.147" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" + +[[package]] +name = "memchr" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76fc44e2588d5b436dbc3c6cf62aef290f90dab6235744a93dfe1cc18f451e2c" + +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num-traits" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +dependencies = [ + "autocfg", +] + +[[package]] 
+name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.2", + "libc", +] + +[[package]] +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" + +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + +[[package]] +name = "plotters" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" + +[[package]] +name = "plotters-svg" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "proc-macro2" +version = "1.0.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rayon" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-utils", + "num_cpus", +] + +[[package]] +name = "regex" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi", +] + +[[package]] +name = "rustls" +version = "0.20.9" +dependencies = [ + "base64 0.13.1", + "criterion", + "env_logger", + "log", + "ring", + "rustls-pemfile", + "rustversion", + "sct", + "webpki", + "webpki-roots", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +dependencies = [ + "base64 0.21.3", +] + +[[package]] +name = "rustversion" +version = "1.0.14" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + +[[package]] +name = "ryu" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "serde" +version = "1.0.188" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_cbor" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +dependencies = [ + "half", + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.188" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" +dependencies = [ + "itoa", + "ryu", + 
"serde", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "syn" +version = "2.0.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "termcolor" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "unicode-ident" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" + +[[package]] +name = "unicode-width" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "walkdir" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" 
+dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" + +[[package]] +name = "web-sys" +version = "0.3.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = 
"0.22.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" +dependencies = [ + "webpki", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..02b7a12a3fa4c2ab8ce41428f70b06343c271625 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/Cargo.toml @@ -0,0 +1,107 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). 
+# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.60" +name = "rustls" +version = "0.20.9" +build = "build.rs" +autobenches = false +description = "Rustls is a modern TLS library written in Rust." +homepage = "https://github.com/rustls/rustls" +readme = "README.md" +categories = [ + "network-programming", + "cryptography", +] +license = "Apache-2.0/ISC/MIT" +repository = "https://github.com/rustls/rustls" +resolver = "2" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[[example]] +name = "bogo_shim" +path = "examples/internal/bogo_shim.rs" +required-features = [ + "dangerous_configuration", + "quic", +] + +[[example]] +name = "trytls_shim" +path = "examples/internal/trytls_shim.rs" + +[[example]] +name = "bench" +path = "examples/internal/bench.rs" + +[[bench]] +name = "benchmarks" +path = "benches/benchmarks.rs" +harness = false + +[dependencies.log] +version = "0.4.4" +optional = true + +[dependencies.ring] +version = "0.16.20" + +[dependencies.sct] +version = "0.7.0" + +[dependencies.webpki] +version = "0.22.0" +features = [ + "alloc", + "std", +] + +[dev-dependencies.base64] +version = "0.13.0" + +[dev-dependencies.criterion] +version = "0.3.0" + +[dev-dependencies.env_logger] +version = "0.9.0" + +[dev-dependencies.log] +version = "0.4.4" + +[dev-dependencies.rustls-pemfile] +version = "1.0.0" + +[dev-dependencies.webpki-roots] +version = "0.22.0" + +[build-dependencies.rustversion] +version = "1.0.6" +optional = true + +[features] +dangerous_configuration = [] +default = [ + "logging", + "tls12", +] +logging = ["log"] +quic = [] +read_buf = ["rustversion"] +secret_extraction = [] +tls12 = [] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/Cargo.toml.orig new file mode 100644 index 
0000000000000000000000000000000000000000..cfcafbbfb9425a075efba87398ff745f39902777 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/Cargo.toml.orig @@ -0,0 +1,62 @@ +[package] +name = "rustls" +version = "0.20.9" +edition = "2018" +rust-version = "1.60" +license = "Apache-2.0/ISC/MIT" +readme = "../README.md" +description = "Rustls is a modern TLS library written in Rust." +homepage = "https://github.com/rustls/rustls" +repository = "https://github.com/rustls/rustls" +categories = ["network-programming", "cryptography"] +autobenches = false +build = "build.rs" +resolver = "2" + +[build-dependencies] +rustversion = { version = "1.0.6", optional = true } + +[dependencies] +log = { version = "0.4.4", optional = true } +ring = "0.16.20" +sct = "0.7.0" +webpki = { version = "0.22.0", features = ["alloc", "std"] } + +[features] +default = ["logging", "tls12"] +logging = ["log"] +dangerous_configuration = [] +secret_extraction = [] +quic = [] +tls12 = [] +read_buf = ["rustversion"] + +[dev-dependencies] +env_logger = "0.9.0" +log = "0.4.4" +webpki-roots = "0.22.0" +criterion = "0.3.0" +rustls-pemfile = "1.0.0" +base64 = "0.13.0" + +[[example]] +name = "bogo_shim" +path = "examples/internal/bogo_shim.rs" +required-features = ["dangerous_configuration", "quic"] + +[[example]] +name = "trytls_shim" +path = "examples/internal/trytls_shim.rs" + +[[example]] +name = "bench" +path = "examples/internal/bench.rs" + +[[bench]] +name = "benchmarks" +path = "benches/benchmarks.rs" +harness = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..16fe87b06e802f094b3fbb0894b137bca2b16ef1 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, 
in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/LICENSE-ISC b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/LICENSE-ISC new file mode 100644 index 0000000000000000000000000000000000000000..03acf1bd2c44c9c10429d0ee4d05ee1e92f9e3f9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/LICENSE-ISC @@ -0,0 +1,15 @@ +ISC License (ISC) +Copyright (c) 2016, Joseph Birr-Pixton + +Permission to use, copy, modify, and/or distribute this software for +any purpose with or without fee is hereby granted, provided that the +above copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL +WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE +AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..ef480e6f0936ef5d006d6e3ce9ea8536232b3c88 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 Joseph Birr-Pixton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/benches/benchmarks.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/benches/benchmarks.rs new file mode 100644 index 0000000000000000000000000000000000000000..08191be8659ccf1b573358eb6dc0029a674681b1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/benches/benchmarks.rs @@ -0,0 +1,26 @@ +use criterion::criterion_group; +use criterion::criterion_main; +/// Microbenchmarks go here. Larger benchmarks of (e.g..) protocol +/// performance go in examples/internal/bench.rs. +use criterion::Criterion; + +#[path = "../tests/common/mod.rs"] +mod test_utils; +use test_utils::*; + +use rustls::ServerConnection; + +use std::io; +use std::sync::Arc; + +fn bench_ewouldblock(c: &mut Criterion) { + let server_config = make_server_config(KeyType::Rsa); + let mut server = ServerConnection::new(Arc::new(server_config)).unwrap(); + let mut read_ewouldblock = FailsReads::new(io::ErrorKind::WouldBlock); + c.bench_function("read_tls with EWOULDBLOCK", move |b| { + b.iter(|| server.read_tls(&mut read_ewouldblock)) + }); +} + +criterion_group!(benches, bench_ewouldblock); +criterion_main!(benches); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/build.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..9c73252a655faaa38ea42f1788f6470ad2cb4f79 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/build.rs @@ -0,0 +1,13 @@ +/// This build script allows us to enable the `read_buf` language feature only +/// for Rust Nightly. +/// +/// See the comment in lib.rs to understand why we need this. 
+ +#[cfg_attr(feature = "read_buf", rustversion::not(nightly))] +fn main() {} + +#[cfg(feature = "read_buf")] +#[rustversion::nightly] +fn main() { + println!("cargo:rustc-cfg=read_buf"); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/anchors.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/anchors.rs new file mode 100644 index 0000000000000000000000000000000000000000..4caf3e7d81679e7487e2a9f55d2da85b96e15605 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/anchors.rs @@ -0,0 +1,154 @@ +use crate::key; +#[cfg(feature = "logging")] +use crate::log::{debug, trace}; +use crate::msgs::handshake::{DistinguishedName, DistinguishedNames}; +use crate::x509; + +/// A trust anchor, commonly known as a "Root Certificate." +#[derive(Debug, Clone)] +pub struct OwnedTrustAnchor { + subject: Vec, + spki: Vec, + name_constraints: Option>, +} + +impl OwnedTrustAnchor { + /// Get a `webpki::TrustAnchor` by borrowing the owned elements. + pub(crate) fn to_trust_anchor(&self) -> webpki::TrustAnchor { + webpki::TrustAnchor { + subject: &self.subject, + spki: &self.spki, + name_constraints: self.name_constraints.as_deref(), + } + } + + /// Constructs an `OwnedTrustAnchor` from its components. + /// + /// All inputs are DER-encoded. + /// + /// `subject` is the [Subject] field of the trust anchor. + /// + /// `spki` is the [SubjectPublicKeyInfo] field of the trust anchor. + /// + /// `name_constraints` is the [Name Constraints] to + /// apply for this trust anchor, if any. 
+ /// + /// [Subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + /// [SubjectPublicKeyInfo]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.7 + /// [Name Constraints]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.10 + pub fn from_subject_spki_name_constraints( + subject: impl Into>, + spki: impl Into>, + name_constraints: Option>>, + ) -> Self { + Self { + subject: subject.into(), + spki: spki.into(), + name_constraints: name_constraints.map(|x| x.into()), + } + } + + /// Return the subject field. + /// + /// This can be decoded using [x509-parser's FromDer trait](https://docs.rs/x509-parser/latest/x509_parser/traits/trait.FromDer.html). + /// + /// ```ignore + /// use x509_parser::traits::FromDer; + /// println!("{}", x509_parser::x509::X509Name::from_der(anchor.subject())?.1); + /// ``` + pub fn subject(&self) -> &[u8] { + &self.subject + } +} + +/// A container for root certificates able to provide a root-of-trust +/// for connection authentication. +#[derive(Debug, Clone)] +pub struct RootCertStore { + /// The list of roots. + pub roots: Vec, +} + +impl RootCertStore { + /// Make a new, empty `RootCertStore`. + pub fn empty() -> Self { + Self { roots: Vec::new() } + } + + /// Return true if there are no certificates. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Say how many certificates are in the container. + pub fn len(&self) -> usize { + self.roots.len() + } + + /// Return the Subject Names for certificates in the container. + #[deprecated(since = "0.20.7", note = "Use OwnedTrustAnchor::subject() instead")] + pub fn subjects(&self) -> DistinguishedNames { + let mut r = DistinguishedNames::new(); + + for ota in &self.roots { + let mut name = Vec::new(); + name.extend_from_slice(&ota.subject); + x509::wrap_in_sequence(&mut name); + r.push(DistinguishedName::new(name)); + } + + r + } + + /// Add a single DER-encoded certificate to the store. 
+ pub fn add(&mut self, der: &key::Certificate) -> Result<(), webpki::Error> { + let ta = webpki::TrustAnchor::try_from_cert_der(&der.0)?; + let ota = OwnedTrustAnchor::from_subject_spki_name_constraints( + ta.subject, + ta.spki, + ta.name_constraints, + ); + self.roots.push(ota); + Ok(()) + } + + /// Adds all the given TrustAnchors `anchors`. This does not + /// fail. + pub fn add_server_trust_anchors( + &mut self, + trust_anchors: impl Iterator, + ) { + self.roots.extend(trust_anchors) + } + + /// Parse the given DER-encoded certificates and add all that can be parsed + /// in a best-effort fashion. + /// + /// This is because large collections of root certificates often + /// include ancient or syntactically invalid certificates. + /// + /// Returns the number of certificates added, and the number that were ignored. + pub fn add_parsable_certificates(&mut self, der_certs: &[Vec]) -> (usize, usize) { + let mut valid_count = 0; + let mut invalid_count = 0; + + for der_cert in der_certs { + #[cfg_attr(not(feature = "logging"), allow(unused_variables))] + match self.add(&key::Certificate(der_cert.clone())) { + Ok(_) => valid_count += 1, + Err(err) => { + trace!("invalid cert der {:?}", der_cert); + debug!("certificate parsing failed: {:?}", err); + invalid_count += 1 + } + } + } + + debug!( + "add_parsable_certificates processed {} valid and {} invalid certs", + valid_count, invalid_count + ); + + (valid_count, invalid_count) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/bs_debug.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/bs_debug.rs new file mode 100644 index 0000000000000000000000000000000000000000..ad73ee6b3ca8a7d45bbf5913dc4d5d189e70201d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/bs_debug.rs @@ -0,0 +1,77 @@ +use std::fmt; + +/// Alternative implementation of `fmt::Debug` for byte slice. 
+/// +/// Standard `Debug` implementation for `[u8]` is comma separated +/// list of numbers. Since large amount of byte strings are in fact +/// ASCII strings or contain a lot of ASCII strings (e. g. HTTP), +/// it is convenient to print strings as ASCII when possible. +/// +/// This struct wraps `&[u8]` just to override `fmt::Debug`. +/// +/// `BsDebug` is not a part of public API of bytes crate. +pub(crate) struct BsDebug<'a>(pub(crate) &'a [u8]); + +impl<'a> fmt::Debug for BsDebug<'a> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(fmt, "b\"")?; + for &c in self.0 { + // https://doc.rust-lang.org/reference.html#byte-escapes + if c == b'\n' { + write!(fmt, "\\n")?; + } else if c == b'\r' { + write!(fmt, "\\r")?; + } else if c == b'\t' { + write!(fmt, "\\t")?; + } else if c == b'\\' || c == b'"' { + write!(fmt, "\\{}", c as char)?; + } else if c == b'\0' { + write!(fmt, "\\0")?; + // ASCII printable + } else if (0x20..0x7f).contains(&c) { + write!(fmt, "{}", c as char)?; + } else { + write!(fmt, "\\x{:02x}", c)?; + } + } + write!(fmt, "\"")?; + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::BsDebug; + + #[test] + fn debug() { + let vec: Vec<_> = (0..0x100).map(|b| b as u8).collect(); + + let expected = "b\"\ + \\0\\x01\\x02\\x03\\x04\\x05\\x06\\x07\ + \\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\ + \\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\ + \\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f\ + \x20!\\\"#$%&'()*+,-./0123456789:;<=>?\ + @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_\ + `abcdefghijklmnopqrstuvwxyz{|}~\\x7f\ + \\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\ + \\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\ + \\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\ + \\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\ + \\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\ + \\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\ + \\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\ + \\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\ + \\xc0\\xc1\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\ + \\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\ 
+ \\xd0\\xd1\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\ + \\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\ + \\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\ + \\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef\ + \\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\ + \\xf8\\xf9\\xfa\\xfb\\xfc\\xfd\\xfe\\xff\""; + + assert_eq!(expected, format!("{:?}", BsDebug(&vec))); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/builder.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..8b63f8c1e35b5595edf9517e727a078ab65aff24 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/builder.rs @@ -0,0 +1,268 @@ +use crate::error::Error; +use crate::kx::{SupportedKxGroup, ALL_KX_GROUPS}; +use crate::suites::{SupportedCipherSuite, DEFAULT_CIPHER_SUITES}; +use crate::versions; + +use std::fmt; +use std::marker::PhantomData; + +/// Building a [`ServerConfig`] or [`ClientConfig`] in a linker-friendly and +/// complete way. +/// +/// Linker-friendly: meaning unused cipher suites, protocol +/// versions, key exchange mechanisms, etc. can be discarded +/// by the linker as they'll be unreferenced. +/// +/// Complete: the type system ensures all decisions required to run a +/// server or client have been made by the time the process finishes. 
+/// +/// Example, to make a [`ServerConfig`]: +/// +/// ```no_run +/// # use rustls::ServerConfig; +/// # let certs = vec![]; +/// # let private_key = rustls::PrivateKey(vec![]); +/// ServerConfig::builder() +/// .with_safe_default_cipher_suites() +/// .with_safe_default_kx_groups() +/// .with_safe_default_protocol_versions() +/// .unwrap() +/// .with_no_client_auth() +/// .with_single_cert(certs, private_key) +/// .expect("bad certificate/key"); +/// ``` +/// +/// This may be shortened to: +/// +/// ```no_run +/// # use rustls::ServerConfig; +/// # let certs = vec![]; +/// # let private_key = rustls::PrivateKey(vec![]); +/// ServerConfig::builder() +/// .with_safe_defaults() +/// .with_no_client_auth() +/// .with_single_cert(certs, private_key) +/// .expect("bad certificate/key"); +/// ``` +/// +/// To make a [`ClientConfig`]: +/// +/// ```no_run +/// # use rustls::ClientConfig; +/// # let root_certs = rustls::RootCertStore::empty(); +/// # let certs = vec![]; +/// # let private_key = rustls::PrivateKey(vec![]); +/// ClientConfig::builder() +/// .with_safe_default_cipher_suites() +/// .with_safe_default_kx_groups() +/// .with_safe_default_protocol_versions() +/// .unwrap() +/// .with_root_certificates(root_certs) +/// .with_single_cert(certs, private_key) +/// .expect("bad certificate/key"); +/// ``` +/// +/// This may be shortened to: +/// +/// ``` +/// # use rustls::ClientConfig; +/// # let root_certs = rustls::RootCertStore::empty(); +/// ClientConfig::builder() +/// .with_safe_defaults() +/// .with_root_certificates(root_certs) +/// .with_no_client_auth(); +/// ``` +/// +/// The types used here fit together like this: +/// +/// 1. Call [`ClientConfig::builder()`] or [`ServerConfig::builder()`] to initialize a builder. +/// 1. You must make a decision on which cipher suites to use, typically +/// by calling [`ConfigBuilder::with_safe_default_cipher_suites()`]. +/// 2. 
Now you must make a decision +/// on key exchange groups: typically by calling +/// [`ConfigBuilder::with_safe_default_kx_groups()`]. +/// 3. Now you must make +/// a decision on which protocol versions to support, typically by calling +/// [`ConfigBuilder::with_safe_default_protocol_versions()`]. +/// 5. Now see [`ConfigBuilder`] or +/// [`ConfigBuilder`] for further steps. +/// +/// [`ServerConfig`]: crate::ServerConfig +/// [`ClientConfig`]: crate::ClientConfig +/// [`ClientConfig::builder()`]: crate::ClientConfig::builder() +/// [`ServerConfig::builder()`]: crate::ServerConfig::builder() +/// [`ConfigBuilder`]: struct.ConfigBuilder.html#impl-3 +/// [`ConfigBuilder`]: struct.ConfigBuilder.html#impl-6 +#[derive(Clone)] +pub struct ConfigBuilder { + pub(crate) state: State, + pub(crate) side: PhantomData, +} + +impl fmt::Debug for ConfigBuilder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let side_name = std::any::type_name::(); + let side_name = side_name + .split("::") + .last() + .unwrap_or(side_name); + f.debug_struct(&format!("ConfigBuilder<{}, _>", side_name)) + .field("state", &self.state) + .finish() + } +} + +/// Config builder state where the caller must supply cipher suites. +/// +/// For more information, see the [`ConfigBuilder`] documentation. +#[derive(Clone, Debug)] +pub struct WantsCipherSuites(pub(crate) ()); + +impl ConfigBuilder { + /// Start side-specific config with defaults for underlying cryptography. + /// + /// If used, this will enable all safe supported cipher suites ([`DEFAULT_CIPHER_SUITES`]), all + /// safe supported key exchange groups ([`ALL_KX_GROUPS`]) and all safe supported protocol + /// versions ([`DEFAULT_VERSIONS`]). + /// + /// These are safe defaults, useful for 99% of applications. 
+ /// + /// [`DEFAULT_VERSIONS`]: versions::DEFAULT_VERSIONS + pub fn with_safe_defaults(self) -> ConfigBuilder { + ConfigBuilder { + state: WantsVerifier { + cipher_suites: DEFAULT_CIPHER_SUITES.to_vec(), + kx_groups: ALL_KX_GROUPS.to_vec(), + versions: versions::EnabledVersions::new(versions::DEFAULT_VERSIONS), + }, + side: self.side, + } + } + + /// Choose a specific set of cipher suites. + pub fn with_cipher_suites( + self, + cipher_suites: &[SupportedCipherSuite], + ) -> ConfigBuilder { + ConfigBuilder { + state: WantsKxGroups { + cipher_suites: cipher_suites.to_vec(), + }, + side: self.side, + } + } + + /// Choose the default set of cipher suites ([`DEFAULT_CIPHER_SUITES`]). + /// + /// Note that this default provides only high-quality suites: there is no need + /// to filter out low-, export- or NULL-strength cipher suites: rustls does not + /// implement these. + pub fn with_safe_default_cipher_suites(self) -> ConfigBuilder { + self.with_cipher_suites(DEFAULT_CIPHER_SUITES) + } +} + +/// Config builder state where the caller must supply key exchange groups. +/// +/// For more information, see the [`ConfigBuilder`] documentation. +#[derive(Clone, Debug)] +pub struct WantsKxGroups { + cipher_suites: Vec, +} + +impl ConfigBuilder { + /// Choose a specific set of key exchange groups. + pub fn with_kx_groups( + self, + kx_groups: &[&'static SupportedKxGroup], + ) -> ConfigBuilder { + ConfigBuilder { + state: WantsVersions { + cipher_suites: self.state.cipher_suites, + kx_groups: kx_groups.to_vec(), + }, + side: self.side, + } + } + + /// Choose the default set of key exchange groups ([`ALL_KX_GROUPS`]). + /// + /// This is a safe default: rustls doesn't implement any poor-quality groups. + pub fn with_safe_default_kx_groups(self) -> ConfigBuilder { + self.with_kx_groups(&ALL_KX_GROUPS) + } +} + +/// Config builder state where the caller must supply TLS protocol versions. +/// +/// For more information, see the [`ConfigBuilder`] documentation. 
+#[derive(Clone, Debug)] +pub struct WantsVersions { + cipher_suites: Vec, + kx_groups: Vec<&'static SupportedKxGroup>, +} + +impl ConfigBuilder { + /// Accept the default protocol versions: both TLS1.2 and TLS1.3 are enabled. + pub fn with_safe_default_protocol_versions( + self, + ) -> Result, Error> { + self.with_protocol_versions(versions::DEFAULT_VERSIONS) + } + + /// Use a specific set of protocol versions. + pub fn with_protocol_versions( + self, + versions: &[&'static versions::SupportedProtocolVersion], + ) -> Result, Error> { + let mut any_usable_suite = false; + for suite in &self.state.cipher_suites { + if versions.contains(&suite.version()) { + any_usable_suite = true; + break; + } + } + + if !any_usable_suite { + return Err(Error::General("no usable cipher suites configured".into())); + } + + if self.state.kx_groups.is_empty() { + return Err(Error::General("no kx groups configured".into())); + } + + Ok(ConfigBuilder { + state: WantsVerifier { + cipher_suites: self.state.cipher_suites, + kx_groups: self.state.kx_groups, + versions: versions::EnabledVersions::new(versions), + }, + side: self.side, + }) + } +} + +/// Config builder state where the caller must supply a verifier. +/// +/// For more information, see the [`ConfigBuilder`] documentation. +#[derive(Clone, Debug)] +pub struct WantsVerifier { + pub(crate) cipher_suites: Vec, + pub(crate) kx_groups: Vec<&'static SupportedKxGroup>, + pub(crate) versions: versions::EnabledVersions, +} + +/// Helper trait to abstract [`ConfigBuilder`] over building a [`ClientConfig`] or [`ServerConfig`]. 
+/// +/// [`ClientConfig`]: crate::ClientConfig +/// [`ServerConfig`]: crate::ServerConfig +pub trait ConfigSide: sealed::Sealed {} + +impl ConfigSide for crate::ClientConfig {} +impl ConfigSide for crate::ServerConfig {} + +mod sealed { + pub trait Sealed {} + impl Sealed for crate::ClientConfig {} + impl Sealed for crate::ServerConfig {} +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/check.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/check.rs new file mode 100644 index 0000000000000000000000000000000000000000..d318343c0b5ce557c7193db82c516b6d2d56f1d7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/check.rs @@ -0,0 +1,77 @@ +use crate::error::Error; +#[cfg(feature = "logging")] +use crate::log::warn; +use crate::msgs::enums::{ContentType, HandshakeType}; +use crate::msgs::message::MessagePayload; + +/// For a Message $m, and a HandshakePayload enum member $payload_type, +/// return Ok(payload) if $m is both a handshake message and one that +/// has the given $payload_type. If not, return Err(rustls::Error) quoting +/// $handshake_type as the expected handshake type. +macro_rules! require_handshake_msg( + ( $m:expr, $handshake_type:path, $payload_type:path ) => ( + match &$m.payload { + MessagePayload::Handshake { parsed: $crate::msgs::handshake::HandshakeMessagePayload { + payload: $payload_type(hm), + .. + }, .. } => Ok(hm), + payload => Err($crate::check::inappropriate_handshake_message( + payload, + &[$crate::msgs::enums::ContentType::Handshake], + &[$handshake_type])) + } + ) +); + +/// Like require_handshake_msg, but moves the payload out of $m. +#[cfg(feature = "tls12")] +macro_rules! require_handshake_msg_move( + ( $m:expr, $handshake_type:path, $payload_type:path ) => ( + match $m.payload { + MessagePayload::Handshake { parsed: $crate::msgs::handshake::HandshakeMessagePayload { + payload: $payload_type(hm), + .. + }, .. 
} => Ok(hm), + payload => + Err($crate::check::inappropriate_handshake_message( + &payload, + &[$crate::msgs::enums::ContentType::Handshake], + &[$handshake_type])) + } + ) +); + +pub(crate) fn inappropriate_message( + payload: &MessagePayload, + content_types: &[ContentType], +) -> Error { + warn!( + "Received a {:?} message while expecting {:?}", + payload.content_type(), + content_types + ); + Error::InappropriateMessage { + expect_types: content_types.to_vec(), + got_type: payload.content_type(), + } +} + +pub(crate) fn inappropriate_handshake_message( + payload: &MessagePayload, + content_types: &[ContentType], + handshake_types: &[HandshakeType], +) -> Error { + match payload { + MessagePayload::Handshake { parsed, .. } => { + warn!( + "Received a {:?} handshake message while expecting {:?}", + parsed.typ, handshake_types + ); + Error::InappropriateHandshakeMessage { + expect_types: handshake_types.to_vec(), + got_type: parsed.typ, + } + } + payload => inappropriate_message(payload, content_types), + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/cipher.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/cipher.rs new file mode 100644 index 0000000000000000000000000000000000000000..b595ca6888d65bf50f6b7142d1370fc9a5313ec4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/cipher.rs @@ -0,0 +1,101 @@ +use crate::error::Error; +use crate::msgs::codec; +use crate::msgs::message::{BorrowedPlainMessage, OpaqueMessage, PlainMessage}; + +use ring::{aead, hkdf}; + +/// Objects with this trait can decrypt TLS messages. +pub trait MessageDecrypter: Send + Sync { + /// Perform the decryption over the concerned TLS message. + + fn decrypt(&self, m: OpaqueMessage, seq: u64) -> Result; +} + +/// Objects with this trait can encrypt TLS messages. 
+pub(crate) trait MessageEncrypter: Send + Sync { + fn encrypt(&self, m: BorrowedPlainMessage, seq: u64) -> Result; +} + +impl dyn MessageEncrypter { + pub(crate) fn invalid() -> Box { + Box::new(InvalidMessageEncrypter {}) + } +} + +impl dyn MessageDecrypter { + pub(crate) fn invalid() -> Box { + Box::new(InvalidMessageDecrypter {}) + } +} + +/// A write or read IV. +#[derive(Default)] +pub(crate) struct Iv(pub(crate) [u8; ring::aead::NONCE_LEN]); + +impl Iv { + #[cfg(feature = "tls12")] + fn new(value: [u8; ring::aead::NONCE_LEN]) -> Self { + Self(value) + } + + #[cfg(feature = "tls12")] + pub(crate) fn copy(value: &[u8]) -> Self { + debug_assert_eq!(value.len(), ring::aead::NONCE_LEN); + let mut iv = Self::new(Default::default()); + iv.0.copy_from_slice(value); + iv + } + + #[cfg(test)] + pub(crate) fn value(&self) -> &[u8; 12] { + &self.0 + } +} + +pub(crate) struct IvLen; + +impl hkdf::KeyType for IvLen { + fn len(&self) -> usize { + aead::NONCE_LEN + } +} + +impl From> for Iv { + fn from(okm: hkdf::Okm) -> Self { + let mut r = Self(Default::default()); + okm.fill(&mut r.0[..]).unwrap(); + r + } +} + +pub(crate) fn make_nonce(iv: &Iv, seq: u64) -> ring::aead::Nonce { + let mut nonce = [0u8; ring::aead::NONCE_LEN]; + codec::put_u64(seq, &mut nonce[4..]); + + nonce + .iter_mut() + .zip(iv.0.iter()) + .for_each(|(nonce, iv)| { + *nonce ^= *iv; + }); + + aead::Nonce::assume_unique_for_key(nonce) +} + +/// A `MessageEncrypter` which doesn't work. +struct InvalidMessageEncrypter {} + +impl MessageEncrypter for InvalidMessageEncrypter { + fn encrypt(&self, _m: BorrowedPlainMessage, _seq: u64) -> Result { + Err(Error::General("encrypt not yet available".to_string())) + } +} + +/// A `MessageDecrypter` which doesn't work. 
+struct InvalidMessageDecrypter {} + +impl MessageDecrypter for InvalidMessageDecrypter { + fn decrypt(&self, _m: OpaqueMessage, _seq: u64) -> Result { + Err(Error::DecryptError) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/builder.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..033388b4c052c86312f93b2bb735cae4576032db --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/builder.rs @@ -0,0 +1,192 @@ +use crate::anchors; +use crate::builder::{ConfigBuilder, WantsVerifier}; +use crate::client::handy; +use crate::client::{ClientConfig, ResolvesClientCert}; +use crate::error::Error; +use crate::key; +use crate::kx::SupportedKxGroup; +use crate::suites::SupportedCipherSuite; +use crate::verify::{self, CertificateTransparencyPolicy}; +use crate::versions; +use crate::NoKeyLog; + +use std::marker::PhantomData; +use std::sync::Arc; +use std::time::SystemTime; + +impl ConfigBuilder { + /// Choose how to verify client certificates. + pub fn with_root_certificates( + self, + root_store: anchors::RootCertStore, + ) -> ConfigBuilder { + ConfigBuilder { + state: WantsTransparencyPolicyOrClientCert { + cipher_suites: self.state.cipher_suites, + kx_groups: self.state.kx_groups, + versions: self.state.versions, + root_store, + }, + side: PhantomData, + } + } + + #[cfg(feature = "dangerous_configuration")] + /// Set a custom certificate verifier. + pub fn with_custom_certificate_verifier( + self, + verifier: Arc, + ) -> ConfigBuilder { + ConfigBuilder { + state: WantsClientCert { + cipher_suites: self.state.cipher_suites, + kx_groups: self.state.kx_groups, + versions: self.state.versions, + verifier, + }, + side: PhantomData, + } + } +} + +/// A config builder state where the caller needs to supply a certificate transparency policy or +/// client certificate resolver. 
+/// +/// In this state, the caller can optionally enable certificate transparency, or ignore CT and +/// invoke one of the methods related to client certificates (as in the [`WantsClientCert`] state). +/// +/// For more information, see the [`ConfigBuilder`] documentation. +#[derive(Clone, Debug)] +pub struct WantsTransparencyPolicyOrClientCert { + cipher_suites: Vec, + kx_groups: Vec<&'static SupportedKxGroup>, + versions: versions::EnabledVersions, + root_store: anchors::RootCertStore, +} + +impl ConfigBuilder { + /// Set Certificate Transparency logs to use for server certificate validation. + /// + /// Because Certificate Transparency logs are sharded on a per-year basis and can be trusted or + /// distrusted relatively quickly, rustls stores a validation deadline. Server certificates will + /// be validated against the configured CT logs until the deadline expires. After the deadline, + /// certificates will no longer be validated, and a warning message will be logged. The deadline + /// may vary depending on how often you deploy builds with updated dependencies. + pub fn with_certificate_transparency_logs( + self, + logs: &'static [&'static sct::Log], + validation_deadline: SystemTime, + ) -> ConfigBuilder { + self.with_logs(Some(CertificateTransparencyPolicy::new( + logs, + validation_deadline, + ))) + } + + /// Sets a single certificate chain and matching private key for use + /// in client authentication. + /// + /// `cert_chain` is a vector of DER-encoded certificates. + /// `key_der` is a DER-encoded RSA, ECDSA, or Ed25519 private key. + /// + /// This function fails if `key_der` is invalid. + pub fn with_single_cert( + self, + cert_chain: Vec, + key_der: key::PrivateKey, + ) -> Result { + self.with_logs(None) + .with_single_cert(cert_chain, key_der) + } + + /// Do not support client auth. 
+ pub fn with_no_client_auth(self) -> ClientConfig { + self.with_logs(None) + .with_client_cert_resolver(Arc::new(handy::FailResolveClientCert {})) + } + + /// Sets a custom [`ResolvesClientCert`]. + pub fn with_client_cert_resolver( + self, + client_auth_cert_resolver: Arc, + ) -> ClientConfig { + self.with_logs(None) + .with_client_cert_resolver(client_auth_cert_resolver) + } + + fn with_logs( + self, + ct_policy: Option, + ) -> ConfigBuilder { + ConfigBuilder { + state: WantsClientCert { + cipher_suites: self.state.cipher_suites, + kx_groups: self.state.kx_groups, + versions: self.state.versions, + verifier: Arc::new(verify::WebPkiVerifier::new( + self.state.root_store, + ct_policy, + )), + }, + side: PhantomData, + } + } +} + +/// A config builder state where the caller needs to supply whether and how to provide a client +/// certificate. +/// +/// For more information, see the [`ConfigBuilder`] documentation. +#[derive(Clone, Debug)] +pub struct WantsClientCert { + cipher_suites: Vec, + kx_groups: Vec<&'static SupportedKxGroup>, + versions: versions::EnabledVersions, + verifier: Arc, +} + +impl ConfigBuilder { + /// Sets a single certificate chain and matching private key for use + /// in client authentication. + /// + /// `cert_chain` is a vector of DER-encoded certificates. + /// `key_der` is a DER-encoded RSA, ECDSA, or Ed25519 private key. + /// + /// This function fails if `key_der` is invalid. + pub fn with_single_cert( + self, + cert_chain: Vec, + key_der: key::PrivateKey, + ) -> Result { + let resolver = handy::AlwaysResolvesClientCert::new(cert_chain, &key_der)?; + Ok(self.with_client_cert_resolver(Arc::new(resolver))) + } + + /// Do not support client auth. + pub fn with_no_client_auth(self) -> ClientConfig { + self.with_client_cert_resolver(Arc::new(handy::FailResolveClientCert {})) + } + + /// Sets a custom [`ResolvesClientCert`]. 
+ pub fn with_client_cert_resolver( + self, + client_auth_cert_resolver: Arc, + ) -> ClientConfig { + ClientConfig { + cipher_suites: self.state.cipher_suites, + kx_groups: self.state.kx_groups, + alpn_protocols: Vec::new(), + session_storage: handy::ClientSessionMemoryCache::new(256), + max_fragment_size: None, + client_auth_cert_resolver, + enable_tickets: true, + versions: self.state.versions, + enable_sni: true, + verifier: self.state.verifier, + key_log: Arc::new(NoKeyLog {}), + #[cfg(feature = "secret_extraction")] + enable_secret_extraction: false, + enable_early_data: false, + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/client_conn.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/client_conn.rs new file mode 100644 index 0000000000000000000000000000000000000000..24137e5bf2c7a2bd4994f8d433e9841b3dab32f3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/client_conn.rs @@ -0,0 +1,667 @@ +use crate::builder::{ConfigBuilder, WantsCipherSuites}; +use crate::conn::{CommonState, ConnectionCommon, Protocol, Side}; +use crate::enums::{CipherSuite, ProtocolVersion, SignatureScheme}; +use crate::error::Error; +use crate::kx::SupportedKxGroup; +#[cfg(feature = "logging")] +use crate::log::trace; +#[cfg(feature = "quic")] +use crate::msgs::enums::AlertDescription; +use crate::msgs::handshake::ClientExtension; +use crate::sign; +use crate::suites::SupportedCipherSuite; +use crate::verify; +use crate::versions; +#[cfg(feature = "secret_extraction")] +use crate::ExtractedSecrets; +use crate::KeyLog; + +use super::hs; +#[cfg(feature = "quic")] +use crate::quic; + +use std::convert::TryFrom; +use std::error::Error as StdError; +use std::marker::PhantomData; +use std::net::IpAddr; +use std::ops::{Deref, DerefMut}; +use std::sync::Arc; +use std::{fmt, io, mem}; + +/// A trait for the ability to store client session data. 
+/// The keys and values are opaque. +/// +/// Both the keys and values should be treated as +/// **highly sensitive data**, containing enough key material +/// to break all security of the corresponding session. +/// +/// `put` is a mutating operation; this isn't expressed +/// in the type system to allow implementations freedom in +/// how to achieve interior mutability. `Mutex` is a common +/// choice. +pub trait StoresClientSessions: Send + Sync { + /// Stores a new `value` for `key`. Returns `true` + /// if the value was stored. + fn put(&self, key: Vec, value: Vec) -> bool; + + /// Returns the latest value for `key`. Returns `None` + /// if there's no such value. + fn get(&self, key: &[u8]) -> Option>; +} + +/// A trait for the ability to choose a certificate chain and +/// private key for the purposes of client authentication. +pub trait ResolvesClientCert: Send + Sync { + /// With the server-supplied acceptable issuers in `acceptable_issuers`, + /// the server's supported signature schemes in `sigschemes`, + /// return a certificate chain and signing key to authenticate. + /// + /// `acceptable_issuers` is undecoded and unverified by the rustls + /// library, but it should be expected to contain a DER encodings + /// of X501 NAMEs. + /// + /// Return None to continue the handshake without any client + /// authentication. The server may reject the handshake later + /// if it requires authentication. + fn resolve( + &self, + acceptable_issuers: &[&[u8]], + sigschemes: &[SignatureScheme], + ) -> Option>; + + /// Return true if any certificates at all are available. + fn has_certs(&self) -> bool; +} + +/// Common configuration for (typically) all connections made by +/// a program. +/// +/// Making one of these can be expensive, and should be +/// once per process rather than once per connection. +/// +/// These must be created via the [`ClientConfig::builder()`] function. 
+/// +/// # Defaults +/// +/// * [`ClientConfig::max_fragment_size`]: the default is `None`: TLS packets are not fragmented to a specific size. +/// * [`ClientConfig::session_storage`]: the default stores 256 sessions in memory. +/// * [`ClientConfig::alpn_protocols`]: the default is empty -- no ALPN protocol is negotiated. +/// * [`ClientConfig::key_log`]: key material is not logged. +#[derive(Clone)] +pub struct ClientConfig { + /// List of ciphersuites, in preference order. + pub(super) cipher_suites: Vec, + + /// List of supported key exchange algorithms, in preference order -- the + /// first element is the highest priority. + /// + /// The first element in this list is the _default key share algorithm_, + /// and in TLS1.3 a key share for it is sent in the client hello. + pub(super) kx_groups: Vec<&'static SupportedKxGroup>, + + /// Which ALPN protocols we include in our client hello. + /// If empty, no ALPN extension is sent. + pub alpn_protocols: Vec>, + + /// How we store session data or tickets. + pub session_storage: Arc, + + /// The maximum size of TLS message we'll emit. If None, we don't limit TLS + /// message lengths except to the 2**16 limit specified in the standard. + /// + /// rustls enforces an arbitrary minimum of 32 bytes for this field. + /// Out of range values are reported as errors from ClientConnection::new. + /// + /// Setting this value to the TCP MSS may improve latency for stream-y workloads. + pub max_fragment_size: Option, + + /// How to decide what client auth certificate/keys to use. + pub client_auth_cert_resolver: Arc, + + /// Whether to support RFC5077 tickets. You must provide a working + /// `session_storage` member for this to have any meaningful + /// effect. + /// + /// The default is true. + pub enable_tickets: bool, + + /// Supported versions, in no particular order. The default + /// is all supported versions. 
+ pub(super) versions: versions::EnabledVersions, + + /// Whether to send the Server Name Indication (SNI) extension + /// during the client handshake. + /// + /// The default is true. + pub enable_sni: bool, + + /// How to verify the server certificate chain. + pub(super) verifier: Arc, + + /// How to output key material for debugging. The default + /// does nothing. + pub key_log: Arc, + + /// Allows traffic secrets to be extracted after the handshake, + /// e.g. for kTLS setup. + #[cfg(feature = "secret_extraction")] + pub enable_secret_extraction: bool, + + /// Whether to send data on the first flight ("early data") in + /// TLS 1.3 handshakes. + /// + /// The default is false. + pub enable_early_data: bool, +} + +impl fmt::Debug for ClientConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ClientConfig") + .field("alpn_protocols", &self.alpn_protocols) + .field("max_fragment_size", &self.max_fragment_size) + .field("enable_tickets", &self.enable_tickets) + .field("enable_sni", &self.enable_sni) + .field("enable_early_data", &self.enable_early_data) + .finish_non_exhaustive() + } +} + +impl ClientConfig { + /// Create a builder to build up the client configuration. + /// + /// For more information, see the [`ConfigBuilder`] documentation. + pub fn builder() -> ConfigBuilder { + ConfigBuilder { + state: WantsCipherSuites(()), + side: PhantomData, + } + } + + #[doc(hidden)] + /// We support a given TLS version if it's quoted in the configured + /// versions *and* at least one ciphersuite for this version is + /// also configured. + pub fn supports_version(&self, v: ProtocolVersion) -> bool { + self.versions.contains(v) + && self + .cipher_suites + .iter() + .any(|cs| cs.version().version == v) + } + + /// Access configuration options whose use is dangerous and requires + /// extra care. 
+ #[cfg(feature = "dangerous_configuration")] + pub fn dangerous(&mut self) -> danger::DangerousClientConfig { + danger::DangerousClientConfig { cfg: self } + } + + pub(super) fn find_cipher_suite(&self, suite: CipherSuite) -> Option { + self.cipher_suites + .iter() + .copied() + .find(|&scs| scs.suite() == suite) + } +} + +/// Encodes ways a client can know the expected name of the server. +/// +/// This currently covers knowing the DNS name of the server, but +/// will be extended in the future to supporting privacy-preserving names +/// for the server ("ECH"). For this reason this enum is `non_exhaustive`. +/// +/// # Making one +/// +/// If you have a DNS name as a `&str`, this type implements `TryFrom<&str>`, +/// so you can do: +/// +/// ``` +/// # use std::convert::{TryInto, TryFrom}; +/// # use rustls::ServerName; +/// ServerName::try_from("example.com").expect("invalid DNS name"); +/// +/// // or, alternatively... +/// +/// let x = "example.com".try_into().expect("invalid DNS name"); +/// # let _: ServerName = x; +/// ``` +#[non_exhaustive] +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub enum ServerName { + /// The server is identified by a DNS name. The name + /// is sent in the TLS Server Name Indication (SNI) + /// extension. + DnsName(verify::DnsName), + + /// The server is identified by an IP address. SNI is not + /// done. + IpAddress(IpAddr), +} + +impl ServerName { + /// Return the name that should go in the SNI extension. + /// If [`None`] is returned, the SNI extension is not included + /// in the handshake. + pub(crate) fn for_sni(&self) -> Option { + match self { + Self::DnsName(dns_name) => Some(dns_name.0.as_ref()), + Self::IpAddress(_) => None, + } + } + + /// Return a prefix-free, unique encoding for the name. 
+ pub(crate) fn encode(&self) -> Vec { + enum UniqueTypeCode { + DnsName = 0x01, + IpAddr = 0x02, + } + + match self { + Self::DnsName(dns_name) => { + let bytes = dns_name.0.as_ref(); + + let mut r = Vec::with_capacity(2 + bytes.as_ref().len()); + r.push(UniqueTypeCode::DnsName as u8); + r.push(bytes.as_ref().len() as u8); + r.extend_from_slice(bytes.as_ref()); + + r + } + Self::IpAddress(address) => { + let string = address.to_string(); + let bytes = string.as_bytes(); + + let mut r = Vec::with_capacity(2 + bytes.len()); + r.push(UniqueTypeCode::IpAddr as u8); + r.push(bytes.len() as u8); + r.extend_from_slice(bytes); + + r + } + } + } +} + +/// Attempt to make a ServerName from a string by parsing +/// it as a DNS name. +impl TryFrom<&str> for ServerName { + type Error = InvalidDnsNameError; + fn try_from(s: &str) -> Result { + match webpki::DnsNameRef::try_from_ascii_str(s) { + Ok(dns) => Ok(Self::DnsName(verify::DnsName(dns.into()))), + Err(webpki::InvalidDnsNameError) => match s.parse() { + Ok(ip) => Ok(Self::IpAddress(ip)), + Err(_) => Err(InvalidDnsNameError), + }, + } + } +} + +/// The provided input could not be parsed because +/// it is not a syntactically-valid DNS Name. +#[derive(Debug)] +pub struct InvalidDnsNameError; + +impl fmt::Display for InvalidDnsNameError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("invalid dns name") + } +} + +impl StdError for InvalidDnsNameError {} + +/// Container for unsafe APIs +#[cfg(feature = "dangerous_configuration")] +pub(super) mod danger { + use std::sync::Arc; + + use super::verify::ServerCertVerifier; + use super::ClientConfig; + + /// Accessor for dangerous configuration options. 
+ #[derive(Debug)] + #[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))] + pub struct DangerousClientConfig<'a> { + /// The underlying ClientConfig + pub cfg: &'a mut ClientConfig, + } + + impl<'a> DangerousClientConfig<'a> { + /// Overrides the default `ServerCertVerifier` with something else. + pub fn set_certificate_verifier(&mut self, verifier: Arc) { + self.cfg.verifier = verifier; + } + } +} + +#[derive(Debug, PartialEq)] +enum EarlyDataState { + Disabled, + Ready, + Accepted, + AcceptedFinished, + Rejected, +} + +pub(super) struct EarlyData { + state: EarlyDataState, + left: usize, +} + +impl EarlyData { + fn new() -> Self { + Self { + left: 0, + state: EarlyDataState::Disabled, + } + } + + pub(super) fn is_enabled(&self) -> bool { + matches!(self.state, EarlyDataState::Ready | EarlyDataState::Accepted) + } + + fn is_accepted(&self) -> bool { + matches!( + self.state, + EarlyDataState::Accepted | EarlyDataState::AcceptedFinished + ) + } + + pub(super) fn enable(&mut self, max_data: usize) { + assert_eq!(self.state, EarlyDataState::Disabled); + self.state = EarlyDataState::Ready; + self.left = max_data; + } + + pub(super) fn rejected(&mut self) { + trace!("EarlyData rejected"); + self.state = EarlyDataState::Rejected; + } + + pub(super) fn accepted(&mut self) { + trace!("EarlyData accepted"); + assert_eq!(self.state, EarlyDataState::Ready); + self.state = EarlyDataState::Accepted; + } + + pub(super) fn finished(&mut self) { + trace!("EarlyData finished"); + self.state = match self.state { + EarlyDataState::Accepted => EarlyDataState::AcceptedFinished, + _ => panic!("bad EarlyData state"), + } + } + + fn check_write(&mut self, sz: usize) -> io::Result { + match self.state { + EarlyDataState::Disabled => unreachable!(), + EarlyDataState::Ready | EarlyDataState::Accepted => { + let take = if self.left < sz { + mem::replace(&mut self.left, 0) + } else { + self.left -= sz; + sz + }; + + Ok(take) + } + EarlyDataState::Rejected | 
EarlyDataState::AcceptedFinished => { + Err(io::Error::from(io::ErrorKind::InvalidInput)) + } + } + } + + fn bytes_left(&self) -> usize { + self.left + } +} + +/// Stub that implements io::Write and dispatches to `write_early_data`. +pub struct WriteEarlyData<'a> { + sess: &'a mut ClientConnection, +} + +impl<'a> WriteEarlyData<'a> { + fn new(sess: &'a mut ClientConnection) -> WriteEarlyData<'a> { + WriteEarlyData { sess } + } + + /// How many bytes you may send. Writes will become short + /// once this reaches zero. + pub fn bytes_left(&self) -> usize { + self.sess + .inner + .data + .early_data + .bytes_left() + } +} + +impl<'a> io::Write for WriteEarlyData<'a> { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.sess.write_early_data(buf) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +/// This represents a single TLS client connection. +pub struct ClientConnection { + inner: ConnectionCommon, +} + +impl fmt::Debug for ClientConnection { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("ClientConnection") + .finish() + } +} + +impl ClientConnection { + /// Make a new ClientConnection. `config` controls how + /// we behave in the TLS protocol, `name` is the + /// name of the server we want to talk to. 
+ pub fn new(config: Arc, name: ServerName) -> Result { + Self::new_inner(config, name, Vec::new(), Protocol::Tcp) + } + + fn new_inner( + config: Arc, + name: ServerName, + extra_exts: Vec, + proto: Protocol, + ) -> Result { + let mut common_state = CommonState::new(Side::Client); + common_state.set_max_fragment_size(config.max_fragment_size)?; + common_state.protocol = proto; + #[cfg(feature = "secret_extraction")] + { + common_state.enable_secret_extraction = config.enable_secret_extraction; + } + let mut data = ClientConnectionData::new(); + + let mut cx = hs::ClientContext { + common: &mut common_state, + data: &mut data, + }; + + let state = hs::start_handshake(name, extra_exts, config, &mut cx)?; + let inner = ConnectionCommon::new(state, data, common_state); + + Ok(Self { inner }) + } + + /// Returns an `io::Write` implementer you can write bytes to + /// to send TLS1.3 early data (a.k.a. "0-RTT data") to the server. + /// + /// This returns None in many circumstances when the capability to + /// send early data is not available, including but not limited to: + /// + /// - The server hasn't been talked to previously. + /// - The server does not support resumption. + /// - The server does not support early data. + /// - The resumption data for the server has expired. + /// + /// The server specifies a maximum amount of early data. You can + /// learn this limit through the returned object, and writes through + /// it will process only this many bytes. + /// + /// The server can choose not to accept any sent early data -- + /// in this case the data is lost but the connection continues. You + /// can tell this happened using `is_early_data_accepted`. + pub fn early_data(&mut self) -> Option { + if self.inner.data.early_data.is_enabled() { + Some(WriteEarlyData::new(self)) + } else { + None + } + } + + /// Returns True if the server signalled it will process early data. 
+ /// + /// If you sent early data and this returns false at the end of the + /// handshake then the server will not process the data. This + /// is not an error, but you may wish to resend the data. + pub fn is_early_data_accepted(&self) -> bool { + self.inner.data.early_data.is_accepted() + } + + fn write_early_data(&mut self, data: &[u8]) -> io::Result { + self.inner + .data + .early_data + .check_write(data.len()) + .map(|sz| { + self.inner + .common_state + .send_early_plaintext(&data[..sz]) + }) + } + + /// Extract secrets, so they can be used when configuring kTLS, for example. + #[cfg(feature = "secret_extraction")] + pub fn extract_secrets(self) -> Result { + self.inner.extract_secrets() + } +} + +impl Deref for ClientConnection { + type Target = ConnectionCommon; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for ClientConnection { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +#[doc(hidden)] +impl<'a> TryFrom<&'a mut crate::Connection> for &'a mut ClientConnection { + type Error = (); + + fn try_from(value: &'a mut crate::Connection) -> Result { + use crate::Connection::*; + match value { + Client(conn) => Ok(conn), + Server(_) => Err(()), + } + } +} + +impl From for crate::Connection { + fn from(conn: ClientConnection) -> Self { + Self::Client(conn) + } +} + +/// State associated with a client connection. 
+pub struct ClientConnectionData { + pub(super) early_data: EarlyData, + pub(super) resumption_ciphersuite: Option, +} + +impl ClientConnectionData { + fn new() -> Self { + Self { + early_data: EarlyData::new(), + resumption_ciphersuite: None, + } + } +} + +impl crate::conn::SideData for ClientConnectionData {} + +#[cfg(feature = "quic")] +impl quic::QuicExt for ClientConnection { + fn quic_transport_parameters(&self) -> Option<&[u8]> { + self.inner + .common_state + .quic + .params + .as_ref() + .map(|v| v.as_ref()) + } + + fn zero_rtt_keys(&self) -> Option { + Some(quic::DirectionalKeys::new( + self.inner + .data + .resumption_ciphersuite + .and_then(|suite| suite.tls13())?, + self.inner + .common_state + .quic + .early_secret + .as_ref()?, + )) + } + + fn read_hs(&mut self, plaintext: &[u8]) -> Result<(), Error> { + self.inner.read_quic_hs(plaintext) + } + + fn write_hs(&mut self, buf: &mut Vec) -> Option { + quic::write_hs(&mut self.inner.common_state, buf) + } + + fn alert(&self) -> Option { + self.inner.common_state.quic.alert + } +} + +/// Methods specific to QUIC client sessions +#[cfg(feature = "quic")] +#[cfg_attr(docsrs, doc(cfg(feature = "quic")))] +pub trait ClientQuicExt { + /// Make a new QUIC ClientConnection. This differs from `ClientConnection::new()` + /// in that it takes an extra argument, `params`, which contains the + /// TLS-encoded transport parameters to send. 
+ fn new_quic( + config: Arc, + quic_version: quic::Version, + name: ServerName, + params: Vec, + ) -> Result { + if !config.supports_version(ProtocolVersion::TLSv1_3) { + return Err(Error::General( + "TLS 1.3 support is required for QUIC".into(), + )); + } + + let ext = match quic_version { + quic::Version::V1Draft => ClientExtension::TransportParametersDraft(params), + quic::Version::V1 => ClientExtension::TransportParameters(params), + }; + + ClientConnection::new_inner(config, name, vec![ext], Protocol::Quic) + } +} + +#[cfg(feature = "quic")] +impl ClientQuicExt for ClientConnection {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/common.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/common.rs new file mode 100644 index 0000000000000000000000000000000000000000..ac9094dea4d0ec41f1f760907ee998768bac1922 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/common.rs @@ -0,0 +1,114 @@ +use super::ResolvesClientCert; +#[cfg(feature = "logging")] +use crate::log::{debug, trace}; +use crate::msgs::enums::ExtensionType; +use crate::msgs::handshake::CertificatePayload; +use crate::msgs::handshake::SCTList; +use crate::msgs::handshake::ServerExtension; +use crate::{sign, DistinguishedNames, SignatureScheme}; + +use std::sync::Arc; + +#[derive(Debug)] +pub(super) struct ServerCertDetails { + pub(super) cert_chain: CertificatePayload, + pub(super) ocsp_response: Vec, + pub(super) scts: Option, +} + +impl ServerCertDetails { + pub(super) fn new( + cert_chain: CertificatePayload, + ocsp_response: Vec, + scts: Option, + ) -> Self { + Self { + cert_chain, + ocsp_response, + scts, + } + } + + pub(super) fn scts(&self) -> impl Iterator { + self.scts + .as_deref() + .unwrap_or(&[]) + .iter() + .map(|payload| payload.0.as_slice()) + } +} + +pub(super) struct ClientHelloDetails { + pub(super) sent_extensions: Vec, +} + +impl ClientHelloDetails { + 
pub(super) fn new() -> Self { + Self { + sent_extensions: Vec::new(), + } + } + + pub(super) fn server_may_send_sct_list(&self) -> bool { + self.sent_extensions + .contains(&ExtensionType::SCT) + } + + pub(super) fn server_sent_unsolicited_extensions( + &self, + received_exts: &[ServerExtension], + allowed_unsolicited: &[ExtensionType], + ) -> bool { + for ext in received_exts { + let ext_type = ext.get_type(); + if !self.sent_extensions.contains(&ext_type) && !allowed_unsolicited.contains(&ext_type) + { + trace!("Unsolicited extension {:?}", ext_type); + return true; + } + } + + false + } +} + +pub(super) enum ClientAuthDetails { + /// Send an empty `Certificate` and no `CertificateVerify`. + Empty { auth_context_tls13: Option> }, + /// Send a non-empty `Certificate` and a `CertificateVerify`. + Verify { + certkey: Arc, + signer: Box, + auth_context_tls13: Option>, + }, +} + +impl ClientAuthDetails { + pub(super) fn resolve( + resolver: &dyn ResolvesClientCert, + canames: Option<&DistinguishedNames>, + sigschemes: &[SignatureScheme], + auth_context_tls13: Option>, + ) -> Self { + let acceptable_issuers = canames + .map(Vec::as_slice) + .unwrap_or_default() + .iter() + .map(|p| p.0.as_slice()) + .collect::>(); + + if let Some(certkey) = resolver.resolve(&acceptable_issuers, sigschemes) { + if let Some(signer) = certkey.key.choose_scheme(sigschemes) { + debug!("Attempting client auth"); + return Self::Verify { + certkey, + signer, + auth_context_tls13, + }; + } + } + + debug!("Client auth requested but no cert/sigscheme available"); + Self::Empty { auth_context_tls13 } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/handy.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/handy.rs new file mode 100644 index 0000000000000000000000000000000000000000..804887acd856f9c3b8c0f6f7e8d6f2c7843e12c6 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/handy.rs @@ -0,0 +1,161 @@ +use crate::client; +use crate::enums::SignatureScheme; +use crate::error::Error; +use crate::key; +use crate::limited_cache; +use crate::sign; + +use std::sync::{Arc, Mutex}; + +/// An implementer of `StoresClientSessions` which does nothing. +pub struct NoClientSessionStorage {} + +impl client::StoresClientSessions for NoClientSessionStorage { + fn put(&self, _key: Vec, _value: Vec) -> bool { + false + } + + fn get(&self, _key: &[u8]) -> Option> { + None + } +} + +/// An implementer of `StoresClientSessions` that stores everything +/// in memory. It enforces a limit on the number of entries +/// to bound memory usage. +pub struct ClientSessionMemoryCache { + cache: Mutex, Vec>>, +} + +impl ClientSessionMemoryCache { + /// Make a new ClientSessionMemoryCache. `size` is the + /// maximum number of stored sessions. + pub fn new(size: usize) -> Arc { + debug_assert!(size > 0); + Arc::new(Self { + cache: Mutex::new(limited_cache::LimitedCache::new(size)), + }) + } +} + +impl client::StoresClientSessions for ClientSessionMemoryCache { + fn put(&self, key: Vec, value: Vec) -> bool { + self.cache + .lock() + .unwrap() + .insert(key, value); + true + } + + fn get(&self, key: &[u8]) -> Option> { + self.cache + .lock() + .unwrap() + .get(key) + .cloned() + } +} + +pub(super) struct FailResolveClientCert {} + +impl client::ResolvesClientCert for FailResolveClientCert { + fn resolve( + &self, + _acceptable_issuers: &[&[u8]], + _sigschemes: &[SignatureScheme], + ) -> Option> { + None + } + + fn has_certs(&self) -> bool { + false + } +} + +pub(super) struct AlwaysResolvesClientCert(Arc); + +impl AlwaysResolvesClientCert { + pub(super) fn new( + chain: Vec, + priv_key: &key::PrivateKey, + ) -> Result { + let key = sign::any_supported_type(priv_key) + .map_err(|_| Error::General("invalid private key".into()))?; + Ok(Self(Arc::new(sign::CertifiedKey::new(chain, key)))) + } +} 
+ +impl client::ResolvesClientCert for AlwaysResolvesClientCert { + fn resolve( + &self, + _acceptable_issuers: &[&[u8]], + _sigschemes: &[SignatureScheme], + ) -> Option> { + Some(Arc::clone(&self.0)) + } + + fn has_certs(&self) -> bool { + true + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::client::StoresClientSessions; + + #[test] + fn test_noclientsessionstorage_drops_put() { + let c = NoClientSessionStorage {}; + assert!(!c.put(vec![0x01], vec![0x02])); + } + + #[test] + fn test_noclientsessionstorage_denies_gets() { + let c = NoClientSessionStorage {}; + c.put(vec![0x01], vec![0x02]); + assert_eq!(c.get(&[]), None); + assert_eq!(c.get(&[0x01]), None); + assert_eq!(c.get(&[0x02]), None); + } + + #[test] + fn test_clientsessionmemorycache_accepts_put() { + let c = ClientSessionMemoryCache::new(4); + assert!(c.put(vec![0x01], vec![0x02])); + } + + #[test] + fn test_clientsessionmemorycache_persists_put() { + let c = ClientSessionMemoryCache::new(4); + assert!(c.put(vec![0x01], vec![0x02])); + assert_eq!(c.get(&[0x01]), Some(vec![0x02])); + assert_eq!(c.get(&[0x01]), Some(vec![0x02])); + } + + #[test] + fn test_clientsessionmemorycache_overwrites_put() { + let c = ClientSessionMemoryCache::new(4); + assert!(c.put(vec![0x01], vec![0x02])); + assert!(c.put(vec![0x01], vec![0x04])); + assert_eq!(c.get(&[0x01]), Some(vec![0x04])); + } + + #[test] + fn test_clientsessionmemorycache_drops_to_maintain_size_invariant() { + let c = ClientSessionMemoryCache::new(2); + assert!(c.put(vec![0x01], vec![0x02])); + assert!(c.put(vec![0x03], vec![0x04])); + assert!(c.put(vec![0x05], vec![0x06])); + assert!(c.put(vec![0x07], vec![0x08])); + assert!(c.put(vec![0x09], vec![0x0a])); + + let count = c.get(&[0x01]).iter().count() + + c.get(&[0x03]).iter().count() + + c.get(&[0x05]).iter().count() + + c.get(&[0x07]).iter().count() + + c.get(&[0x09]).iter().count(); + + assert!(count < 5); + } +} diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/hs.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/hs.rs new file mode 100644 index 0000000000000000000000000000000000000000..a4724cc25ecbc8c5ccaf6cc3a28c1d4512a3eb42 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/hs.rs @@ -0,0 +1,856 @@ +#[cfg(feature = "logging")] +use crate::bs_debug; +use crate::check::inappropriate_handshake_message; +use crate::conn::{CommonState, ConnectionRandoms, State}; +use crate::enums::{CipherSuite, ProtocolVersion}; +use crate::error::Error; +use crate::hash_hs::HandshakeHashBuffer; +use crate::kx; +#[cfg(feature = "logging")] +use crate::log::{debug, trace}; +use crate::msgs::base::Payload; +#[cfg(feature = "quic")] +use crate::msgs::base::PayloadU16; +use crate::msgs::codec::{Codec, Reader}; +use crate::msgs::enums::{AlertDescription, Compression, ContentType}; +use crate::msgs::enums::{ECPointFormat, PSKKeyExchangeMode}; +use crate::msgs::enums::{ExtensionType, HandshakeType}; +use crate::msgs::handshake::{CertificateStatusRequest, ClientSessionTicket, SCTList}; +use crate::msgs::handshake::{ClientExtension, HasServerExtensions}; +use crate::msgs::handshake::{ClientHelloPayload, HandshakeMessagePayload, HandshakePayload}; +use crate::msgs::handshake::{ConvertProtocolNameList, ProtocolNameList}; +use crate::msgs::handshake::{ECPointFormatList, SupportedPointFormats}; +use crate::msgs::handshake::{HelloRetryRequest, KeyShareEntry}; +use crate::msgs::handshake::{Random, SessionID}; +use crate::msgs::message::{Message, MessagePayload}; +use crate::msgs::persist; +use crate::ticketer::TimeBase; +use crate::tls13::key_schedule::KeyScheduleEarly; +use crate::SupportedCipherSuite; + +#[cfg(feature = "tls12")] +use super::tls12; +use crate::client::client_conn::ClientConnectionData; +use crate::client::common::ClientHelloDetails; +use crate::client::{tls13, ClientConfig, 
ServerName}; + +use std::sync::Arc; + +pub(super) type NextState = Box>; +pub(super) type NextStateOrError = Result; +pub(super) type ClientContext<'a> = crate::conn::Context<'a, ClientConnectionData>; + +fn find_session( + server_name: &ServerName, + config: &ClientConfig, + #[cfg(feature = "quic")] cx: &mut ClientContext<'_>, +) -> Option> { + let key = persist::ClientSessionKey::session_for_server_name(server_name); + let key_buf = key.get_encoding(); + + let value = config + .session_storage + .get(&key_buf) + .or_else(|| { + debug!("No cached session for {:?}", server_name); + None + })?; + + #[allow(unused_mut)] + let mut reader = Reader::init(&value[2..]); + #[allow(clippy::bind_instead_of_map)] // https://github.com/rust-lang/rust-clippy/issues/8082 + CipherSuite::read_bytes(&value[..2]) + .and_then(|suite| { + persist::ClientSessionValue::read(&mut reader, suite, &config.cipher_suites) + }) + .and_then(|resuming| { + let retrieved = persist::Retrieved::new(resuming, TimeBase::now().ok()?); + match retrieved.has_expired() { + false => Some(retrieved), + true => None, + } + }) + .and_then(|resuming| { + #[cfg(feature = "quic")] + if cx.common.is_quic() { + let params = PayloadU16::read(&mut reader)?; + cx.common.quic.params = Some(params.0); + } + Some(resuming) + }) +} + +pub(super) fn start_handshake( + server_name: ServerName, + extra_exts: Vec, + config: Arc, + cx: &mut ClientContext<'_>, +) -> NextStateOrError { + let mut transcript_buffer = HandshakeHashBuffer::new(); + if config + .client_auth_cert_resolver + .has_certs() + { + transcript_buffer.set_client_auth_enabled(); + } + + let support_tls13 = config.supports_version(ProtocolVersion::TLSv1_3); + + let mut session_id: Option = None; + let mut resuming_session = find_session( + &server_name, + &config, + #[cfg(feature = "quic")] + cx, + ); + + let key_share = if support_tls13 { + Some(tls13::initial_key_share(&config, &server_name)?) 
+ } else { + None + }; + + if let Some(_resuming) = &mut resuming_session { + #[cfg(feature = "tls12")] + if let persist::ClientSessionValue::Tls12(inner) = &mut _resuming.value { + // If we have a ticket, we use the sessionid as a signal that + // we're doing an abbreviated handshake. See section 3.4 in + // RFC5077. + if !inner.ticket().is_empty() { + inner.session_id = SessionID::random()?; + } + session_id = Some(inner.session_id); + } + + debug!("Resuming session"); + } else { + debug!("Not resuming any session"); + } + + // https://tools.ietf.org/html/rfc8446#appendix-D.4 + // https://tools.ietf.org/html/draft-ietf-quic-tls-34#section-8.4 + if session_id.is_none() && !cx.common.is_quic() { + session_id = Some(SessionID::random()?); + } + + let random = Random::new()?; + let hello_details = ClientHelloDetails::new(); + let sent_tls13_fake_ccs = false; + let may_send_sct_list = config.verifier.request_scts(); + Ok(emit_client_hello_for_retry( + config, + cx, + resuming_session, + random, + false, + transcript_buffer, + sent_tls13_fake_ccs, + hello_details, + session_id, + None, + server_name, + key_share, + extra_exts, + may_send_sct_list, + None, + )) +} + +struct ExpectServerHello { + config: Arc, + resuming_session: Option>, + server_name: ServerName, + random: Random, + using_ems: bool, + transcript_buffer: HandshakeHashBuffer, + early_key_schedule: Option, + hello: ClientHelloDetails, + offered_key_share: Option, + session_id: SessionID, + sent_tls13_fake_ccs: bool, + suite: Option, +} + +struct ExpectServerHelloOrHelloRetryRequest { + next: ExpectServerHello, + extra_exts: Vec, +} + +fn emit_client_hello_for_retry( + config: Arc, + cx: &mut ClientContext<'_>, + resuming_session: Option>, + random: Random, + using_ems: bool, + mut transcript_buffer: HandshakeHashBuffer, + mut sent_tls13_fake_ccs: bool, + mut hello: ClientHelloDetails, + session_id: Option, + retryreq: Option<&HelloRetryRequest>, + server_name: ServerName, + key_share: Option, + extra_exts: 
Vec, + may_send_sct_list: bool, + suite: Option, +) -> NextState { + // Do we have a SessionID or ticket cached for this host? + let (ticket, resume_version) = if let Some(resuming) = &resuming_session { + match &resuming.value { + persist::ClientSessionValue::Tls13(inner) => { + (inner.ticket().to_vec(), ProtocolVersion::TLSv1_3) + } + #[cfg(feature = "tls12")] + persist::ClientSessionValue::Tls12(inner) => { + (inner.ticket().to_vec(), ProtocolVersion::TLSv1_2) + } + } + } else { + (Vec::new(), ProtocolVersion::Unknown(0)) + }; + + let support_tls12 = config.supports_version(ProtocolVersion::TLSv1_2) && !cx.common.is_quic(); + let support_tls13 = config.supports_version(ProtocolVersion::TLSv1_3); + + let mut supported_versions = Vec::new(); + if support_tls13 { + supported_versions.push(ProtocolVersion::TLSv1_3); + } + + if support_tls12 { + supported_versions.push(ProtocolVersion::TLSv1_2); + } + + // should be unreachable thanks to config builder + assert!(!supported_versions.is_empty()); + + let mut exts = vec![ + ClientExtension::SupportedVersions(supported_versions), + ClientExtension::ECPointFormats(ECPointFormatList::supported()), + ClientExtension::NamedGroups( + config + .kx_groups + .iter() + .map(|skxg| skxg.name) + .collect(), + ), + ClientExtension::SignatureAlgorithms( + config + .verifier + .supported_verify_schemes(), + ), + ClientExtension::ExtendedMasterSecretRequest, + ClientExtension::CertificateStatusRequest(CertificateStatusRequest::build_ocsp()), + ]; + + if let (Some(sni_name), true) = (server_name.for_sni(), config.enable_sni) { + exts.push(ClientExtension::make_sni(sni_name)); + } + + if may_send_sct_list { + exts.push(ClientExtension::SignedCertificateTimestampRequest); + } + + if let Some(key_share) = &key_share { + debug_assert!(support_tls13); + let key_share = KeyShareEntry::new(key_share.group(), key_share.pubkey.as_ref()); + exts.push(ClientExtension::KeyShare(vec![key_share])); + } + + if let Some(cookie) = 
retryreq.and_then(HelloRetryRequest::get_cookie) { + exts.push(ClientExtension::Cookie(cookie.clone())); + } + + if support_tls13 && config.enable_tickets { + // We could support PSK_KE here too. Such connections don't + // have forward secrecy, and are similar to TLS1.2 resumption. + let psk_modes = vec![PSKKeyExchangeMode::PSK_DHE_KE]; + exts.push(ClientExtension::PresharedKeyModes(psk_modes)); + } + + if !config.alpn_protocols.is_empty() { + exts.push(ClientExtension::Protocols(ProtocolNameList::from_slices( + &config + .alpn_protocols + .iter() + .map(|proto| &proto[..]) + .collect::>(), + ))); + } + + // Extra extensions must be placed before the PSK extension + exts.extend(extra_exts.iter().cloned()); + + let fill_in_binder = if support_tls13 + && config.enable_tickets + && resume_version == ProtocolVersion::TLSv1_3 + && !ticket.is_empty() + { + resuming_session + .as_ref() + .and_then(|resuming| match (suite, resuming.tls13()) { + (Some(suite), Some(resuming)) => { + suite + .tls13()? + .can_resume_from(resuming.suite())?; + Some(resuming) + } + (None, Some(resuming)) => Some(resuming), + _ => None, + }) + .map(|resuming| { + tls13::prepare_resumption( + &config, + cx, + ticket, + &resuming, + &mut exts, + retryreq.is_some(), + ); + resuming + }) + } else if config.enable_tickets { + // If we have a ticket, include it. Otherwise, request one. + if ticket.is_empty() { + exts.push(ClientExtension::SessionTicket(ClientSessionTicket::Request)); + } else { + exts.push(ClientExtension::SessionTicket(ClientSessionTicket::Offer( + Payload::new(ticket), + ))); + } + None + } else { + None + }; + + // Note what extensions we sent. + hello.sent_extensions = exts + .iter() + .map(ClientExtension::get_type) + .collect(); + + let session_id = session_id.unwrap_or_else(SessionID::empty); + let mut cipher_suites: Vec<_> = config + .cipher_suites + .iter() + .map(|cs| cs.suite()) + .collect(); + // We don't do renegotiation at all, in fact. 
+ cipher_suites.push(CipherSuite::TLS_EMPTY_RENEGOTIATION_INFO_SCSV); + + let mut chp = HandshakeMessagePayload { + typ: HandshakeType::ClientHello, + payload: HandshakePayload::ClientHello(ClientHelloPayload { + client_version: ProtocolVersion::TLSv1_2, + random, + session_id, + cipher_suites, + compression_methods: vec![Compression::Null], + extensions: exts, + }), + }; + + let early_key_schedule = if let Some(resuming) = fill_in_binder { + let schedule = tls13::fill_in_psk_binder(&resuming, &transcript_buffer, &mut chp); + Some((resuming.suite(), schedule)) + } else { + None + }; + + let ch = Message { + // "This value MUST be set to 0x0303 for all records generated + // by a TLS 1.3 implementation other than an initial ClientHello + // (i.e., one not generated after a HelloRetryRequest)" + version: if retryreq.is_some() { + ProtocolVersion::TLSv1_2 + } else { + ProtocolVersion::TLSv1_0 + }, + payload: MessagePayload::handshake(chp), + }; + + if retryreq.is_some() { + // send dummy CCS to fool middleboxes prior + // to second client hello + tls13::emit_fake_ccs(&mut sent_tls13_fake_ccs, cx.common); + } + + trace!("Sending ClientHello {:#?}", ch); + + transcript_buffer.add_message(&ch); + cx.common.send_msg(ch, false); + + // Calculate the hash of ClientHello and use it to derive EarlyTrafficSecret + let early_key_schedule = early_key_schedule.map(|(resuming_suite, schedule)| { + if !cx.data.early_data.is_enabled() { + return schedule; + } + + tls13::derive_early_traffic_secret( + &*config.key_log, + cx, + resuming_suite, + &schedule, + &mut sent_tls13_fake_ccs, + &transcript_buffer, + &random.0, + ); + schedule + }); + + let next = ExpectServerHello { + config, + resuming_session, + server_name, + random, + using_ems, + transcript_buffer, + early_key_schedule, + hello, + offered_key_share: key_share, + session_id, + sent_tls13_fake_ccs, + suite, + }; + + if support_tls13 && retryreq.is_none() { + Box::new(ExpectServerHelloOrHelloRetryRequest { next, extra_exts 
}) + } else { + Box::new(next) + } +} + +pub(super) fn process_alpn_protocol( + common: &mut CommonState, + config: &ClientConfig, + proto: Option<&[u8]>, +) -> Result<(), Error> { + common.alpn_protocol = proto.map(ToOwned::to_owned); + + if let Some(alpn_protocol) = &common.alpn_protocol { + if !config + .alpn_protocols + .contains(alpn_protocol) + { + return Err(common.illegal_param("server sent non-offered ALPN protocol")); + } + } + + #[cfg(feature = "quic")] + { + // RFC 9001 says: "While ALPN only specifies that servers use this alert, QUIC clients MUST + // use error 0x0178 to terminate a connection when ALPN negotiation fails." We judge that + // the user intended to use ALPN (rather than some out-of-band protocol negotiation + // mechanism) iff any ALPN protocols were configured. This defends against badly-behaved + // servers which accept a connection that requires an application-layer protocol they do not + // understand. + if common.is_quic() && common.alpn_protocol.is_none() && !config.alpn_protocols.is_empty() { + common.send_fatal_alert(AlertDescription::NoApplicationProtocol); + return Err(Error::NoApplicationProtocol); + } + } + + debug!( + "ALPN protocol is {:?}", + common + .alpn_protocol + .as_ref() + .map(|v| bs_debug::BsDebug(v)) + ); + Ok(()) +} + +pub(super) fn sct_list_is_invalid(scts: &SCTList) -> bool { + scts.is_empty() || scts.iter().any(|sct| sct.0.is_empty()) +} + +impl State for ExpectServerHello { + fn handle(mut self: Box, cx: &mut ClientContext<'_>, m: Message) -> NextStateOrError { + let server_hello = + require_handshake_msg!(m, HandshakeType::ServerHello, HandshakePayload::ServerHello)?; + trace!("We got ServerHello {:#?}", server_hello); + + use crate::ProtocolVersion::{TLSv1_2, TLSv1_3}; + let tls13_supported = self.config.supports_version(TLSv1_3); + + let server_version = if server_hello.legacy_version == TLSv1_2 { + server_hello + .get_supported_versions() + .unwrap_or(server_hello.legacy_version) + } else { + 
server_hello.legacy_version + }; + + let version = match server_version { + TLSv1_3 if tls13_supported => TLSv1_3, + TLSv1_2 if self.config.supports_version(TLSv1_2) => { + if cx.data.early_data.is_enabled() && cx.common.early_traffic { + // The client must fail with a dedicated error code if the server + // responds with TLS 1.2 when offering 0-RTT. + return Err(Error::PeerMisbehavedError( + "server chose v1.2 when offering 0-rtt".to_string(), + )); + } + + if server_hello + .get_supported_versions() + .is_some() + { + return Err(cx + .common + .illegal_param("server chose v1.2 using v1.3 extension")); + } + + TLSv1_2 + } + _ => { + cx.common + .send_fatal_alert(AlertDescription::ProtocolVersion); + let msg = match server_version { + TLSv1_2 | TLSv1_3 => "server's TLS version is disabled in client", + _ => "server does not support TLS v1.2/v1.3", + }; + return Err(Error::PeerIncompatibleError(msg.to_string())); + } + }; + + if server_hello.compression_method != Compression::Null { + return Err(cx + .common + .illegal_param("server chose non-Null compression")); + } + + if server_hello.has_duplicate_extension() { + cx.common + .send_fatal_alert(AlertDescription::DecodeError); + return Err(Error::PeerMisbehavedError( + "server sent duplicate extensions".to_string(), + )); + } + + let allowed_unsolicited = [ExtensionType::RenegotiationInfo]; + if self + .hello + .server_sent_unsolicited_extensions(&server_hello.extensions, &allowed_unsolicited) + { + cx.common + .send_fatal_alert(AlertDescription::UnsupportedExtension); + return Err(Error::PeerMisbehavedError( + "server sent unsolicited extension".to_string(), + )); + } + + cx.common.negotiated_version = Some(version); + + // Extract ALPN protocol + if !cx.common.is_tls13() { + process_alpn_protocol(cx.common, &self.config, server_hello.get_alpn_protocol())?; + } + + // If ECPointFormats extension is supplied by the server, it must contain + // Uncompressed. But it's allowed to be omitted. 
+ if let Some(point_fmts) = server_hello.get_ecpoints_extension() { + if !point_fmts.contains(&ECPointFormat::Uncompressed) { + cx.common + .send_fatal_alert(AlertDescription::HandshakeFailure); + return Err(Error::PeerMisbehavedError( + "server does not support uncompressed points".to_string(), + )); + } + } + + let suite = self + .config + .find_cipher_suite(server_hello.cipher_suite) + .ok_or_else(|| { + cx.common + .send_fatal_alert(AlertDescription::HandshakeFailure); + Error::PeerMisbehavedError("server chose non-offered ciphersuite".to_string()) + })?; + + if version != suite.version().version { + return Err(cx + .common + .illegal_param("server chose unusable ciphersuite for version")); + } + + match self.suite { + Some(prev_suite) if prev_suite != suite => { + return Err(cx + .common + .illegal_param("server varied selected ciphersuite")); + } + _ => { + debug!("Using ciphersuite {:?}", suite); + self.suite = Some(suite); + cx.common.suite = Some(suite); + } + } + + // Start our handshake hash, and input the server-hello. + let mut transcript = self + .transcript_buffer + .start_hash(suite.hash_algorithm()); + transcript.add_message(&m); + + let randoms = ConnectionRandoms::new(self.random, server_hello.random); + // For TLS1.3, start message encryption using + // handshake_traffic_secret. + match suite { + SupportedCipherSuite::Tls13(suite) => { + let resuming_session = self + .resuming_session + .and_then(|resuming| match resuming.value { + persist::ClientSessionValue::Tls13(inner) => Some(inner), + #[cfg(feature = "tls12")] + persist::ClientSessionValue::Tls12(_) => None, + }); + + tls13::handle_server_hello( + self.config, + cx, + server_hello, + resuming_session, + self.server_name, + randoms, + suite, + transcript, + self.early_key_schedule, + self.hello, + // We always send a key share when TLS 1.3 is enabled. 
+ self.offered_key_share.unwrap(), + self.sent_tls13_fake_ccs, + ) + } + #[cfg(feature = "tls12")] + SupportedCipherSuite::Tls12(suite) => { + let resuming_session = self + .resuming_session + .and_then(|resuming| match resuming.value { + persist::ClientSessionValue::Tls12(inner) => Some(inner), + persist::ClientSessionValue::Tls13(_) => None, + }); + + tls12::CompleteServerHelloHandling { + config: self.config, + resuming_session, + server_name: self.server_name, + randoms, + using_ems: self.using_ems, + transcript, + } + .handle_server_hello(cx, suite, server_hello, tls13_supported) + } + } + } +} + +impl ExpectServerHelloOrHelloRetryRequest { + fn into_expect_server_hello(self) -> NextState { + Box::new(self.next) + } + + fn handle_hello_retry_request( + self, + cx: &mut ClientContext<'_>, + m: Message, + ) -> NextStateOrError { + let hrr = require_handshake_msg!( + m, + HandshakeType::HelloRetryRequest, + HandshakePayload::HelloRetryRequest + )?; + trace!("Got HRR {:?}", hrr); + + cx.common.check_aligned_handshake()?; + + let cookie = hrr.get_cookie(); + let req_group = hrr.get_requested_key_share_group(); + + // We always send a key share when TLS 1.3 is enabled. + let offered_key_share = self.next.offered_key_share.unwrap(); + + // A retry request is illegal if it contains no cookie and asks for + // retry of a group we already sent. + if cookie.is_none() && req_group == Some(offered_key_share.group()) { + return Err(cx + .common + .illegal_param("server requested hrr with our group")); + } + + // Or has an empty cookie. 
+ if let Some(cookie) = cookie { + if cookie.0.is_empty() { + return Err(cx + .common + .illegal_param("server requested hrr with empty cookie")); + } + } + + // Or has something unrecognised + if hrr.has_unknown_extension() { + cx.common + .send_fatal_alert(AlertDescription::UnsupportedExtension); + return Err(Error::PeerIncompatibleError( + "server sent hrr with unhandled extension".to_string(), + )); + } + + // Or has the same extensions more than once + if hrr.has_duplicate_extension() { + return Err(cx + .common + .illegal_param("server send duplicate hrr extensions")); + } + + // Or asks us to change nothing. + if cookie.is_none() && req_group.is_none() { + return Err(cx + .common + .illegal_param("server requested hrr with no changes")); + } + + // Or does not echo the session_id from our ClientHello: + // + // > the HelloRetryRequest has the same format as a ServerHello message, + // > and the legacy_version, legacy_session_id_echo, cipher_suite, and + // > legacy_compression_method fields have the same meaning + // + // + // and + // + // > A client which receives a legacy_session_id_echo field that does not + // > match what it sent in the ClientHello MUST abort the handshake with an + // > "illegal_parameter" alert. + // + if hrr.session_id != self.next.session_id { + cx.common + .send_fatal_alert(AlertDescription::IllegalParameter); + return Err(Error::PeerMisbehavedError( + "server did not echo the session_id from client hello".to_string(), + )); + } + + // Or asks us to talk a protocol we didn't offer, or doesn't support HRR at all. + match hrr.get_supported_versions() { + Some(ProtocolVersion::TLSv1_3) => { + cx.common.negotiated_version = Some(ProtocolVersion::TLSv1_3); + } + _ => { + return Err(cx + .common + .illegal_param("server requested unsupported version in hrr")); + } + } + + // Or asks us to use a ciphersuite we didn't offer. 
+ let maybe_cs = self + .next + .config + .find_cipher_suite(hrr.cipher_suite); + let cs = match maybe_cs { + Some(cs) => cs, + None => { + return Err(cx + .common + .illegal_param("server requested unsupported cs in hrr")); + } + }; + + // HRR selects the ciphersuite. + cx.common.suite = Some(cs); + + // This is the draft19 change where the transcript became a tree + let transcript = self + .next + .transcript_buffer + .start_hash(cs.hash_algorithm()); + let mut transcript_buffer = transcript.into_hrr_buffer(); + transcript_buffer.add_message(&m); + + // Early data is not allowed after HelloRetryrequest + if cx.data.early_data.is_enabled() { + cx.data.early_data.rejected(); + } + + let may_send_sct_list = self + .next + .hello + .server_may_send_sct_list(); + + let key_share = match req_group { + Some(group) if group != offered_key_share.group() => { + let group = kx::KeyExchange::choose(group, &self.next.config.kx_groups) + .ok_or_else(|| { + cx.common + .illegal_param("server requested hrr with bad group") + })?; + kx::KeyExchange::start(group).ok_or(Error::FailedToGetRandomBytes)? + } + _ => offered_key_share, + }; + + Ok(emit_client_hello_for_retry( + self.next.config, + cx, + self.next.resuming_session, + self.next.random, + self.next.using_ems, + transcript_buffer, + self.next.sent_tls13_fake_ccs, + self.next.hello, + Some(self.next.session_id), + Some(hrr), + self.next.server_name, + Some(key_share), + self.extra_exts, + may_send_sct_list, + Some(cs), + )) + } +} + +impl State for ExpectServerHelloOrHelloRetryRequest { + fn handle(self: Box, cx: &mut ClientContext<'_>, m: Message) -> NextStateOrError { + match m.payload { + MessagePayload::Handshake { + parsed: + HandshakeMessagePayload { + payload: HandshakePayload::ServerHello(..), + .. + }, + .. + } => self + .into_expect_server_hello() + .handle(cx, m), + MessagePayload::Handshake { + parsed: + HandshakeMessagePayload { + payload: HandshakePayload::HelloRetryRequest(..), + .. + }, + .. 
+ } => self.handle_hello_retry_request(cx, m), + payload => Err(inappropriate_handshake_message( + &payload, + &[ContentType::Handshake], + &[HandshakeType::ServerHello, HandshakeType::HelloRetryRequest], + )), + } + } +} + +pub(super) fn send_cert_error_alert(common: &mut CommonState, err: Error) -> Error { + match err { + Error::InvalidCertificateEncoding => { + common.send_fatal_alert(AlertDescription::DecodeError); + } + Error::PeerMisbehavedError(_) => { + common.send_fatal_alert(AlertDescription::IllegalParameter); + } + _ => { + common.send_fatal_alert(AlertDescription::BadCertificate); + } + }; + + err +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/tls12.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/tls12.rs new file mode 100644 index 0000000000000000000000000000000000000000..4caf854a67a8976f2b5aab7527f51628d3c35f69 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/tls12.rs @@ -0,0 +1,1118 @@ +use crate::check::{inappropriate_handshake_message, inappropriate_message}; +use crate::conn::{CommonState, ConnectionRandoms, Side, State}; +use crate::enums::ProtocolVersion; +use crate::error::Error; +use crate::hash_hs::HandshakeHash; +#[cfg(feature = "logging")] +use crate::log::{debug, trace}; +use crate::msgs::base::{Payload, PayloadU8}; +use crate::msgs::ccs::ChangeCipherSpecPayload; +use crate::msgs::codec::Codec; +use crate::msgs::enums::AlertDescription; +use crate::msgs::enums::{ContentType, HandshakeType}; +use crate::msgs::handshake::{ + CertificatePayload, DecomposedSignatureScheme, DigitallySignedStruct, HandshakeMessagePayload, + HandshakePayload, NewSessionTicketPayload, SCTList, ServerECDHParams, SessionID, +}; +use crate::msgs::message::{Message, MessagePayload}; +use crate::msgs::persist; +use crate::sign::Signer; +#[cfg(feature = "secret_extraction")] +use crate::suites::PartiallyExtractedSecrets; +use 
crate::suites::SupportedCipherSuite; +use crate::ticketer::TimeBase; +use crate::tls12::{self, ConnectionSecrets, Tls12CipherSuite}; +use crate::{kx, verify}; + +use super::client_conn::ClientConnectionData; +use super::hs::ClientContext; +use crate::client::common::ClientAuthDetails; +use crate::client::common::ServerCertDetails; +use crate::client::{hs, ClientConfig, ServerName}; + +use ring::agreement::PublicKey; +use ring::constant_time; + +use std::sync::Arc; + +pub(super) use server_hello::CompleteServerHelloHandling; + +mod server_hello { + use crate::msgs::enums::ExtensionType; + use crate::msgs::handshake::HasServerExtensions; + use crate::msgs::handshake::ServerHelloPayload; + + use super::*; + + pub(in crate::client) struct CompleteServerHelloHandling { + pub(in crate::client) config: Arc, + pub(in crate::client) resuming_session: Option, + pub(in crate::client) server_name: ServerName, + pub(in crate::client) randoms: ConnectionRandoms, + pub(in crate::client) using_ems: bool, + pub(in crate::client) transcript: HandshakeHash, + } + + impl CompleteServerHelloHandling { + pub(in crate::client) fn handle_server_hello( + mut self, + cx: &mut ClientContext, + suite: &'static Tls12CipherSuite, + server_hello: &ServerHelloPayload, + tls13_supported: bool, + ) -> hs::NextStateOrError { + server_hello + .random + .write_slice(&mut self.randoms.server); + + // Look for TLS1.3 downgrade signal in server random + // both the server random and TLS12_DOWNGRADE_SENTINEL are + // public values and don't require constant time comparison + let has_downgrade_marker = self.randoms.server[24..] == tls12::DOWNGRADE_SENTINEL; + if tls13_supported && has_downgrade_marker { + return Err(cx + .common + .illegal_param("downgrade to TLS1.2 when TLS1.3 is supported")); + } + + // Doing EMS? + self.using_ems = server_hello.ems_support_acked(); + + // Might the server send a ticket? 
+ let must_issue_new_ticket = if server_hello + .find_extension(ExtensionType::SessionTicket) + .is_some() + { + debug!("Server supports tickets"); + true + } else { + false + }; + + // Might the server send a CertificateStatus between Certificate and + // ServerKeyExchange? + let may_send_cert_status = server_hello + .find_extension(ExtensionType::StatusRequest) + .is_some(); + if may_send_cert_status { + debug!("Server may staple OCSP response"); + } + + // Save any sent SCTs for verification against the certificate. + let server_cert_sct_list = if let Some(sct_list) = server_hello.get_sct_list() { + debug!("Server sent {:?} SCTs", sct_list.len()); + + if hs::sct_list_is_invalid(sct_list) { + let error_msg = "server sent invalid SCT list".to_string(); + return Err(Error::PeerMisbehavedError(error_msg)); + } + Some(sct_list.clone()) + } else { + None + }; + + // See if we're successfully resuming. + if let Some(ref resuming) = self.resuming_session { + if resuming.session_id == server_hello.session_id { + debug!("Server agreed to resume"); + + // Is the server telling lies about the ciphersuite? + if resuming.suite() != suite { + let error_msg = + "abbreviated handshake offered, but with varied cs".to_string(); + return Err(Error::PeerMisbehavedError(error_msg)); + } + + // And about EMS support? + if resuming.extended_ms() != self.using_ems { + let error_msg = "server varied ems support over resume".to_string(); + return Err(Error::PeerMisbehavedError(error_msg)); + } + + let secrets = + ConnectionSecrets::new_resume(self.randoms, suite, resuming.secret()); + self.config.key_log.log( + "CLIENT_RANDOM", + &secrets.randoms.client, + &secrets.master_secret, + ); + cx.common + .start_encryption_tls12(&secrets, Side::Client); + + // Since we're resuming, we verified the certificate and + // proof of possession in the prior session. 
+ cx.common.peer_certificates = Some(resuming.server_cert_chain().to_vec()); + let cert_verified = verify::ServerCertVerified::assertion(); + let sig_verified = verify::HandshakeSignatureValid::assertion(); + + return if must_issue_new_ticket { + Ok(Box::new(ExpectNewTicket { + config: self.config, + secrets, + resuming_session: self.resuming_session, + session_id: server_hello.session_id, + server_name: self.server_name, + using_ems: self.using_ems, + transcript: self.transcript, + resuming: true, + cert_verified, + sig_verified, + })) + } else { + Ok(Box::new(ExpectCcs { + config: self.config, + secrets, + resuming_session: self.resuming_session, + session_id: server_hello.session_id, + server_name: self.server_name, + using_ems: self.using_ems, + transcript: self.transcript, + ticket: None, + resuming: true, + cert_verified, + sig_verified, + })) + }; + } + } + + Ok(Box::new(ExpectCertificate { + config: self.config, + resuming_session: self.resuming_session, + session_id: server_hello.session_id, + server_name: self.server_name, + randoms: self.randoms, + using_ems: self.using_ems, + transcript: self.transcript, + suite, + may_send_cert_status, + must_issue_new_ticket, + server_cert_sct_list, + })) + } + } +} + +struct ExpectCertificate { + config: Arc, + resuming_session: Option, + session_id: SessionID, + server_name: ServerName, + randoms: ConnectionRandoms, + using_ems: bool, + transcript: HandshakeHash, + pub(super) suite: &'static Tls12CipherSuite, + may_send_cert_status: bool, + must_issue_new_ticket: bool, + server_cert_sct_list: Option, +} + +impl State for ExpectCertificate { + fn handle( + mut self: Box, + _cx: &mut ClientContext<'_>, + m: Message, + ) -> hs::NextStateOrError { + self.transcript.add_message(&m); + let server_cert_chain = require_handshake_msg_move!( + m, + HandshakeType::Certificate, + HandshakePayload::Certificate + )?; + + if self.may_send_cert_status { + Ok(Box::new(ExpectCertificateStatusOrServerKx { + config: self.config, + 
resuming_session: self.resuming_session, + session_id: self.session_id, + server_name: self.server_name, + randoms: self.randoms, + using_ems: self.using_ems, + transcript: self.transcript, + suite: self.suite, + server_cert_sct_list: self.server_cert_sct_list, + server_cert_chain, + must_issue_new_ticket: self.must_issue_new_ticket, + })) + } else { + let server_cert = + ServerCertDetails::new(server_cert_chain, vec![], self.server_cert_sct_list); + + Ok(Box::new(ExpectServerKx { + config: self.config, + resuming_session: self.resuming_session, + session_id: self.session_id, + server_name: self.server_name, + randoms: self.randoms, + using_ems: self.using_ems, + transcript: self.transcript, + suite: self.suite, + server_cert, + must_issue_new_ticket: self.must_issue_new_ticket, + })) + } + } +} + +struct ExpectCertificateStatusOrServerKx { + config: Arc, + resuming_session: Option, + session_id: SessionID, + server_name: ServerName, + randoms: ConnectionRandoms, + using_ems: bool, + transcript: HandshakeHash, + suite: &'static Tls12CipherSuite, + server_cert_sct_list: Option, + server_cert_chain: CertificatePayload, + must_issue_new_ticket: bool, +} + +impl State for ExpectCertificateStatusOrServerKx { + fn handle(self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + match m.payload { + MessagePayload::Handshake { + parsed: + HandshakeMessagePayload { + payload: HandshakePayload::ServerKeyExchange(..), + .. + }, + .. 
+ } => Box::new(ExpectServerKx { + config: self.config, + resuming_session: self.resuming_session, + session_id: self.session_id, + server_name: self.server_name, + randoms: self.randoms, + using_ems: self.using_ems, + transcript: self.transcript, + suite: self.suite, + server_cert: ServerCertDetails::new( + self.server_cert_chain, + vec![], + self.server_cert_sct_list, + ), + must_issue_new_ticket: self.must_issue_new_ticket, + }) + .handle(cx, m), + MessagePayload::Handshake { + parsed: + HandshakeMessagePayload { + payload: HandshakePayload::CertificateStatus(..), + .. + }, + .. + } => Box::new(ExpectCertificateStatus { + config: self.config, + resuming_session: self.resuming_session, + session_id: self.session_id, + server_name: self.server_name, + randoms: self.randoms, + using_ems: self.using_ems, + transcript: self.transcript, + suite: self.suite, + server_cert_sct_list: self.server_cert_sct_list, + server_cert_chain: self.server_cert_chain, + must_issue_new_ticket: self.must_issue_new_ticket, + }) + .handle(cx, m), + payload => Err(inappropriate_handshake_message( + &payload, + &[ContentType::Handshake], + &[ + HandshakeType::ServerKeyExchange, + HandshakeType::CertificateStatus, + ], + )), + } + } +} + +struct ExpectCertificateStatus { + config: Arc, + resuming_session: Option, + session_id: SessionID, + server_name: ServerName, + randoms: ConnectionRandoms, + using_ems: bool, + transcript: HandshakeHash, + suite: &'static Tls12CipherSuite, + server_cert_sct_list: Option, + server_cert_chain: CertificatePayload, + must_issue_new_ticket: bool, +} + +impl State for ExpectCertificateStatus { + fn handle( + mut self: Box, + _cx: &mut ClientContext<'_>, + m: Message, + ) -> hs::NextStateOrError { + self.transcript.add_message(&m); + let server_cert_ocsp_response = require_handshake_msg_move!( + m, + HandshakeType::CertificateStatus, + HandshakePayload::CertificateStatus + )? 
+ .into_inner(); + + trace!( + "Server stapled OCSP response is {:?}", + &server_cert_ocsp_response + ); + + let server_cert = ServerCertDetails::new( + self.server_cert_chain, + server_cert_ocsp_response, + self.server_cert_sct_list, + ); + + Ok(Box::new(ExpectServerKx { + config: self.config, + resuming_session: self.resuming_session, + session_id: self.session_id, + server_name: self.server_name, + randoms: self.randoms, + using_ems: self.using_ems, + transcript: self.transcript, + suite: self.suite, + server_cert, + must_issue_new_ticket: self.must_issue_new_ticket, + })) + } +} + +struct ExpectServerKx { + config: Arc, + resuming_session: Option, + session_id: SessionID, + server_name: ServerName, + randoms: ConnectionRandoms, + using_ems: bool, + transcript: HandshakeHash, + suite: &'static Tls12CipherSuite, + server_cert: ServerCertDetails, + must_issue_new_ticket: bool, +} + +impl State for ExpectServerKx { + fn handle(mut self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + let opaque_kx = require_handshake_msg!( + m, + HandshakeType::ServerKeyExchange, + HandshakePayload::ServerKeyExchange + )?; + self.transcript.add_message(&m); + + let ecdhe = opaque_kx + .unwrap_given_kxa(&self.suite.kx) + .ok_or_else(|| { + cx.common + .send_fatal_alert(AlertDescription::DecodeError); + Error::CorruptMessagePayload(ContentType::Handshake) + })?; + + // Save the signature and signed parameters for later verification. 
+ let mut kx_params = Vec::new(); + ecdhe.params.encode(&mut kx_params); + let server_kx = ServerKxDetails::new(kx_params, ecdhe.dss); + + #[cfg_attr(not(feature = "logging"), allow(unused_variables))] + { + debug!("ECDHE curve is {:?}", ecdhe.params.curve_params); + } + + Ok(Box::new(ExpectServerDoneOrCertReq { + config: self.config, + resuming_session: self.resuming_session, + session_id: self.session_id, + server_name: self.server_name, + randoms: self.randoms, + using_ems: self.using_ems, + transcript: self.transcript, + suite: self.suite, + server_cert: self.server_cert, + server_kx, + must_issue_new_ticket: self.must_issue_new_ticket, + })) + } +} + +fn emit_certificate( + transcript: &mut HandshakeHash, + cert_chain: CertificatePayload, + common: &mut CommonState, +) { + let cert = Message { + version: ProtocolVersion::TLSv1_2, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::Certificate, + payload: HandshakePayload::Certificate(cert_chain), + }), + }; + + transcript.add_message(&cert); + common.send_msg(cert, false); +} + +fn emit_clientkx(transcript: &mut HandshakeHash, common: &mut CommonState, pubkey: &PublicKey) { + let mut buf = Vec::new(); + let ecpoint = PayloadU8::new(Vec::from(pubkey.as_ref())); + ecpoint.encode(&mut buf); + let pubkey = Payload::new(buf); + + let ckx = Message { + version: ProtocolVersion::TLSv1_2, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::ClientKeyExchange, + payload: HandshakePayload::ClientKeyExchange(pubkey), + }), + }; + + transcript.add_message(&ckx); + common.send_msg(ckx, false); +} + +fn emit_certverify( + transcript: &mut HandshakeHash, + signer: &dyn Signer, + common: &mut CommonState, +) -> Result<(), Error> { + let message = transcript + .take_handshake_buf() + .ok_or_else(|| Error::General("Expected transcript".to_owned()))?; + + let scheme = signer.scheme(); + let sig = signer.sign(&message)?; + let body = 
DigitallySignedStruct::new(scheme, sig); + + let m = Message { + version: ProtocolVersion::TLSv1_2, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::CertificateVerify, + payload: HandshakePayload::CertificateVerify(body), + }), + }; + + transcript.add_message(&m); + common.send_msg(m, false); + Ok(()) +} + +fn emit_ccs(common: &mut CommonState) { + let ccs = Message { + version: ProtocolVersion::TLSv1_2, + payload: MessagePayload::ChangeCipherSpec(ChangeCipherSpecPayload {}), + }; + + common.send_msg(ccs, false); +} + +fn emit_finished( + secrets: &ConnectionSecrets, + transcript: &mut HandshakeHash, + common: &mut CommonState, +) { + let vh = transcript.get_current_hash(); + let verify_data = secrets.client_verify_data(&vh); + let verify_data_payload = Payload::new(verify_data); + + let f = Message { + version: ProtocolVersion::TLSv1_2, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::Finished, + payload: HandshakePayload::Finished(verify_data_payload), + }), + }; + + transcript.add_message(&f); + common.send_msg(f, true); +} + +struct ServerKxDetails { + kx_params: Vec, + kx_sig: DigitallySignedStruct, +} + +impl ServerKxDetails { + fn new(params: Vec, sig: DigitallySignedStruct) -> Self { + Self { + kx_params: params, + kx_sig: sig, + } + } +} + +// --- Either a CertificateRequest, or a ServerHelloDone. --- +// Existence of the CertificateRequest tells us the server is asking for +// client auth. Otherwise we go straight to ServerHelloDone. 
+struct ExpectServerDoneOrCertReq { + config: Arc, + resuming_session: Option, + session_id: SessionID, + server_name: ServerName, + randoms: ConnectionRandoms, + using_ems: bool, + transcript: HandshakeHash, + suite: &'static Tls12CipherSuite, + server_cert: ServerCertDetails, + server_kx: ServerKxDetails, + must_issue_new_ticket: bool, +} + +impl State for ExpectServerDoneOrCertReq { + fn handle(mut self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + if matches!( + m.payload, + MessagePayload::Handshake { + parsed: HandshakeMessagePayload { + payload: HandshakePayload::CertificateRequest(_), + .. + }, + .. + } + ) { + Box::new(ExpectCertificateRequest { + config: self.config, + resuming_session: self.resuming_session, + session_id: self.session_id, + server_name: self.server_name, + randoms: self.randoms, + using_ems: self.using_ems, + transcript: self.transcript, + suite: self.suite, + server_cert: self.server_cert, + server_kx: self.server_kx, + must_issue_new_ticket: self.must_issue_new_ticket, + }) + .handle(cx, m) + } else { + self.transcript.abandon_client_auth(); + + Box::new(ExpectServerDone { + config: self.config, + resuming_session: self.resuming_session, + session_id: self.session_id, + server_name: self.server_name, + randoms: self.randoms, + using_ems: self.using_ems, + transcript: self.transcript, + suite: self.suite, + server_cert: self.server_cert, + server_kx: self.server_kx, + client_auth: None, + must_issue_new_ticket: self.must_issue_new_ticket, + }) + .handle(cx, m) + } + } +} + +struct ExpectCertificateRequest { + config: Arc, + resuming_session: Option, + session_id: SessionID, + server_name: ServerName, + randoms: ConnectionRandoms, + using_ems: bool, + transcript: HandshakeHash, + suite: &'static Tls12CipherSuite, + server_cert: ServerCertDetails, + server_kx: ServerKxDetails, + must_issue_new_ticket: bool, +} + +impl State for ExpectCertificateRequest { + fn handle( + mut self: Box, + _cx: &mut 
ClientContext<'_>, + m: Message, + ) -> hs::NextStateOrError { + let certreq = require_handshake_msg!( + m, + HandshakeType::CertificateRequest, + HandshakePayload::CertificateRequest + )?; + self.transcript.add_message(&m); + debug!("Got CertificateRequest {:?}", certreq); + + // The RFC jovially describes the design here as 'somewhat complicated' + // and 'somewhat underspecified'. So thanks for that. + // + // We ignore certreq.certtypes as a result, since the information it contains + // is entirely duplicated in certreq.sigschemes. + + const NO_CONTEXT: Option> = None; // TLS 1.2 doesn't use a context. + let client_auth = ClientAuthDetails::resolve( + self.config + .client_auth_cert_resolver + .as_ref(), + Some(&certreq.canames), + &certreq.sigschemes, + NO_CONTEXT, + ); + + Ok(Box::new(ExpectServerDone { + config: self.config, + resuming_session: self.resuming_session, + session_id: self.session_id, + server_name: self.server_name, + randoms: self.randoms, + using_ems: self.using_ems, + transcript: self.transcript, + suite: self.suite, + server_cert: self.server_cert, + server_kx: self.server_kx, + client_auth: Some(client_auth), + must_issue_new_ticket: self.must_issue_new_ticket, + })) + } +} + +struct ExpectServerDone { + config: Arc, + resuming_session: Option, + session_id: SessionID, + server_name: ServerName, + randoms: ConnectionRandoms, + using_ems: bool, + transcript: HandshakeHash, + suite: &'static Tls12CipherSuite, + server_cert: ServerCertDetails, + server_kx: ServerKxDetails, + client_auth: Option, + must_issue_new_ticket: bool, +} + +impl State for ExpectServerDone { + fn handle(self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + match m.payload { + MessagePayload::Handshake { + parsed: + HandshakeMessagePayload { + payload: HandshakePayload::ServerHelloDone, + .. + }, + .. 
+ } => {} + payload => { + return Err(inappropriate_handshake_message( + &payload, + &[ContentType::Handshake], + &[HandshakeType::ServerHelloDone], + )); + } + } + + let mut st = *self; + st.transcript.add_message(&m); + + cx.common.check_aligned_handshake()?; + + trace!("Server cert is {:?}", st.server_cert.cert_chain); + debug!("Server DNS name is {:?}", st.server_name); + + let suite = st.suite; + + // 1. Verify the cert chain. + // 2. Verify any SCTs provided with the certificate. + // 3. Verify that the top certificate signed their kx. + // 4. If doing client auth, send our Certificate. + // 5. Complete the key exchange: + // a) generate our kx pair + // b) emit a ClientKeyExchange containing it + // c) if doing client auth, emit a CertificateVerify + // d) emit a CCS + // e) derive the shared keys, and start encryption + // 6. emit a Finished, our first encrypted message under the new keys. + + // 1. + let (end_entity, intermediates) = st + .server_cert + .cert_chain + .split_first() + .ok_or(Error::NoCertificatesPresented)?; + let now = std::time::SystemTime::now(); + let cert_verified = st + .config + .verifier + .verify_server_cert( + end_entity, + intermediates, + &st.server_name, + &mut st.server_cert.scts(), + &st.server_cert.ocsp_response, + now, + ) + .map_err(|err| hs::send_cert_error_alert(cx.common, err))?; + + // 3. + // Build up the contents of the signed message. + // It's ClientHello.random || ServerHello.random || ServerKeyExchange.params + let sig_verified = { + let mut message = Vec::new(); + message.extend_from_slice(&st.randoms.client); + message.extend_from_slice(&st.randoms.server); + message.extend_from_slice(&st.server_kx.kx_params); + + // Check the signature is compatible with the ciphersuite. 
+ let sig = &st.server_kx.kx_sig; + if !SupportedCipherSuite::from(suite).usable_for_signature_algorithm(sig.scheme.sign()) + { + let error_message = format!( + "peer signed kx with wrong algorithm (got {:?} expect {:?})", + sig.scheme.sign(), + suite.sign + ); + return Err(Error::PeerMisbehavedError(error_message)); + } + + st.config + .verifier + .verify_tls12_signature(&message, &st.server_cert.cert_chain[0], sig) + .map_err(|err| hs::send_cert_error_alert(cx.common, err))? + }; + cx.common.peer_certificates = Some(st.server_cert.cert_chain); + + // 4. + if let Some(client_auth) = &st.client_auth { + let certs = match client_auth { + ClientAuthDetails::Empty { .. } => Vec::new(), + ClientAuthDetails::Verify { certkey, .. } => certkey.cert.clone(), + }; + emit_certificate(&mut st.transcript, certs, cx.common); + } + + // 5a. + let ecdh_params = + tls12::decode_ecdh_params::(cx.common, &st.server_kx.kx_params)?; + let group = + kx::KeyExchange::choose(ecdh_params.curve_params.named_group, &st.config.kx_groups) + .ok_or_else(|| { + Error::PeerMisbehavedError("peer chose an unsupported group".to_string()) + })?; + let kx = kx::KeyExchange::start(group).ok_or(Error::FailedToGetRandomBytes)?; + + // 5b. + let mut transcript = st.transcript; + emit_clientkx(&mut transcript, cx.common, &kx.pubkey); + // nb. EMS handshake hash only runs up to ClientKeyExchange. + let ems_seed = st + .using_ems + .then(|| transcript.get_current_hash()); + + // 5c. + if let Some(ClientAuthDetails::Verify { signer, .. }) = &st.client_auth { + emit_certverify(&mut transcript, signer.as_ref(), cx.common)?; + } + + // 5d. + emit_ccs(cx.common); + + // 5e. Now commit secrets. 
+ let secrets = ConnectionSecrets::from_key_exchange( + kx, + &ecdh_params.public.0, + ems_seed, + st.randoms, + suite, + )?; + + st.config.key_log.log( + "CLIENT_RANDOM", + &secrets.randoms.client, + &secrets.master_secret, + ); + cx.common + .start_encryption_tls12(&secrets, Side::Client); + cx.common + .record_layer + .start_encrypting(); + + // 6. + emit_finished(&secrets, &mut transcript, cx.common); + + if st.must_issue_new_ticket { + Ok(Box::new(ExpectNewTicket { + config: st.config, + secrets, + resuming_session: st.resuming_session, + session_id: st.session_id, + server_name: st.server_name, + using_ems: st.using_ems, + transcript, + resuming: false, + cert_verified, + sig_verified, + })) + } else { + Ok(Box::new(ExpectCcs { + config: st.config, + secrets, + resuming_session: st.resuming_session, + session_id: st.session_id, + server_name: st.server_name, + using_ems: st.using_ems, + transcript, + ticket: None, + resuming: false, + cert_verified, + sig_verified, + })) + } + } +} + +struct ExpectNewTicket { + config: Arc, + secrets: ConnectionSecrets, + resuming_session: Option, + session_id: SessionID, + server_name: ServerName, + using_ems: bool, + transcript: HandshakeHash, + resuming: bool, + cert_verified: verify::ServerCertVerified, + sig_verified: verify::HandshakeSignatureValid, +} + +impl State for ExpectNewTicket { + fn handle( + mut self: Box, + _cx: &mut ClientContext<'_>, + m: Message, + ) -> hs::NextStateOrError { + self.transcript.add_message(&m); + + let nst = require_handshake_msg_move!( + m, + HandshakeType::NewSessionTicket, + HandshakePayload::NewSessionTicket + )?; + + Ok(Box::new(ExpectCcs { + config: self.config, + secrets: self.secrets, + resuming_session: self.resuming_session, + session_id: self.session_id, + server_name: self.server_name, + using_ems: self.using_ems, + transcript: self.transcript, + ticket: Some(nst), + resuming: self.resuming, + cert_verified: self.cert_verified, + sig_verified: self.sig_verified, + })) + } +} + 
+// -- Waiting for their CCS -- +struct ExpectCcs { + config: Arc, + secrets: ConnectionSecrets, + resuming_session: Option, + session_id: SessionID, + server_name: ServerName, + using_ems: bool, + transcript: HandshakeHash, + ticket: Option, + resuming: bool, + cert_verified: verify::ServerCertVerified, + sig_verified: verify::HandshakeSignatureValid, +} + +impl State for ExpectCcs { + fn handle(self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + match m.payload { + MessagePayload::ChangeCipherSpec(..) => {} + payload => { + return Err(inappropriate_message( + &payload, + &[ContentType::ChangeCipherSpec], + )); + } + } + // CCS should not be received interleaved with fragmented handshake-level + // message. + cx.common.check_aligned_handshake()?; + + // nb. msgs layer validates trivial contents of CCS + cx.common + .record_layer + .start_decrypting(); + + Ok(Box::new(ExpectFinished { + config: self.config, + secrets: self.secrets, + resuming_session: self.resuming_session, + session_id: self.session_id, + server_name: self.server_name, + using_ems: self.using_ems, + transcript: self.transcript, + ticket: self.ticket, + resuming: self.resuming, + cert_verified: self.cert_verified, + sig_verified: self.sig_verified, + })) + } +} + +struct ExpectFinished { + config: Arc, + resuming_session: Option, + session_id: SessionID, + server_name: ServerName, + using_ems: bool, + transcript: HandshakeHash, + ticket: Option, + secrets: ConnectionSecrets, + resuming: bool, + cert_verified: verify::ServerCertVerified, + sig_verified: verify::HandshakeSignatureValid, +} + +impl ExpectFinished { + // -- Waiting for their finished -- + fn save_session(&mut self, cx: &mut ClientContext<'_>) { + // Save a ticket. If we got a new ticket, save that. Otherwise, save the + // original ticket again. 
+ let (mut ticket, lifetime) = match self.ticket.take() { + Some(nst) => (nst.ticket.0, nst.lifetime_hint), + None => (Vec::new(), 0), + }; + + if ticket.is_empty() { + if let Some(resuming_session) = &mut self.resuming_session { + ticket = resuming_session.take_ticket(); + } + } + + if self.session_id.is_empty() && ticket.is_empty() { + debug!("Session not saved: server didn't allocate id or ticket"); + return; + } + + let time_now = match TimeBase::now() { + Ok(time_now) => time_now, + #[allow(unused_variables)] + Err(e) => { + debug!("Session not saved: {}", e); + return; + } + }; + + let key = persist::ClientSessionKey::session_for_server_name(&self.server_name); + let value = persist::Tls12ClientSessionValue::new( + self.secrets.suite(), + self.session_id, + ticket, + self.secrets.get_master_secret(), + cx.common + .peer_certificates + .clone() + .unwrap_or_default(), + time_now, + lifetime, + self.using_ems, + ); + + let worked = self + .config + .session_storage + .put(key.get_encoding(), value.get_encoding()); + + if worked { + debug!("Session saved"); + } else { + debug!("Session not saved"); + } + } +} + +impl State for ExpectFinished { + fn handle(self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + let mut st = *self; + let finished = + require_handshake_msg!(m, HandshakeType::Finished, HandshakePayload::Finished)?; + + cx.common.check_aligned_handshake()?; + + // Work out what verify_data we expect. + let vh = st.transcript.get_current_hash(); + let expect_verify_data = st.secrets.server_verify_data(&vh); + + // Constant-time verification of this is relatively unimportant: they only + // get one chance. But it can't hurt. + let _fin_verified = + constant_time::verify_slices_are_equal(&expect_verify_data, &finished.0) + .map_err(|_| { + cx.common + .send_fatal_alert(AlertDescription::DecryptError); + Error::DecryptError + }) + .map(|_| verify::FinishedMessageVerified::assertion())?; + + // Hash this message too. 
+ st.transcript.add_message(&m); + + st.save_session(cx); + + if st.resuming { + emit_ccs(cx.common); + cx.common + .record_layer + .start_encrypting(); + emit_finished(&st.secrets, &mut st.transcript, cx.common); + } + + cx.common.start_traffic(); + Ok(Box::new(ExpectTraffic { + secrets: st.secrets, + _cert_verified: st.cert_verified, + _sig_verified: st.sig_verified, + _fin_verified, + })) + } +} + +// -- Traffic transit state -- +struct ExpectTraffic { + secrets: ConnectionSecrets, + _cert_verified: verify::ServerCertVerified, + _sig_verified: verify::HandshakeSignatureValid, + _fin_verified: verify::FinishedMessageVerified, +} + +impl State for ExpectTraffic { + fn handle(self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + match m.payload { + MessagePayload::ApplicationData(payload) => cx + .common + .take_received_plaintext(payload), + payload => { + return Err(inappropriate_message( + &payload, + &[ContentType::ApplicationData], + )); + } + } + Ok(self) + } + + fn export_keying_material( + &self, + output: &mut [u8], + label: &[u8], + context: Option<&[u8]>, + ) -> Result<(), Error> { + self.secrets + .export_keying_material(output, label, context); + Ok(()) + } + + #[cfg(feature = "secret_extraction")] + fn extract_secrets(&self) -> Result { + self.secrets + .extract_secrets(Side::Client) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/tls13.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/tls13.rs new file mode 100644 index 0000000000000000000000000000000000000000..4c118bbcc4f685dc8f23ef4e3d1b15b6f6766c42 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/client/tls13.rs @@ -0,0 +1,1191 @@ +use crate::check::inappropriate_handshake_message; +use crate::conn::{CommonState, ConnectionRandoms, State}; +use crate::enums::{ProtocolVersion, SignatureScheme}; +use crate::error::Error; +use 
crate::hash_hs::{HandshakeHash, HandshakeHashBuffer}; +use crate::kx; +#[cfg(feature = "logging")] +use crate::log::{debug, trace, warn}; +use crate::msgs::base::{Payload, PayloadU8}; +use crate::msgs::ccs::ChangeCipherSpecPayload; +use crate::msgs::codec::Codec; +use crate::msgs::enums::KeyUpdateRequest; +use crate::msgs::enums::{AlertDescription, NamedGroup}; +use crate::msgs::enums::{ContentType, ExtensionType, HandshakeType}; +use crate::msgs::handshake::ClientExtension; +use crate::msgs::handshake::DigitallySignedStruct; +use crate::msgs::handshake::EncryptedExtensions; +use crate::msgs::handshake::NewSessionTicketPayloadTLS13; +use crate::msgs::handshake::{CertificateEntry, CertificatePayloadTLS13}; +use crate::msgs::handshake::{HandshakeMessagePayload, HandshakePayload}; +use crate::msgs::handshake::{HasServerExtensions, ServerHelloPayload}; +use crate::msgs::handshake::{PresharedKeyIdentity, PresharedKeyOffer}; +use crate::msgs::message::{Message, MessagePayload}; +use crate::msgs::persist; +use crate::tls13::key_schedule::{ + KeyScheduleEarly, KeyScheduleHandshake, KeySchedulePreHandshake, KeyScheduleTraffic, +}; +use crate::tls13::Tls13CipherSuite; +use crate::verify; +#[cfg(feature = "quic")] +use crate::{conn::Protocol, msgs::base::PayloadU16, quic}; +#[cfg(feature = "secret_extraction")] +use crate::{conn::Side, suites::PartiallyExtractedSecrets}; +use crate::{sign, KeyLog}; + +use super::client_conn::ClientConnectionData; +use super::hs::ClientContext; +use crate::client::common::ServerCertDetails; +use crate::client::common::{ClientAuthDetails, ClientHelloDetails}; +use crate::client::{hs, ClientConfig, ServerName, StoresClientSessions}; + +use crate::ticketer::TimeBase; +use ring::constant_time; + +use crate::sign::{CertifiedKey, Signer}; +use std::sync::Arc; + +// Extensions we expect in plaintext in the ServerHello. 
+static ALLOWED_PLAINTEXT_EXTS: &[ExtensionType] = &[ + ExtensionType::KeyShare, + ExtensionType::PreSharedKey, + ExtensionType::SupportedVersions, +]; + +// Only the intersection of things we offer, and those disallowed +// in TLS1.3 +static DISALLOWED_TLS13_EXTS: &[ExtensionType] = &[ + ExtensionType::ECPointFormats, + ExtensionType::SessionTicket, + ExtensionType::RenegotiationInfo, + ExtensionType::ExtendedMasterSecret, +]; + +pub(super) fn handle_server_hello( + config: Arc, + cx: &mut ClientContext, + server_hello: &ServerHelloPayload, + mut resuming_session: Option, + server_name: ServerName, + randoms: ConnectionRandoms, + suite: &'static Tls13CipherSuite, + transcript: HandshakeHash, + early_key_schedule: Option, + hello: ClientHelloDetails, + our_key_share: kx::KeyExchange, + mut sent_tls13_fake_ccs: bool, +) -> hs::NextStateOrError { + validate_server_hello(cx.common, server_hello)?; + + let their_key_share = server_hello + .get_key_share() + .ok_or_else(|| { + cx.common + .send_fatal_alert(AlertDescription::MissingExtension); + Error::PeerMisbehavedError("missing key share".to_string()) + })?; + + if our_key_share.group() != their_key_share.group { + return Err(cx + .common + .illegal_param("wrong group for key share")); + } + + let key_schedule_pre_handshake = if let (Some(selected_psk), Some(early_key_schedule)) = + (server_hello.get_psk_index(), early_key_schedule) + { + if let Some(ref resuming) = resuming_session { + let resuming_suite = match suite.can_resume_from(resuming.suite()) { + Some(resuming) => resuming, + None => { + return Err(cx + .common + .illegal_param("server resuming incompatible suite")); + } + }; + + // If the server varies the suite here, we will have encrypted early data with + // the wrong suite. 
+ if cx.data.early_data.is_enabled() && resuming_suite != suite { + return Err(cx + .common + .illegal_param("server varied suite with early data")); + } + + if selected_psk != 0 { + return Err(cx + .common + .illegal_param("server selected invalid psk")); + } + + debug!("Resuming using PSK"); + // The key schedule has been initialized and set in fill_in_psk_binder() + } else { + return Err(Error::PeerMisbehavedError( + "server selected unoffered psk".to_string(), + )); + } + KeySchedulePreHandshake::from(early_key_schedule) + } else { + debug!("Not resuming"); + // Discard the early data key schedule. + cx.data.early_data.rejected(); + cx.common.early_traffic = false; + resuming_session.take(); + KeySchedulePreHandshake::new(suite.hkdf_algorithm) + }; + + let key_schedule = our_key_share.complete(&their_key_share.payload.0, |secret| { + Ok(key_schedule_pre_handshake.into_handshake(secret)) + })?; + + // Remember what KX group the server liked for next time. + save_kx_hint(&config, &server_name, their_key_share.group); + + // If we change keying when a subsequent handshake message is being joined, + // the two halves will have different record layer protections. Disallow this. 
+ cx.common.check_aligned_handshake()?; + + let hash_at_client_recvd_server_hello = transcript.get_current_hash(); + + let (key_schedule, client_key, server_key) = key_schedule.derive_handshake_secrets( + hash_at_client_recvd_server_hello, + &*config.key_log, + &randoms.client, + ); + + // Decrypt with the peer's key, encrypt with our own key + cx.common + .record_layer + .set_message_decrypter(suite.derive_decrypter(&server_key)); + + if !cx.data.early_data.is_enabled() { + // Set the client encryption key for handshakes if early data is not used + cx.common + .record_layer + .set_message_encrypter(suite.derive_encrypter(&client_key)); + } + + #[cfg(feature = "quic")] + if cx.common.is_quic() { + cx.common.quic.hs_secrets = Some(quic::Secrets::new(client_key, server_key, suite, true)); + } + + emit_fake_ccs(&mut sent_tls13_fake_ccs, cx.common); + + Ok(Box::new(ExpectEncryptedExtensions { + config, + resuming_session, + server_name, + randoms, + suite, + transcript, + key_schedule, + hello, + })) +} + +fn validate_server_hello( + common: &mut CommonState, + server_hello: &ServerHelloPayload, +) -> Result<(), Error> { + for ext in &server_hello.extensions { + if !ALLOWED_PLAINTEXT_EXTS.contains(&ext.get_type()) { + common.send_fatal_alert(AlertDescription::UnsupportedExtension); + return Err(Error::PeerMisbehavedError( + "server sent unexpected cleartext ext".to_string(), + )); + } + } + + Ok(()) +} + +pub(super) fn initial_key_share( + config: &ClientConfig, + server_name: &ServerName, +) -> Result { + let key = persist::ClientSessionKey::hint_for_server_name(server_name); + let key_buf = key.get_encoding(); + + let maybe_value = config.session_storage.get(&key_buf); + + let group = maybe_value + .and_then(|enc| NamedGroup::read_bytes(&enc)) + .and_then(|group| kx::KeyExchange::choose(group, &config.kx_groups)) + .unwrap_or_else(|| { + config + .kx_groups + .first() + .expect("No kx groups configured") + }); + + 
kx::KeyExchange::start(group).ok_or(Error::FailedToGetRandomBytes) +} + +fn save_kx_hint(config: &ClientConfig, server_name: &ServerName, group: NamedGroup) { + let key = persist::ClientSessionKey::hint_for_server_name(server_name); + + config + .session_storage + .put(key.get_encoding(), group.get_encoding()); +} + +/// This implements the horrifying TLS1.3 hack where PSK binders have a +/// data dependency on the message they are contained within. +pub(super) fn fill_in_psk_binder( + resuming: &persist::Tls13ClientSessionValue, + transcript: &HandshakeHashBuffer, + hmp: &mut HandshakeMessagePayload, +) -> KeyScheduleEarly { + // We need to know the hash function of the suite we're trying to resume into. + let hkdf_alg = resuming.suite().hkdf_algorithm; + let suite_hash = resuming.suite().hash_algorithm(); + + // The binder is calculated over the clienthello, but doesn't include itself or its + // length, or the length of its container. + let binder_plaintext = hmp.get_encoding_for_binder_signing(); + let handshake_hash = transcript.get_hash_given(suite_hash, &binder_plaintext); + + // Run a fake key_schedule to simulate what the server will do if it chooses + // to resume. + let key_schedule = KeyScheduleEarly::new(hkdf_alg, resuming.secret()); + let real_binder = key_schedule.resumption_psk_binder_key_and_sign_verify_data(&handshake_hash); + + if let HandshakePayload::ClientHello(ref mut ch) = hmp.payload { + ch.set_psk_binder(real_binder.as_ref()); + }; + + key_schedule +} + +pub(super) fn prepare_resumption( + config: &ClientConfig, + cx: &mut ClientContext<'_>, + ticket: Vec, + resuming_session: &persist::Retrieved<&persist::Tls13ClientSessionValue>, + exts: &mut Vec, + doing_retry: bool, +) { + let resuming_suite = resuming_session.suite(); + cx.common.suite = Some(resuming_suite.into()); + cx.data.resumption_ciphersuite = Some(resuming_suite.into()); + // The EarlyData extension MUST be supplied together with the + // PreSharedKey extension. 
+ let max_early_data_size = resuming_session.max_early_data_size(); + if config.enable_early_data && max_early_data_size > 0 && !doing_retry { + cx.data + .early_data + .enable(max_early_data_size as usize); + exts.push(ClientExtension::EarlyData); + } + + // Finally, and only for TLS1.3 with a ticket resumption, include a binder + // for our ticket. This must go last. + // + // Include an empty binder. It gets filled in below because it depends on + // the message it's contained in (!!!). + let obfuscated_ticket_age = resuming_session.obfuscated_ticket_age(); + + let binder_len = resuming_suite + .hash_algorithm() + .output_len; + let binder = vec![0u8; binder_len]; + + let psk_identity = PresharedKeyIdentity::new(ticket, obfuscated_ticket_age); + let psk_ext = PresharedKeyOffer::new(psk_identity, binder); + exts.push(ClientExtension::PresharedKey(psk_ext)); +} + +pub(super) fn derive_early_traffic_secret( + key_log: &dyn KeyLog, + cx: &mut ClientContext<'_>, + resuming_suite: &'static Tls13CipherSuite, + early_key_schedule: &KeyScheduleEarly, + sent_tls13_fake_ccs: &mut bool, + transcript_buffer: &HandshakeHashBuffer, + client_random: &[u8; 32], +) { + // For middlebox compatibility + emit_fake_ccs(sent_tls13_fake_ccs, cx.common); + + let client_hello_hash = transcript_buffer.get_hash_given(resuming_suite.hash_algorithm(), &[]); + let client_early_traffic_secret = + early_key_schedule.client_early_traffic_secret(&client_hello_hash, key_log, client_random); + // Set early data encryption key + cx.common + .record_layer + .set_message_encrypter(resuming_suite.derive_encrypter(&client_early_traffic_secret)); + + #[cfg(feature = "quic")] + if cx.common.is_quic() { + cx.common.quic.early_secret = Some(client_early_traffic_secret); + } + + // Now the client can send encrypted early data + cx.common.early_traffic = true; + trace!("Starting early data traffic"); +} + +pub(super) fn emit_fake_ccs(sent_tls13_fake_ccs: &mut bool, common: &mut CommonState) { + if 
common.is_quic() { + return; + } + + if std::mem::replace(sent_tls13_fake_ccs, true) { + return; + } + + let m = Message { + version: ProtocolVersion::TLSv1_2, + payload: MessagePayload::ChangeCipherSpec(ChangeCipherSpecPayload {}), + }; + common.send_msg(m, false); +} + +fn validate_encrypted_extensions( + common: &mut CommonState, + hello: &ClientHelloDetails, + exts: &EncryptedExtensions, +) -> Result<(), Error> { + if exts.has_duplicate_extension() { + common.send_fatal_alert(AlertDescription::DecodeError); + return Err(Error::PeerMisbehavedError( + "server sent duplicate encrypted extensions".to_string(), + )); + } + + if hello.server_sent_unsolicited_extensions(exts, &[]) { + common.send_fatal_alert(AlertDescription::UnsupportedExtension); + let msg = "server sent unsolicited encrypted extension".to_string(); + return Err(Error::PeerMisbehavedError(msg)); + } + + for ext in exts { + if ALLOWED_PLAINTEXT_EXTS.contains(&ext.get_type()) + || DISALLOWED_TLS13_EXTS.contains(&ext.get_type()) + { + common.send_fatal_alert(AlertDescription::UnsupportedExtension); + let msg = "server sent inappropriate encrypted extension".to_string(); + return Err(Error::PeerMisbehavedError(msg)); + } + } + + Ok(()) +} + +struct ExpectEncryptedExtensions { + config: Arc, + resuming_session: Option, + server_name: ServerName, + randoms: ConnectionRandoms, + suite: &'static Tls13CipherSuite, + transcript: HandshakeHash, + key_schedule: KeyScheduleHandshake, + hello: ClientHelloDetails, +} + +impl State for ExpectEncryptedExtensions { + fn handle(mut self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + let exts = require_handshake_msg!( + m, + HandshakeType::EncryptedExtensions, + HandshakePayload::EncryptedExtensions + )?; + debug!("TLS1.3 encrypted extensions: {:?}", exts); + self.transcript.add_message(&m); + + validate_encrypted_extensions(cx.common, &self.hello, exts)?; + hs::process_alpn_protocol(cx.common, &self.config, exts.get_alpn_protocol())?; + + 
#[cfg(feature = "quic")] + { + // QUIC transport parameters + if cx.common.is_quic() { + match exts.get_quic_params_extension() { + Some(params) => cx.common.quic.params = Some(params), + None => { + return Err(cx + .common + .missing_extension("QUIC transport parameters not found")); + } + } + } + } + + if let Some(resuming_session) = self.resuming_session { + let was_early_traffic = cx.common.early_traffic; + if was_early_traffic { + if exts.early_data_extension_offered() { + cx.data.early_data.accepted(); + } else { + cx.data.early_data.rejected(); + cx.common.early_traffic = false; + } + } + + if was_early_traffic && !cx.common.early_traffic { + // If no early traffic, set the encryption key for handshakes + cx.common + .record_layer + .set_message_encrypter( + self.suite + .derive_encrypter(self.key_schedule.client_key()), + ); + } + + cx.common.peer_certificates = Some( + resuming_session + .server_cert_chain() + .to_vec(), + ); + + // We *don't* reverify the certificate chain here: resumption is a + // continuation of the previous session in terms of security policy. 
+ let cert_verified = verify::ServerCertVerified::assertion(); + let sig_verified = verify::HandshakeSignatureValid::assertion(); + Ok(Box::new(ExpectFinished { + config: self.config, + server_name: self.server_name, + randoms: self.randoms, + suite: self.suite, + transcript: self.transcript, + key_schedule: self.key_schedule, + client_auth: None, + cert_verified, + sig_verified, + })) + } else { + if exts.early_data_extension_offered() { + let msg = "server sent early data extension without resumption".to_string(); + return Err(Error::PeerMisbehavedError(msg)); + } + Ok(Box::new(ExpectCertificateOrCertReq { + config: self.config, + server_name: self.server_name, + randoms: self.randoms, + suite: self.suite, + transcript: self.transcript, + key_schedule: self.key_schedule, + may_send_sct_list: self.hello.server_may_send_sct_list(), + })) + } + } +} + +struct ExpectCertificateOrCertReq { + config: Arc, + server_name: ServerName, + randoms: ConnectionRandoms, + suite: &'static Tls13CipherSuite, + transcript: HandshakeHash, + key_schedule: KeyScheduleHandshake, + may_send_sct_list: bool, +} + +impl State for ExpectCertificateOrCertReq { + fn handle(self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + match m.payload { + MessagePayload::Handshake { + parsed: + HandshakeMessagePayload { + payload: HandshakePayload::CertificateTLS13(..), + .. + }, + .. + } => Box::new(ExpectCertificate { + config: self.config, + server_name: self.server_name, + randoms: self.randoms, + suite: self.suite, + transcript: self.transcript, + key_schedule: self.key_schedule, + may_send_sct_list: self.may_send_sct_list, + client_auth: None, + }) + .handle(cx, m), + MessagePayload::Handshake { + parsed: + HandshakeMessagePayload { + payload: HandshakePayload::CertificateRequestTLS13(..), + .. + }, + .. 
+ } => Box::new(ExpectCertificateRequest { + config: self.config, + server_name: self.server_name, + randoms: self.randoms, + suite: self.suite, + transcript: self.transcript, + key_schedule: self.key_schedule, + may_send_sct_list: self.may_send_sct_list, + }) + .handle(cx, m), + payload => Err(inappropriate_handshake_message( + &payload, + &[ContentType::Handshake], + &[ + HandshakeType::Certificate, + HandshakeType::CertificateRequest, + ], + )), + } + } +} + +// TLS1.3 version of CertificateRequest handling. We then move to expecting the server +// Certificate. Unfortunately the CertificateRequest type changed in an annoying way +// in TLS1.3. +struct ExpectCertificateRequest { + config: Arc, + server_name: ServerName, + randoms: ConnectionRandoms, + suite: &'static Tls13CipherSuite, + transcript: HandshakeHash, + key_schedule: KeyScheduleHandshake, + may_send_sct_list: bool, +} + +impl State for ExpectCertificateRequest { + fn handle(mut self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + let certreq = &require_handshake_msg!( + m, + HandshakeType::CertificateRequest, + HandshakePayload::CertificateRequestTLS13 + )?; + self.transcript.add_message(&m); + debug!("Got CertificateRequest {:?}", certreq); + + // Fortunately the problems here in TLS1.2 and prior are corrected in + // TLS1.3. + + // Must be empty during handshake. 
+ if !certreq.context.0.is_empty() { + warn!("Server sent non-empty certreq context"); + cx.common + .send_fatal_alert(AlertDescription::DecodeError); + return Err(Error::CorruptMessagePayload(ContentType::Handshake)); + } + + let tls13_sign_schemes = sign::supported_sign_tls13(); + let no_sigschemes = Vec::new(); + let compat_sigschemes = certreq + .get_sigalgs_extension() + .unwrap_or(&no_sigschemes) + .iter() + .cloned() + .filter(|scheme| tls13_sign_schemes.contains(scheme)) + .collect::>(); + + if compat_sigschemes.is_empty() { + cx.common + .send_fatal_alert(AlertDescription::HandshakeFailure); + return Err(Error::PeerIncompatibleError( + "server sent bad certreq schemes".to_string(), + )); + } + + let client_auth = ClientAuthDetails::resolve( + self.config + .client_auth_cert_resolver + .as_ref(), + certreq.get_authorities_extension(), + &compat_sigschemes, + Some(certreq.context.0.clone()), + ); + + Ok(Box::new(ExpectCertificate { + config: self.config, + server_name: self.server_name, + randoms: self.randoms, + suite: self.suite, + transcript: self.transcript, + key_schedule: self.key_schedule, + may_send_sct_list: self.may_send_sct_list, + client_auth: Some(client_auth), + })) + } +} + +struct ExpectCertificate { + config: Arc, + server_name: ServerName, + randoms: ConnectionRandoms, + suite: &'static Tls13CipherSuite, + transcript: HandshakeHash, + key_schedule: KeyScheduleHandshake, + may_send_sct_list: bool, + client_auth: Option, +} + +impl State for ExpectCertificate { + fn handle(mut self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + let cert_chain = require_handshake_msg!( + m, + HandshakeType::Certificate, + HandshakePayload::CertificateTLS13 + )?; + self.transcript.add_message(&m); + + // This is only non-empty for client auth. 
+ if !cert_chain.context.0.is_empty() { + warn!("certificate with non-empty context during handshake"); + cx.common + .send_fatal_alert(AlertDescription::DecodeError); + return Err(Error::CorruptMessagePayload(ContentType::Handshake)); + } + + if cert_chain.any_entry_has_duplicate_extension() + || cert_chain.any_entry_has_unknown_extension() + { + warn!("certificate chain contains unsolicited/unknown extension"); + cx.common + .send_fatal_alert(AlertDescription::UnsupportedExtension); + return Err(Error::PeerMisbehavedError( + "bad cert chain extensions".to_string(), + )); + } + + let server_cert = ServerCertDetails::new( + cert_chain.convert(), + cert_chain.get_end_entity_ocsp(), + cert_chain.get_end_entity_scts(), + ); + + if let Some(sct_list) = server_cert.scts.as_ref() { + if hs::sct_list_is_invalid(sct_list) { + let error_msg = "server sent invalid SCT list".to_string(); + return Err(Error::PeerMisbehavedError(error_msg)); + } + + if !self.may_send_sct_list { + let error_msg = "server sent unsolicited SCT list".to_string(); + return Err(Error::PeerMisbehavedError(error_msg)); + } + } + + Ok(Box::new(ExpectCertificateVerify { + config: self.config, + server_name: self.server_name, + randoms: self.randoms, + suite: self.suite, + transcript: self.transcript, + key_schedule: self.key_schedule, + server_cert, + client_auth: self.client_auth, + })) + } +} + +// --- TLS1.3 CertificateVerify --- +struct ExpectCertificateVerify { + config: Arc, + server_name: ServerName, + randoms: ConnectionRandoms, + suite: &'static Tls13CipherSuite, + transcript: HandshakeHash, + key_schedule: KeyScheduleHandshake, + server_cert: ServerCertDetails, + client_auth: Option, +} + +impl State for ExpectCertificateVerify { + fn handle(mut self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError { + let cert_verify = require_handshake_msg!( + m, + HandshakeType::CertificateVerify, + HandshakePayload::CertificateVerify + )?; + + trace!("Server cert is {:?}", 
self.server_cert.cert_chain); + + // 1. Verify the certificate chain. + let (end_entity, intermediates) = self + .server_cert + .cert_chain + .split_first() + .ok_or(Error::NoCertificatesPresented)?; + let now = std::time::SystemTime::now(); + let cert_verified = self + .config + .verifier + .verify_server_cert( + end_entity, + intermediates, + &self.server_name, + &mut self.server_cert.scts(), + &self.server_cert.ocsp_response, + now, + ) + .map_err(|err| hs::send_cert_error_alert(cx.common, err))?; + + // 2. Verify their signature on the handshake. + let handshake_hash = self.transcript.get_current_hash(); + let sig_verified = self + .config + .verifier + .verify_tls13_signature( + &verify::construct_tls13_server_verify_message(&handshake_hash), + &self.server_cert.cert_chain[0], + cert_verify, + ) + .map_err(|err| hs::send_cert_error_alert(cx.common, err))?; + + cx.common.peer_certificates = Some(self.server_cert.cert_chain); + self.transcript.add_message(&m); + + Ok(Box::new(ExpectFinished { + config: self.config, + server_name: self.server_name, + randoms: self.randoms, + suite: self.suite, + transcript: self.transcript, + key_schedule: self.key_schedule, + client_auth: self.client_auth, + cert_verified, + sig_verified, + })) + } +} + +fn emit_certificate_tls13( + transcript: &mut HandshakeHash, + certkey: Option<&CertifiedKey>, + auth_context: Option>, + common: &mut CommonState, +) { + let context = auth_context.unwrap_or_default(); + + let mut cert_payload = CertificatePayloadTLS13 { + context: PayloadU8::new(context), + entries: Vec::new(), + }; + + if let Some(certkey) = certkey { + for cert in &certkey.cert { + cert_payload + .entries + .push(CertificateEntry::new(cert.clone())); + } + } + + let m = Message { + version: ProtocolVersion::TLSv1_3, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::Certificate, + payload: HandshakePayload::CertificateTLS13(cert_payload), + }), + }; + transcript.add_message(&m); + 
common.send_msg(m, true); +} + +fn emit_certverify_tls13( + transcript: &mut HandshakeHash, + signer: &dyn Signer, + common: &mut CommonState, +) -> Result<(), Error> { + let message = verify::construct_tls13_client_verify_message(&transcript.get_current_hash()); + + let scheme = signer.scheme(); + let sig = signer.sign(&message)?; + let dss = DigitallySignedStruct::new(scheme, sig); + + let m = Message { + version: ProtocolVersion::TLSv1_3, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::CertificateVerify, + payload: HandshakePayload::CertificateVerify(dss), + }), + }; + + transcript.add_message(&m); + common.send_msg(m, true); + Ok(()) +} + +fn emit_finished_tls13( + transcript: &mut HandshakeHash, + verify_data: ring::hmac::Tag, + common: &mut CommonState, +) { + let verify_data_payload = Payload::new(verify_data.as_ref()); + + let m = Message { + version: ProtocolVersion::TLSv1_3, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::Finished, + payload: HandshakePayload::Finished(verify_data_payload), + }), + }; + + transcript.add_message(&m); + common.send_msg(m, true); +} + +fn emit_end_of_early_data_tls13(transcript: &mut HandshakeHash, common: &mut CommonState) { + if common.is_quic() { + return; + } + + let m = Message { + version: ProtocolVersion::TLSv1_3, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::EndOfEarlyData, + payload: HandshakePayload::EndOfEarlyData, + }), + }; + + transcript.add_message(&m); + common.send_msg(m, true); +} + +struct ExpectFinished { + config: Arc, + server_name: ServerName, + randoms: ConnectionRandoms, + suite: &'static Tls13CipherSuite, + transcript: HandshakeHash, + key_schedule: KeyScheduleHandshake, + client_auth: Option, + cert_verified: verify::ServerCertVerified, + sig_verified: verify::HandshakeSignatureValid, +} + +impl State for ExpectFinished { + fn handle(self: Box, cx: &mut ClientContext<'_>, m: Message) -> 
hs::NextStateOrError { + let mut st = *self; + let finished = + require_handshake_msg!(m, HandshakeType::Finished, HandshakePayload::Finished)?; + + let handshake_hash = st.transcript.get_current_hash(); + let expect_verify_data = st + .key_schedule + .sign_server_finish(&handshake_hash); + + let fin = constant_time::verify_slices_are_equal(expect_verify_data.as_ref(), &finished.0) + .map_err(|_| { + cx.common + .send_fatal_alert(AlertDescription::DecryptError); + Error::DecryptError + }) + .map(|_| verify::FinishedMessageVerified::assertion())?; + + st.transcript.add_message(&m); + + let hash_after_handshake = st.transcript.get_current_hash(); + /* The EndOfEarlyData message to server is still encrypted with early data keys, + * but appears in the transcript after the server Finished. */ + if cx.common.early_traffic { + emit_end_of_early_data_tls13(&mut st.transcript, cx.common); + cx.common.early_traffic = false; + cx.data.early_data.finished(); + cx.common + .record_layer + .set_message_encrypter( + st.suite + .derive_encrypter(st.key_schedule.client_key()), + ); + } + + /* Send our authentication/finished messages. These are still encrypted + * with our handshake keys. 
*/ + if let Some(client_auth) = st.client_auth { + match client_auth { + ClientAuthDetails::Empty { + auth_context_tls13: auth_context, + } => { + emit_certificate_tls13(&mut st.transcript, None, auth_context, cx.common); + } + ClientAuthDetails::Verify { + certkey, + signer, + auth_context_tls13: auth_context, + } => { + emit_certificate_tls13( + &mut st.transcript, + Some(&certkey), + auth_context, + cx.common, + ); + emit_certverify_tls13(&mut st.transcript, signer.as_ref(), cx.common)?; + } + } + } + + let (key_schedule_finished, client_key, server_key) = st + .key_schedule + .into_traffic_with_client_finished_pending( + hash_after_handshake, + &*st.config.key_log, + &st.randoms.client, + ); + let handshake_hash = st.transcript.get_current_hash(); + let (key_schedule_traffic, verify_data, _) = + key_schedule_finished.sign_client_finish(&handshake_hash); + emit_finished_tls13(&mut st.transcript, verify_data, cx.common); + + /* Now move to our application traffic keys. */ + cx.common.check_aligned_handshake()?; + + cx.common + .record_layer + .set_message_decrypter(st.suite.derive_decrypter(&server_key)); + + cx.common + .record_layer + .set_message_encrypter(st.suite.derive_encrypter(&client_key)); + + cx.common.start_traffic(); + + let st = ExpectTraffic { + session_storage: Arc::clone(&st.config.session_storage), + server_name: st.server_name, + suite: st.suite, + transcript: st.transcript, + key_schedule: key_schedule_traffic, + want_write_key_update: false, + _cert_verified: st.cert_verified, + _sig_verified: st.sig_verified, + _fin_verified: fin, + }; + + #[cfg(feature = "quic")] + { + if cx.common.protocol == Protocol::Quic { + cx.common.quic.traffic_secrets = + Some(quic::Secrets::new(client_key, server_key, st.suite, true)); + return Ok(Box::new(ExpectQuicTraffic(st))); + } + } + + Ok(Box::new(st)) + } +} + +// -- Traffic transit state (TLS1.3) -- +// In this state we can be sent tickets, key updates, +// and application data. 
+// Post-handshake ("traffic transit") state for a TLS1.3 client connection.
+// In this state the peer may send NewSessionTicket, KeyUpdate, and
+// application data; anything else is rejected in `handle` below.
+//
+// NOTE(review): generic parameters appear stripped by extraction in this
+// vendored patch (e.g. `Arc,` and `-> Result {`); the upstream rustls-0.20.9
+// source carries the full types — confirm against the crate before editing.
+struct ExpectTraffic {
+    // Ticket store used by `handle_new_ticket_tls13` (`.put(...)` below);
+    // presumably the config's session cache — verify against upstream type.
+    session_storage: Arc,
+    server_name: ServerName,
+    suite: &'static Tls13CipherSuite,
+    transcript: HandshakeHash,
+    key_schedule: KeyScheduleTraffic,
+    // Set when the peer requests a key update; acted on lazily in
+    // `perhaps_write_key_update` the next time we write plaintext.
+    want_write_key_update: bool,
+    // Zero-sized proof tokens retained so this state can only be constructed
+    // after certificate, signature, and Finished verification succeeded.
+    _cert_verified: verify::ServerCertVerified,
+    _sig_verified: verify::HandshakeSignatureValid,
+    _fin_verified: verify::FinishedMessageVerified,
+}
+
+impl ExpectTraffic {
+    // Process a post-handshake NewSessionTicket: validate it, derive the
+    // resumption PSK from the resumption master secret and the ticket nonce,
+    // and persist the resulting session value under the server's name.
+    // Returns Ok(()) even when the ticket cannot be saved (best-effort);
+    // errors only on protocol violations.
+    #[allow(clippy::unnecessary_wraps)] // returns Err for #[cfg(feature = "quic")]
+    fn handle_new_ticket_tls13(
+        &mut self,
+        cx: &mut ClientContext<'_>,
+        nst: &NewSessionTicketPayloadTLS13,
+    ) -> Result<(), Error> {
+        // Duplicate extensions in NewSessionTicket are a protocol violation:
+        // abort with illegal_parameter.
+        if nst.has_duplicate_extension() {
+            cx.common
+                .send_fatal_alert(AlertDescription::IllegalParameter);
+            return Err(Error::PeerMisbehavedError(
+                "peer sent duplicate NewSessionTicket extensions".into(),
+            ));
+        }
+
+        // Derive the per-ticket PSK from the current transcript hash and the
+        // ticket's nonce.
+        let handshake_hash = self.transcript.get_current_hash();
+        let secret = self
+            .key_schedule
+            .resumption_master_secret_and_derive_ticket_psk(&handshake_hash, &nst.nonce.0);
+
+        // Without a usable clock the session simply isn't saved; this is not
+        // an error for the connection.
+        let time_now = match TimeBase::now() {
+            Ok(t) => t,
+            #[allow(unused_variables)]
+            Err(e) => {
+                debug!("Session not saved: {}", e);
+                return Ok(());
+            }
+        };
+
+        let value = persist::Tls13ClientSessionValue::new(
+            self.suite,
+            nst.ticket.0.clone(),
+            secret,
+            cx.common
+                .peer_certificates
+                .clone()
+                .unwrap_or_default(),
+            time_now,
+            nst.lifetime,
+            nst.age_add,
+            nst.get_max_early_data_size()
+                .unwrap_or_default(),
+        );
+
+        // For QUIC, max_early_data_size must be absent, 0, or 0xffff_ffff
+        // (the "unlimited" sentinel); any other value is a peer error.
+        #[cfg(feature = "quic")]
+        if let Some(sz) = nst.get_max_early_data_size() {
+            if cx.common.protocol == Protocol::Quic && sz != 0 && sz != 0xffff_ffff {
+                return Err(Error::PeerMisbehavedError(
+                    "invalid max_early_data_size".into(),
+                ));
+            }
+        }
+
+        let key = persist::ClientSessionKey::session_for_server_name(&self.server_name);
+        #[allow(unused_mut)]
+        let mut ticket = value.get_encoding();
+
+        // QUIC transport parameters are appended to the stored blob so a
+        // resumed 0-RTT connection can validate them later.
+        #[cfg(feature = "quic")]
+        if let (Protocol::Quic, Some(ref quic_params)) =
+            (cx.common.protocol, &cx.common.quic.params)
+        {
+            PayloadU16::encode_slice(quic_params, &mut ticket);
+        }
+
+        // Best-effort persistence: log the outcome either way.
+        let worked = self
+            .session_storage
+            .put(key.get_encoding(), ticket);
+
+        if worked {
+            debug!("Ticket saved");
+        } else {
+            debug!("Ticket not saved");
+        }
+        Ok(())
+    }
+
+    // Process a post-handshake KeyUpdate: rotate our read-side traffic keys
+    // immediately, and (if the peer requested it) schedule a write-side
+    // rotation for the next plaintext write.
+    fn handle_key_update(
+        &mut self,
+        common: &mut CommonState,
+        kur: &KeyUpdateRequest,
+    ) -> Result<(), Error> {
+        // KeyUpdate is forbidden in QUIC (QUIC has its own key update
+        // mechanism): treat it as an unexpected message.
+        #[cfg(feature = "quic")]
+        {
+            if let Protocol::Quic = common.protocol {
+                common.send_fatal_alert(AlertDescription::UnexpectedMessage);
+                let msg = "KeyUpdate received in QUIC connection".to_string();
+                warn!("{}", msg);
+                return Err(Error::PeerMisbehavedError(msg));
+            }
+        }
+
+        // Mustn't be interleaved with other handshake messages.
+        common.check_aligned_handshake()?;
+
+        match kur {
+            KeyUpdateRequest::UpdateNotRequested => {}
+            KeyUpdateRequest::UpdateRequested => {
+                self.want_write_key_update = true;
+            }
+            // Any other request_update value is illegal per RFC 8446 §4.6.3.
+            _ => {
+                common.send_fatal_alert(AlertDescription::IllegalParameter);
+                return Err(Error::CorruptMessagePayload(ContentType::Handshake));
+            }
+        }
+
+        // Update our read-side keys.
+        let new_read_key = self
+            .key_schedule
+            .next_server_application_traffic_secret();
+        common
+            .record_layer
+            .set_message_decrypter(
+                self.suite
+                    .derive_decrypter(&new_read_key),
+            );
+
+        Ok(())
+    }
+}
+
+impl State for ExpectTraffic {
+    // Dispatch incoming post-handshake messages: application data is handed
+    // to the plaintext buffer; NewSessionTicket and KeyUpdate go to the
+    // helpers above; everything else is an inappropriate message error.
+    fn handle(mut self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError {
+        match m.payload {
+            MessagePayload::ApplicationData(payload) => cx
+                .common
+                .take_received_plaintext(payload),
+            MessagePayload::Handshake {
+                parsed:
+                    HandshakeMessagePayload {
+                        payload: HandshakePayload::NewSessionTicketTLS13(ref new_ticket),
+                        ..
+                    },
+                ..
+            } => self.handle_new_ticket_tls13(cx, new_ticket)?,
+            MessagePayload::Handshake {
+                parsed:
+                    HandshakeMessagePayload {
+                        payload: HandshakePayload::KeyUpdate(ref key_update),
+                        ..
+                    },
+                ..
+            } => self.handle_key_update(cx.common, key_update)?,
+            payload => {
+                return Err(inappropriate_handshake_message(
+                    &payload,
+                    &[ContentType::ApplicationData, ContentType::Handshake],
+                    &[HandshakeType::NewSessionTicket, HandshakeType::KeyUpdate],
+                ));
+            }
+        }
+
+        // This state is terminal: we stay in ExpectTraffic.
+        Ok(self)
+    }
+
+    // RFC 5705-style exporter, delegated to the traffic key schedule.
+    fn export_keying_material(
+        &self,
+        output: &mut [u8],
+        label: &[u8],
+        context: Option<&[u8]>,
+    ) -> Result<(), Error> {
+        self.key_schedule
+            .export_keying_material(output, label, context)
+    }
+
+    // If a peer-requested key update is pending, send our KeyUpdate notify
+    // and rotate the write-side traffic keys before the next plaintext write.
+    fn perhaps_write_key_update(&mut self, common: &mut CommonState) {
+        if self.want_write_key_update {
+            self.want_write_key_update = false;
+            common.send_msg_encrypt(Message::build_key_update_notify().into());
+
+            let write_key = self
+                .key_schedule
+                .next_client_application_traffic_secret();
+            common
+                .record_layer
+                .set_message_encrypter(self.suite.derive_encrypter(&write_key));
+        }
+    }
+
+    // Expose the traffic secrets (e.g. for kTLS offload) on the client side.
+    #[cfg(feature = "secret_extraction")]
+    fn extract_secrets(&self) -> Result {
+        self.key_schedule
+            .extract_secrets(self.suite.common.aead_algorithm, Side::Client)
+    }
+}
+
+// QUIC variant of the traffic state: only NewSessionTicket is expected here
+// (application data and key updates travel outside the TLS record layer in
+// QUIC), so it wraps ExpectTraffic and narrows the accepted messages.
+#[cfg(feature = "quic")]
+struct ExpectQuicTraffic(ExpectTraffic);
+
+#[cfg(feature = "quic")]
+impl State for ExpectQuicTraffic {
+    fn handle(mut self: Box, cx: &mut ClientContext<'_>, m: Message) -> hs::NextStateOrError {
+        let nst = require_handshake_msg!(
+            m,
+            HandshakeType::NewSessionTicket,
+            HandshakePayload::NewSessionTicketTLS13
+        )?;
+        self.0
+            .handle_new_ticket_tls13(cx, nst)?;
+        Ok(self)
+    }
+
+    fn export_keying_material(
+        &self,
+        output: &mut [u8],
+        label: &[u8],
+        context: Option<&[u8]>,
+    ) -> Result<(), Error> {
+        self.0
+            .export_keying_material(output, label, context)
+    }
+}
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/conn.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/conn.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f7d93a8c83014e7b2eb68e5fc56bad8d64eba1a0
--- /dev/null
+++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/conn.rs @@ -0,0 +1,1466 @@ +use crate::enums::ProtocolVersion; +use crate::error::Error; +use crate::key; +#[cfg(feature = "logging")] +use crate::log::{debug, error, trace, warn}; +use crate::msgs::alert::AlertMessagePayload; +use crate::msgs::base::Payload; +use crate::msgs::deframer::MessageDeframer; +use crate::msgs::enums::HandshakeType; +use crate::msgs::enums::{AlertDescription, AlertLevel, ContentType}; +use crate::msgs::fragmenter::MessageFragmenter; +use crate::msgs::handshake::Random; +use crate::msgs::hsjoiner::{HandshakeJoiner, JoinerError}; +use crate::msgs::message::{ + BorrowedPlainMessage, Message, MessagePayload, OpaqueMessage, PlainMessage, +}; +#[cfg(feature = "quic")] +use crate::quic; +use crate::record_layer; +use crate::suites::SupportedCipherSuite; +#[cfg(feature = "secret_extraction")] +use crate::suites::{ExtractedSecrets, PartiallyExtractedSecrets}; +#[cfg(feature = "tls12")] +use crate::tls12::ConnectionSecrets; +use crate::vecbuf::ChunkVecBuffer; +#[cfg(feature = "quic")] +use std::collections::VecDeque; + +use std::convert::TryFrom; +use std::fmt::Debug; +use std::io; +use std::mem; +use std::ops::{Deref, DerefMut}; + +/// A client or server connection. +#[derive(Debug)] +pub enum Connection { + /// A client connection + Client(crate::client::ClientConnection), + /// A server connection + Server(crate::server::ServerConnection), +} + +impl Connection { + /// Read TLS content from `rd`. + /// + /// See [`ConnectionCommon::read_tls()`] for more information. + pub fn read_tls(&mut self, rd: &mut dyn io::Read) -> Result { + match self { + Self::Client(conn) => conn.read_tls(rd), + Self::Server(conn) => conn.read_tls(rd), + } + } + + /// Returns an object that allows reading plaintext. 
+ pub fn reader(&mut self) -> Reader { + match self { + Self::Client(conn) => conn.reader(), + Self::Server(conn) => conn.reader(), + } + } + + /// Returns an object that allows writing plaintext. + pub fn writer(&mut self) -> Writer { + match self { + Self::Client(conn) => Writer::new(&mut **conn), + Self::Server(conn) => Writer::new(&mut **conn), + } + } + + /// Processes any new packets read by a previous call to [`Connection::read_tls`]. + /// + /// See [`ConnectionCommon::process_new_packets()`] for more information. + pub fn process_new_packets(&mut self) -> Result { + match self { + Self::Client(conn) => conn.process_new_packets(), + Self::Server(conn) => conn.process_new_packets(), + } + } + + /// Derives key material from the agreed connection secrets. + /// + /// See [`ConnectionCommon::export_keying_material()`] for more information. + pub fn export_keying_material( + &self, + output: &mut [u8], + label: &[u8], + context: Option<&[u8]>, + ) -> Result<(), Error> { + match self { + Self::Client(conn) => conn.export_keying_material(output, label, context), + Self::Server(conn) => conn.export_keying_material(output, label, context), + } + } + + /// Extract secrets, to set up kTLS for example + #[cfg(feature = "secret_extraction")] + pub fn extract_secrets(self) -> Result { + match self { + Self::Client(conn) => conn.extract_secrets(), + Self::Server(conn) => conn.extract_secrets(), + } + } + + /// This function uses `io` to complete any outstanding IO for this connection. + /// + /// See [`ConnectionCommon::complete_io()`] for more information. 
+ pub fn complete_io(&mut self, io: &mut T) -> Result<(usize, usize), io::Error> + where + Self: Sized, + T: io::Read + io::Write, + { + match self { + Self::Client(conn) => conn.complete_io(io), + Self::Server(conn) => conn.complete_io(io), + } + } +} + +#[cfg(feature = "quic")] +impl crate::quic::QuicExt for Connection { + fn quic_transport_parameters(&self) -> Option<&[u8]> { + match self { + Self::Client(conn) => conn.quic_transport_parameters(), + Self::Server(conn) => conn.quic_transport_parameters(), + } + } + + fn zero_rtt_keys(&self) -> Option { + match self { + Self::Client(conn) => conn.zero_rtt_keys(), + Self::Server(conn) => conn.zero_rtt_keys(), + } + } + + fn read_hs(&mut self, plaintext: &[u8]) -> Result<(), Error> { + match self { + Self::Client(conn) => conn.read_quic_hs(plaintext), + Self::Server(conn) => conn.read_quic_hs(plaintext), + } + } + + fn write_hs(&mut self, buf: &mut Vec) -> Option { + match self { + Self::Client(conn) => quic::write_hs(conn, buf), + Self::Server(conn) => quic::write_hs(conn, buf), + } + } + + fn alert(&self) -> Option { + match self { + Self::Client(conn) => conn.alert(), + Self::Server(conn) => conn.alert(), + } + } +} + +impl Deref for Connection { + type Target = CommonState; + + fn deref(&self) -> &Self::Target { + match self { + Self::Client(conn) => &conn.common_state, + Self::Server(conn) => &conn.common_state, + } + } +} + +impl DerefMut for Connection { + fn deref_mut(&mut self) -> &mut Self::Target { + match self { + Self::Client(conn) => &mut conn.common_state, + Self::Server(conn) => &mut conn.common_state, + } + } +} + +/// Values of this structure are returned from [`Connection::process_new_packets`] +/// and tell the caller the current I/O state of the TLS connection. 
+#[derive(Debug, Eq, PartialEq)] +pub struct IoState { + tls_bytes_to_write: usize, + plaintext_bytes_to_read: usize, + peer_has_closed: bool, +} + +impl IoState { + /// How many bytes could be written by [`CommonState::write_tls`] if called + /// right now. A non-zero value implies [`CommonState::wants_write`]. + pub fn tls_bytes_to_write(&self) -> usize { + self.tls_bytes_to_write + } + + /// How many plaintext bytes could be obtained via [`std::io::Read`] + /// without further I/O. + pub fn plaintext_bytes_to_read(&self) -> usize { + self.plaintext_bytes_to_read + } + + /// True if the peer has sent us a close_notify alert. This is + /// the TLS mechanism to securely half-close a TLS connection, + /// and signifies that the peer will not send any further data + /// on this connection. + /// + /// This is also signalled via returning `Ok(0)` from + /// [`std::io::Read`], after all the received bytes have been + /// retrieved. + pub fn peer_has_closed(&self) -> bool { + self.peer_has_closed + } +} + +/// A structure that implements [`std::io::Read`] for reading plaintext. +pub struct Reader<'a> { + received_plaintext: &'a mut ChunkVecBuffer, + peer_cleanly_closed: bool, + has_seen_eof: bool, +} + +impl<'a> io::Read for Reader<'a> { + /// Obtain plaintext data received from the peer over this TLS connection. + /// + /// If the peer closes the TLS session cleanly, this returns `Ok(0)` once all + /// the pending data has been read. No further data can be received on that + /// connection, so the underlying TCP connection should be half-closed too. + /// + /// If the peer closes the TLS session uncleanly (a TCP EOF without sending a + /// `close_notify` alert) this function returns `Err(ErrorKind::UnexpectedEof.into())` + /// once any pending data has been read. 
+ /// + /// Note that support for `close_notify` varies in peer TLS libraries: many do not + /// support it and uncleanly close the TCP connection (this might be + /// vulnerable to truncation attacks depending on the application protocol). + /// This means applications using rustls must both handle EOF + /// from this function, *and* unexpected EOF of the underlying TCP connection. + /// + /// If there are no bytes to read, this returns `Err(ErrorKind::WouldBlock.into())`. + /// + /// You may learn the number of bytes available at any time by inspecting + /// the return of [`Connection::process_new_packets`]. + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let len = self.received_plaintext.read(buf)?; + + if len == 0 && !buf.is_empty() { + // No bytes available: + match (self.peer_cleanly_closed, self.has_seen_eof) { + // cleanly closed; don't care about TCP EOF: express this as Ok(0) + (true, _) => {} + // unclean closure + (false, true) => return Err(io::ErrorKind::UnexpectedEof.into()), + // connection still going, but need more data: signal `WouldBlock` so that + // the caller knows this + (false, false) => return Err(io::ErrorKind::WouldBlock.into()), + } + } + + Ok(len) + } + + /// Obtain plaintext data received from the peer over this TLS connection. + /// + /// If the peer closes the TLS session, this returns `Ok(())` without filling + /// any more of the buffer once all the pending data has been read. No further + /// data can be received on that connection, so the underlying TCP connection + /// should be half-closed too. + /// + /// If the peer closes the TLS session uncleanly (a TCP EOF without sending a + /// `close_notify` alert) this function returns `Err(ErrorKind::UnexpectedEof.into())` + /// once any pending data has been read. 
+ /// + /// Note that support for `close_notify` varies in peer TLS libraries: many do not + /// support it and uncleanly close the TCP connection (this might be + /// vulnerable to truncation attacks depending on the application protocol). + /// This means applications using rustls must both handle EOF + /// from this function, *and* unexpected EOF of the underlying TCP connection. + /// + /// If there are no bytes to read, this returns `Err(ErrorKind::WouldBlock.into())`. + /// + /// You may learn the number of bytes available at any time by inspecting + /// the return of [`Connection::process_new_packets`]. + #[cfg(read_buf)] + fn read_buf(&mut self, mut cursor: io::BorrowedCursor<'_>) -> io::Result<()> { + let before = cursor.written(); + self.received_plaintext + .read_buf(cursor.reborrow())?; + let len = cursor.written() - before; + + if len == 0 && cursor.capacity() > 0 { + // No bytes available: + match (self.peer_cleanly_closed, self.has_seen_eof) { + // cleanly closed; don't care about TCP EOF: express this as Ok(0) + (true, _) => {} + // unclean closure + (false, true) => return Err(io::ErrorKind::UnexpectedEof.into()), + // connection still going, but need more data: signal `WouldBlock` so that + // the caller knows this + (false, false) => return Err(io::ErrorKind::WouldBlock.into()), + } + } + + Ok(()) + } +} + +/// Internal trait implemented by the [`ServerConnection`]/[`ClientConnection`] +/// allowing them to be the subject of a [`Writer`]. 
+pub trait PlaintextSink { + fn write(&mut self, buf: &[u8]) -> io::Result; + fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result; + fn flush(&mut self) -> io::Result<()>; +} + +impl PlaintextSink for ConnectionCommon { + fn write(&mut self, buf: &[u8]) -> io::Result { + Ok(self.send_some_plaintext(buf)) + } + + fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result { + let mut sz = 0; + for buf in bufs { + sz += self.send_some_plaintext(buf); + } + Ok(sz) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +/// A structure that implements [`std::io::Write`] for writing plaintext. +pub struct Writer<'a> { + sink: &'a mut dyn PlaintextSink, +} + +impl<'a> Writer<'a> { + /// Create a new Writer. + /// + /// This is not an external interface. Get one of these objects + /// from [`Connection::writer`]. + #[doc(hidden)] + pub fn new(sink: &'a mut dyn PlaintextSink) -> Writer<'a> { + Writer { sink } + } +} + +impl<'a> io::Write for Writer<'a> { + /// Send the plaintext `buf` to the peer, encrypting + /// and authenticating it. Once this function succeeds + /// you should call [`CommonState::write_tls`] which will output the + /// corresponding TLS records. + /// + /// This function buffers plaintext sent before the + /// TLS handshake completes, and sends it as soon + /// as it can. See [`CommonState::set_buffer_limit`] to control + /// the size of this buffer. 
+ fn write(&mut self, buf: &[u8]) -> io::Result { + self.sink.write(buf) + } + + fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result { + self.sink.write_vectored(bufs) + } + + fn flush(&mut self) -> io::Result<()> { + self.sink.flush() + } +} + +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub(crate) enum Protocol { + Tcp, + #[cfg(feature = "quic")] + Quic, +} + +#[derive(Debug)] +pub(crate) struct ConnectionRandoms { + pub(crate) client: [u8; 32], + pub(crate) server: [u8; 32], +} + +/// How many ChangeCipherSpec messages we accept and drop in TLS1.3 handshakes. +/// The spec says 1, but implementations (namely the boringssl test suite) get +/// this wrong. BoringSSL itself accepts up to 32. +static TLS13_MAX_DROPPED_CCS: u8 = 2u8; + +impl ConnectionRandoms { + pub(crate) fn new(client: Random, server: Random) -> Self { + Self { + client: client.0, + server: server.0, + } + } +} + +// --- Common (to client and server) connection functions --- + +fn is_valid_ccs(msg: &OpaqueMessage) -> bool { + // nb. this is prior to the record layer, so is unencrypted. see + // third paragraph of section 5 in RFC8446. + msg.typ == ContentType::ChangeCipherSpec && msg.payload.0 == [0x01] +} + +enum Limit { + Yes, + No, +} + +/// Interface shared by client and server connections. +pub struct ConnectionCommon { + state: Result>, Error>, + pub(crate) data: Data, + pub(crate) common_state: CommonState, + message_deframer: MessageDeframer, + handshake_joiner: HandshakeJoiner, +} + +impl ConnectionCommon { + pub(crate) fn new(state: Box>, data: Data, common_state: CommonState) -> Self { + Self { + state: Ok(state), + data, + common_state, + message_deframer: MessageDeframer::new(), + handshake_joiner: HandshakeJoiner::new(), + } + } + + /// Returns an object that allows reading plaintext. + pub fn reader(&mut self) -> Reader { + Reader { + received_plaintext: &mut self.common_state.received_plaintext, + /// Are we done? 
i.e., have we processed all received messages, and received a + /// close_notify to indicate that no new messages will arrive? + peer_cleanly_closed: self + .common_state + .has_received_close_notify + && !self.message_deframer.has_pending(), + has_seen_eof: self.common_state.has_seen_eof, + } + } + + /// Returns an object that allows writing plaintext. + pub fn writer(&mut self) -> Writer { + Writer::new(self) + } + + /// This function uses `io` to complete any outstanding IO for + /// this connection. + /// + /// This is a convenience function which solely uses other parts + /// of the public API. + /// + /// What this means depends on the connection state: + /// + /// - If the connection [`is_handshaking`], then IO is performed until + /// the handshake is complete. + /// - Otherwise, if [`wants_write`] is true, [`write_tls`] is invoked + /// until it is all written. + /// - Otherwise, if [`wants_read`] is true, [`read_tls`] is invoked + /// once. + /// + /// The return value is the number of bytes read from and written + /// to `io`, respectively. + /// + /// This function will block if `io` blocks. + /// + /// Errors from TLS record handling (i.e., from [`process_new_packets`]) + /// are wrapped in an `io::ErrorKind::InvalidData`-kind error. 
+ /// + /// [`is_handshaking`]: CommonState::is_handshaking + /// [`wants_read`]: CommonState::wants_read + /// [`wants_write`]: CommonState::wants_write + /// [`write_tls`]: CommonState::write_tls + /// [`read_tls`]: ConnectionCommon::read_tls + /// [`process_new_packets`]: ConnectionCommon::process_new_packets + pub fn complete_io(&mut self, io: &mut T) -> Result<(usize, usize), io::Error> + where + Self: Sized, + T: io::Read + io::Write, + { + let until_handshaked = self.is_handshaking(); + let mut eof = false; + let mut wrlen = 0; + let mut rdlen = 0; + + loop { + while self.wants_write() { + wrlen += self.write_tls(io)?; + } + + if !until_handshaked && wrlen > 0 { + return Ok((rdlen, wrlen)); + } + + while !eof && self.wants_read() { + let read_size = match self.read_tls(io) { + Ok(0) => { + eof = true; + Some(0) + } + Ok(n) => { + rdlen += n; + Some(n) + } + Err(ref err) if err.kind() == io::ErrorKind::Interrupted => None, // nothing to do + Err(err) => return Err(err), + }; + if read_size.is_some() { + break; + } + } + + match self.process_new_packets() { + Ok(_) => {} + Err(e) => { + // In case we have an alert to send describing this error, + // try a last-gasp write -- but don't predate the primary + // error. + let _ignored = self.write_tls(io); + + return Err(io::Error::new(io::ErrorKind::InvalidData, e)); + } + }; + + match (eof, until_handshaked, self.is_handshaking()) { + (_, true, false) => return Ok((rdlen, wrlen)), + (_, false, _) => return Ok((rdlen, wrlen)), + (true, true, true) => return Err(io::Error::from(io::ErrorKind::UnexpectedEof)), + (..) => {} + } + } + } + + /// Extract the first handshake message. + /// + /// This is a shortcut to the `process_new_packets()` -> `process_msg()` -> + /// `process_handshake_messages()` path, specialized for the first handshake message. + pub(crate) fn first_handshake_message(&mut self) -> Result, Error> { + let msg = match self.message_deframer.pop()? 
{ + Some(msg) => msg, + None => return Ok(None), + }; + + let msg = msg.into_plain_message(); + self.handshake_joiner + .push(msg) + .and_then(|aligned| { + self.common_state.aligned_handshake = aligned; + self.handshake_joiner.pop() + }) + .map_err(|_| { + self.common_state + .send_fatal_alert(AlertDescription::DecodeError); + Error::CorruptMessagePayload(ContentType::Handshake) + }) + } + + pub(crate) fn replace_state(&mut self, new: Box>) { + self.state = Ok(new); + } + + fn process_msg( + &mut self, + msg: OpaqueMessage, + state: Box>, + ) -> Result>, Error> { + // Drop CCS messages during handshake in TLS1.3 + if msg.typ == ContentType::ChangeCipherSpec + && !self + .common_state + .may_receive_application_data + && self.common_state.is_tls13() + { + if !is_valid_ccs(&msg) + || self.common_state.received_middlebox_ccs > TLS13_MAX_DROPPED_CCS + { + // "An implementation which receives any other change_cipher_spec value or + // which receives a protected change_cipher_spec record MUST abort the + // handshake with an "unexpected_message" alert." + self.common_state + .send_fatal_alert(AlertDescription::UnexpectedMessage); + return Err(Error::PeerMisbehavedError( + "illegal middlebox CCS received".into(), + )); + } else { + self.common_state.received_middlebox_ccs += 1; + trace!("Dropping CCS"); + return Ok(state); + } + } + + // Decrypt if demanded by current state. + let msg = match self + .common_state + .record_layer + .is_decrypting() + { + true => match self.common_state.decrypt_incoming(msg) { + Ok(None) => { + // message dropped + return Ok(state); + } + Err(e) => { + return Err(e); + } + Ok(Some(msg)) => msg, + }, + false => msg.into_plain_message(), + }; + + // For handshake messages, we need to join them before parsing and processing. + let msg = match self.handshake_joiner.push(msg) { + // Handshake message, we handle these in another method. 
+ Ok(aligned) => { + self.common_state.aligned_handshake = aligned; + + // First decryptable handshake message concludes trial decryption + self.common_state + .record_layer + .finish_trial_decryption(); + + return self.process_new_handshake_messages(state); + } + // Not a handshake message, continue to handle it here. + Err(JoinerError::Unwanted(msg)) => msg, + // Decoding the handshake message failed, yield an error. + Err(JoinerError::Decode) => { + self.common_state + .send_fatal_alert(AlertDescription::DecodeError); + return Err(Error::CorruptMessagePayload(ContentType::Handshake)); + } + }; + + // Now we can fully parse the message payload. + let msg = Message::try_from(msg)?; + + // For alerts, we have separate logic. + if let MessagePayload::Alert(alert) = &msg.payload { + self.common_state.process_alert(alert)?; + return Ok(state); + } + + self.common_state + .process_main_protocol(msg, state, &mut self.data) + } + + /// Processes any new packets read by a previous call to + /// [`Connection::read_tls`]. + /// + /// Errors from this function relate to TLS protocol errors, and + /// are fatal to the connection. Future calls after an error will do + /// no new work and will return the same error. After an error is + /// received from [`process_new_packets`], you should not call [`read_tls`] + /// any more (it will fill up buffers to no purpose). However, you + /// may call the other methods on the connection, including `write`, + /// `send_close_notify`, and `write_tls`. Most likely you will want to + /// call `write_tls` to send any alerts queued by the error and then + /// close the underlying connection. + /// + /// Success from this function comes with some sundry state data + /// about the connection. 
+ /// + /// [`read_tls`]: Connection::read_tls + /// [`process_new_packets`]: Connection::process_new_packets + pub fn process_new_packets(&mut self) -> Result { + let mut state = match mem::replace(&mut self.state, Err(Error::HandshakeNotComplete)) { + Ok(state) => state, + Err(e) => { + self.state = Err(e.clone()); + return Err(e); + } + }; + + while let Some(msg) = self.message_deframer.pop()? { + match self.process_msg(msg, state) { + Ok(new) => state = new, + Err(e) => { + self.state = Err(e.clone()); + return Err(e); + } + } + } + + self.state = Ok(state); + Ok(self.common_state.current_io_state()) + } + + fn process_new_handshake_messages( + &mut self, + mut state: Box>, + ) -> Result>, Error> { + loop { + match self.handshake_joiner.pop() { + Ok(Some(msg)) => { + state = self + .common_state + .process_main_protocol(msg, state, &mut self.data)?; + } + Ok(None) => return Ok(state), + Err(_) => { + #[cfg(feature = "quic")] + if self.common_state.is_quic() { + self.common_state.quic.alert = Some(AlertDescription::DecodeError); + } + + if !self.common_state.is_quic() { + self.common_state + .send_fatal_alert(AlertDescription::DecodeError); + } + + return Err(Error::CorruptMessagePayload(ContentType::Handshake)); + } + } + } + } + + pub(crate) fn send_some_plaintext(&mut self, buf: &[u8]) -> usize { + if let Ok(st) = &mut self.state { + st.perhaps_write_key_update(&mut self.common_state); + } + self.common_state + .send_some_plaintext(buf) + } + + /// Read TLS content from `rd` into the internal buffer. + /// + /// Due to the internal buffering, `rd` can supply TLS messages in arbitrary-sized chunks (like + /// a socket or pipe might). + /// + /// You should call [`process_new_packets()`] each time a call to this function succeeds in order + /// to empty the incoming TLS data buffer. + /// + /// This function returns `Ok(0)` when the underlying `rd` does so. This typically happens when + /// a socket is cleanly closed, or a file is at EOF. 
Errors may result from the IO done through + /// `rd`; additionally, errors of `ErrorKind::Other` are emitted to signal backpressure: + /// + /// * In order to empty the incoming TLS data buffer, you should call [`process_new_packets()`] + /// each time a call to this function succeeds. + /// * In order to empty the incoming plaintext data buffer, you should empty it through + /// the [`reader()`] after the call to [`process_new_packets()`]. + /// + /// [`process_new_packets()`]: ConnectionCommon::process_new_packets + /// [`reader()`]: ConnectionCommon::reader + pub fn read_tls(&mut self, rd: &mut dyn io::Read) -> Result { + if self.received_plaintext.is_full() { + return Err(io::Error::new( + io::ErrorKind::Other, + "received plaintext buffer full", + )); + } + + let res = self.message_deframer.read(rd); + if let Ok(0) = res { + self.common_state.has_seen_eof = true; + } + res + } + + /// Derives key material from the agreed connection secrets. + /// + /// This function fills in `output` with `output.len()` bytes of key + /// material derived from the master session secret using `label` + /// and `context` for diversification. + /// + /// See RFC5705 for more details on what this does and is for. + /// + /// For TLS1.3 connections, this function does not use the + /// "early" exporter at any point. + /// + /// This function fails if called prior to the handshake completing; + /// check with [`CommonState::is_handshaking`] first. + pub fn export_keying_material( + &self, + output: &mut [u8], + label: &[u8], + context: Option<&[u8]>, + ) -> Result<(), Error> { + match self.state.as_ref() { + Ok(st) => st.export_keying_material(output, label, context), + Err(e) => Err(e.clone()), + } + } + + /// Extract secrets, so they can be used when configuring kTLS, for example. 
+ #[cfg(feature = "secret_extraction")] + pub fn extract_secrets(self) -> Result { + if !self.enable_secret_extraction { + return Err(Error::General("Secret extraction is disabled".into())); + } + + let st = self.state?; + + let record_layer = self.common_state.record_layer; + let PartiallyExtractedSecrets { tx, rx } = st.extract_secrets()?; + Ok(ExtractedSecrets { + tx: (record_layer.write_seq(), tx), + rx: (record_layer.read_seq(), rx), + }) + } +} + +#[cfg(feature = "quic")] +impl ConnectionCommon { + pub(crate) fn read_quic_hs(&mut self, plaintext: &[u8]) -> Result<(), Error> { + let state = match mem::replace(&mut self.state, Err(Error::HandshakeNotComplete)) { + Ok(state) => state, + Err(e) => { + self.state = Err(e.clone()); + return Err(e); + } + }; + + let msg = PlainMessage { + typ: ContentType::Handshake, + version: ProtocolVersion::TLSv1_3, + payload: Payload::new(plaintext.to_vec()), + }; + + if self.handshake_joiner.push(msg).is_err() { + self.common_state.quic.alert = Some(AlertDescription::DecodeError); + return Err(Error::CorruptMessage); + } + + self.process_new_handshake_messages(state) + .map(|state| self.state = Ok(state)) + } +} + +impl Deref for ConnectionCommon { + type Target = CommonState; + + fn deref(&self) -> &Self::Target { + &self.common_state + } +} + +impl DerefMut for ConnectionCommon { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.common_state + } +} + +/// Connection state common to both client and server connections. +pub struct CommonState { + pub(crate) negotiated_version: Option, + pub(crate) side: Side, + pub(crate) record_layer: record_layer::RecordLayer, + pub(crate) suite: Option, + pub(crate) alpn_protocol: Option>, + aligned_handshake: bool, + pub(crate) may_send_application_data: bool, + pub(crate) may_receive_application_data: bool, + pub(crate) early_traffic: bool, + sent_fatal_alert: bool, + /// If the peer has signaled end of stream. 
+ has_received_close_notify: bool, + has_seen_eof: bool, + received_middlebox_ccs: u8, + pub(crate) peer_certificates: Option>, + message_fragmenter: MessageFragmenter, + received_plaintext: ChunkVecBuffer, + sendable_plaintext: ChunkVecBuffer, + pub(crate) sendable_tls: ChunkVecBuffer, + #[allow(dead_code)] // only read for QUIC + /// Protocol whose key schedule should be used. Unused for TLS < 1.3. + pub(crate) protocol: Protocol, + #[cfg(feature = "quic")] + pub(crate) quic: Quic, + #[cfg(feature = "secret_extraction")] + pub(crate) enable_secret_extraction: bool, +} + +impl CommonState { + pub(crate) fn new(side: Side) -> Self { + Self { + negotiated_version: None, + side, + record_layer: record_layer::RecordLayer::new(), + suite: None, + alpn_protocol: None, + aligned_handshake: true, + may_send_application_data: false, + may_receive_application_data: false, + early_traffic: false, + sent_fatal_alert: false, + has_received_close_notify: false, + has_seen_eof: false, + received_middlebox_ccs: 0, + peer_certificates: None, + message_fragmenter: MessageFragmenter::default(), + received_plaintext: ChunkVecBuffer::new(Some(DEFAULT_RECEIVED_PLAINTEXT_LIMIT)), + sendable_plaintext: ChunkVecBuffer::new(Some(DEFAULT_BUFFER_LIMIT)), + sendable_tls: ChunkVecBuffer::new(Some(DEFAULT_BUFFER_LIMIT)), + + protocol: Protocol::Tcp, + #[cfg(feature = "quic")] + quic: Quic::new(), + #[cfg(feature = "secret_extraction")] + enable_secret_extraction: false, + } + } + + /// Returns true if the caller should call [`CommonState::write_tls`] as soon + /// as possible. + pub fn wants_write(&self) -> bool { + !self.sendable_tls.is_empty() + } + + /// Returns true if the connection is currently performing the TLS handshake. + /// + /// During this time plaintext written to the connection is buffered in memory. 
After + /// [`Connection::process_new_packets`] has been called, this might start to return `false` + /// while the final handshake packets still need to be extracted from the connection's buffers. + pub fn is_handshaking(&self) -> bool { + !(self.may_send_application_data && self.may_receive_application_data) + } + + /// Retrieves the certificate chain used by the peer to authenticate. + /// + /// The order of the certificate chain is as it appears in the TLS + /// protocol: the first certificate relates to the peer, the + /// second certifies the first, the third certifies the second, and + /// so on. + /// + /// This is made available for both full and resumed handshakes. + /// + /// For clients, this is the certificate chain of the server. + /// + /// For servers, this is the certificate chain of the client, + /// if client authentication was completed. + /// + /// The return value is None until this value is available. + pub fn peer_certificates(&self) -> Option<&[key::Certificate]> { + self.peer_certificates.as_deref() + } + + /// Retrieves the protocol agreed with the peer via ALPN. + /// + /// A return value of `None` after handshake completion + /// means no protocol was agreed (because no protocols + /// were offered or accepted by the peer). + pub fn alpn_protocol(&self) -> Option<&[u8]> { + self.get_alpn_protocol() + } + + /// Retrieves the ciphersuite agreed with the peer. + /// + /// This returns None until the ciphersuite is agreed. + pub fn negotiated_cipher_suite(&self) -> Option { + self.suite + } + + /// Retrieves the protocol version agreed with the peer. + /// + /// This returns `None` until the version is agreed. 
+ pub fn protocol_version(&self) -> Option { + self.negotiated_version + } + + pub(crate) fn is_tls13(&self) -> bool { + matches!(self.negotiated_version, Some(ProtocolVersion::TLSv1_3)) + } + + fn process_main_protocol( + &mut self, + msg: Message, + mut state: Box>, + data: &mut Data, + ) -> Result>, Error> { + // For TLS1.2, outside of the handshake, send rejection alerts for + // renegotiation requests. These can occur any time. + if self.may_receive_application_data && !self.is_tls13() { + let reject_ty = match self.side { + Side::Client => HandshakeType::HelloRequest, + Side::Server => HandshakeType::ClientHello, + }; + if msg.is_handshake_type(reject_ty) { + self.send_warning_alert(AlertDescription::NoRenegotiation); + return Ok(state); + } + } + + let mut cx = Context { common: self, data }; + match state.handle(&mut cx, msg) { + Ok(next) => { + state = next; + Ok(state) + } + Err(e @ Error::InappropriateMessage { .. }) + | Err(e @ Error::InappropriateHandshakeMessage { .. }) => { + self.send_fatal_alert(AlertDescription::UnexpectedMessage); + Err(e) + } + Err(e) => Err(e), + } + } + + /// Send plaintext application data, fragmenting and + /// encrypting it as it goes out. + /// + /// If internal buffers are too small, this function will not accept + /// all the data. + pub(crate) fn send_some_plaintext(&mut self, data: &[u8]) -> usize { + self.send_plain(data, Limit::Yes) + } + + pub(crate) fn send_early_plaintext(&mut self, data: &[u8]) -> usize { + debug_assert!(self.early_traffic); + debug_assert!(self.record_layer.is_encrypting()); + + if data.is_empty() { + // Don't send empty fragments. + return 0; + } + + self.send_appdata_encrypt(data, Limit::Yes) + } + + // Changing the keys must not span any fragmented handshake + // messages. Otherwise the defragmented messages will have + // been protected with two different record layer protections, + // which is illegal. Not mentioned in RFC. 
+ pub(crate) fn check_aligned_handshake(&mut self) -> Result<(), Error> { + if !self.aligned_handshake { + self.send_fatal_alert(AlertDescription::UnexpectedMessage); + Err(Error::PeerMisbehavedError( + "key epoch or handshake flight with pending fragment".to_string(), + )) + } else { + Ok(()) + } + } + + pub(crate) fn illegal_param(&mut self, why: &str) -> Error { + self.send_fatal_alert(AlertDescription::IllegalParameter); + Error::PeerMisbehavedError(why.to_string()) + } + + pub(crate) fn decrypt_incoming( + &mut self, + encr: OpaqueMessage, + ) -> Result, Error> { + if self + .record_layer + .wants_close_before_decrypt() + { + self.send_close_notify(); + } + + let encrypted_len = encr.payload.0.len(); + let plain = self.record_layer.decrypt_incoming(encr); + + match plain { + Err(Error::PeerSentOversizedRecord) => { + self.send_fatal_alert(AlertDescription::RecordOverflow); + Err(Error::PeerSentOversizedRecord) + } + Err(Error::DecryptError) + if self + .record_layer + .doing_trial_decryption(encrypted_len) => + { + trace!("Dropping undecryptable message after aborted early_data"); + Ok(None) + } + Err(Error::DecryptError) => { + self.send_fatal_alert(AlertDescription::BadRecordMac); + Err(Error::DecryptError) + } + Err(e) => Err(e), + Ok(plain) => Ok(Some(plain)), + } + } + + /// Fragment `m`, encrypt the fragments, and then queue + /// the encrypted fragments for sending. + pub(crate) fn send_msg_encrypt(&mut self, m: PlainMessage) { + let iter = self + .message_fragmenter + .fragment_message(&m); + for m in iter { + self.send_single_fragment(m); + } + } + + /// Like send_msg_encrypt, but operate on an appdata directly. + fn send_appdata_encrypt(&mut self, payload: &[u8], limit: Limit) -> usize { + // Here, the limit on sendable_tls applies to encrypted data, + // but we're respecting it for plaintext data -- so we'll + // be out by whatever the cipher+record overhead is. That's a + // constant and predictable amount, so it's not a terrible issue. 
+ let len = match limit { + Limit::Yes => self + .sendable_tls + .apply_limit(payload.len()), + Limit::No => payload.len(), + }; + + let iter = self.message_fragmenter.fragment_slice( + ContentType::ApplicationData, + ProtocolVersion::TLSv1_2, + &payload[..len], + ); + for m in iter { + self.send_single_fragment(m); + } + + len + } + + fn send_single_fragment(&mut self, m: BorrowedPlainMessage) { + // Close connection once we start to run out of + // sequence space. + if self + .record_layer + .wants_close_before_encrypt() + { + self.send_close_notify(); + } + + // Refuse to wrap counter at all costs. This + // is basically untestable unfortunately. + if self.record_layer.encrypt_exhausted() { + return; + } + + let em = self.record_layer.encrypt_outgoing(m); + self.queue_tls_message(em); + } + + /// Writes TLS messages to `wr`. + /// + /// On success, this function returns `Ok(n)` where `n` is a number of bytes written to `wr` + /// (after encoding and encryption). + /// + /// After this function returns, the connection buffer may not yet be fully flushed. The + /// [`CommonState::wants_write`] function can be used to check if the output buffer is empty. + pub fn write_tls(&mut self, wr: &mut dyn io::Write) -> Result { + self.sendable_tls.write_to(wr) + } + + /// Encrypt and send some plaintext `data`. `limit` controls + /// whether the per-connection buffer limits apply. + /// + /// Returns the number of bytes written from `data`: this might + /// be less than `data.len()` if buffer limits were exceeded. + fn send_plain(&mut self, data: &[u8], limit: Limit) -> usize { + if !self.may_send_application_data { + // If we haven't completed handshaking, buffer + // plaintext to send once we do. 
+ let len = match limit { + Limit::Yes => self + .sendable_plaintext + .append_limited_copy(data), + Limit::No => self + .sendable_plaintext + .append(data.to_vec()), + }; + return len; + } + + debug_assert!(self.record_layer.is_encrypting()); + + if data.is_empty() { + // Don't send empty fragments. + return 0; + } + + self.send_appdata_encrypt(data, limit) + } + + pub(crate) fn start_outgoing_traffic(&mut self) { + self.may_send_application_data = true; + self.flush_plaintext(); + } + + pub(crate) fn start_traffic(&mut self) { + self.may_receive_application_data = true; + self.start_outgoing_traffic(); + } + + /// Sets a limit on the internal buffers used to buffer + /// unsent plaintext (prior to completing the TLS handshake) + /// and unsent TLS records. This limit acts only on application + /// data written through [`Connection::writer`]. + /// + /// By default the limit is 64KB. The limit can be set + /// at any time, even if the current buffer use is higher. + /// + /// [`None`] means no limit applies, and will mean that written + /// data is buffered without bound -- it is up to the application + /// to appropriately schedule its plaintext and TLS writes to bound + /// memory usage. + /// + /// For illustration: `Some(1)` means a limit of one byte applies: + /// [`Connection::writer`] will accept only one byte, encrypt it and + /// add a TLS header. Once this is sent via [`CommonState::write_tls`], + /// another byte may be sent. + /// + /// # Internal write-direction buffering + /// rustls has two buffers whose size are bounded by this setting: + /// + /// ## Buffering of unsent plaintext data prior to handshake completion + /// + /// Calls to [`Connection::writer`] before or during the handshake + /// are buffered (up to the limit specified here). Once the + /// handshake completes this data is encrypted and the resulting + /// TLS records are added to the outgoing buffer. 
+ /// + /// ## Buffering of outgoing TLS records + /// + /// This buffer is used to store TLS records that rustls needs to + /// send to the peer. It is used in these two circumstances: + /// + /// - by [`Connection::process_new_packets`] when a handshake or alert + /// TLS record needs to be sent. + /// - by [`Connection::writer`] post-handshake: the plaintext is + /// encrypted and the resulting TLS record is buffered. + /// + /// This buffer is emptied by [`CommonState::write_tls`]. + pub fn set_buffer_limit(&mut self, limit: Option) { + self.sendable_plaintext.set_limit(limit); + self.sendable_tls.set_limit(limit); + } + + /// Send any buffered plaintext. Plaintext is buffered if + /// written during handshake. + fn flush_plaintext(&mut self) { + if !self.may_send_application_data { + return; + } + + while let Some(buf) = self.sendable_plaintext.pop() { + self.send_plain(&buf, Limit::No); + } + } + + // Put m into sendable_tls for writing. + fn queue_tls_message(&mut self, m: OpaqueMessage) { + self.sendable_tls.append(m.encode()); + } + + /// Send a raw TLS message, fragmenting it if needed. + pub(crate) fn send_msg(&mut self, m: Message, must_encrypt: bool) { + #[cfg(feature = "quic")] + { + if let Protocol::Quic = self.protocol { + if let MessagePayload::Alert(alert) = m.payload { + self.quic.alert = Some(alert.description); + } else { + debug_assert!( + matches!(m.payload, MessagePayload::Handshake { .. 
}), + "QUIC uses TLS for the cryptographic handshake only" + ); + let mut bytes = Vec::new(); + m.payload.encode(&mut bytes); + self.quic + .hs_queue + .push_back((must_encrypt, bytes)); + } + return; + } + } + if !must_encrypt { + let msg = &m.into(); + let iter = self + .message_fragmenter + .fragment_message(msg); + for m in iter { + self.queue_tls_message(m.to_unencrypted_opaque()); + } + } else { + self.send_msg_encrypt(m.into()); + } + } + + pub(crate) fn take_received_plaintext(&mut self, bytes: Payload) { + self.received_plaintext.append(bytes.0); + } + + #[cfg(feature = "tls12")] + pub(crate) fn start_encryption_tls12(&mut self, secrets: &ConnectionSecrets, side: Side) { + let (dec, enc) = secrets.make_cipher_pair(side); + self.record_layer + .prepare_message_encrypter(enc); + self.record_layer + .prepare_message_decrypter(dec); + } + + #[cfg(feature = "quic")] + pub(crate) fn missing_extension(&mut self, why: &str) -> Error { + self.send_fatal_alert(AlertDescription::MissingExtension); + Error::PeerMisbehavedError(why.to_string()) + } + + fn send_warning_alert(&mut self, desc: AlertDescription) { + warn!("Sending warning alert {:?}", desc); + self.send_warning_alert_no_log(desc); + } + + fn process_alert(&mut self, alert: &AlertMessagePayload) -> Result<(), Error> { + // Reject unknown AlertLevels. + if let AlertLevel::Unknown(_) = alert.level { + self.send_fatal_alert(AlertDescription::IllegalParameter); + } + + // If we get a CloseNotify, make a note to declare EOF to our + // caller. + if alert.description == AlertDescription::CloseNotify { + self.has_received_close_notify = true; + return Ok(()); + } + + // Warnings are nonfatal for TLS1.2, but outlawed in TLS1.3 + // (except, for no good reason, user_cancelled). 
+ if alert.level == AlertLevel::Warning { + if self.is_tls13() && alert.description != AlertDescription::UserCanceled { + self.send_fatal_alert(AlertDescription::DecodeError); + } else { + warn!("TLS alert warning received: {:#?}", alert); + return Ok(()); + } + } + + error!("TLS alert received: {:#?}", alert); + Err(Error::AlertReceived(alert.description)) + } + + pub(crate) fn send_fatal_alert(&mut self, desc: AlertDescription) { + warn!("Sending fatal alert {:?}", desc); + debug_assert!(!self.sent_fatal_alert); + let m = Message::build_alert(AlertLevel::Fatal, desc); + self.send_msg(m, self.record_layer.is_encrypting()); + self.sent_fatal_alert = true; + } + + /// Queues a close_notify warning alert to be sent in the next + /// [`CommonState::write_tls`] call. This informs the peer that the + /// connection is being closed. + pub fn send_close_notify(&mut self) { + debug!("Sending warning alert {:?}", AlertDescription::CloseNotify); + self.send_warning_alert_no_log(AlertDescription::CloseNotify); + } + + fn send_warning_alert_no_log(&mut self, desc: AlertDescription) { + let m = Message::build_alert(AlertLevel::Warning, desc); + self.send_msg(m, self.record_layer.is_encrypting()); + } + + pub(crate) fn set_max_fragment_size(&mut self, new: Option) -> Result<(), Error> { + self.message_fragmenter + .set_max_fragment_size(new) + } + + pub(crate) fn get_alpn_protocol(&self) -> Option<&[u8]> { + self.alpn_protocol + .as_ref() + .map(AsRef::as_ref) + } + + /// Returns true if the caller should call [`Connection::read_tls`] as soon + /// as possible. + /// + /// If there is pending plaintext data to read with [`Connection::reader`], + /// this returns false. If your application respects this mechanism, + /// only one full TLS message will be buffered by rustls. + pub fn wants_read(&self) -> bool { + // We want to read more data all the time, except when we have unprocessed plaintext. + // This provides back-pressure to the TCP buffers. 
We also don't want to read more after + // the peer has sent us a close notification. + // + // In the handshake case we don't have readable plaintext before the handshake has + // completed, but also don't want to read if we still have sendable tls. + self.received_plaintext.is_empty() + && !self.has_received_close_notify + && (self.may_send_application_data || self.sendable_tls.is_empty()) + } + + fn current_io_state(&self) -> IoState { + IoState { + tls_bytes_to_write: self.sendable_tls.len(), + plaintext_bytes_to_read: self.received_plaintext.len(), + peer_has_closed: self.has_received_close_notify, + } + } + + pub(crate) fn is_quic(&self) -> bool { + #[cfg(feature = "quic")] + { + self.protocol == Protocol::Quic + } + #[cfg(not(feature = "quic"))] + false + } +} + +pub(crate) trait State: Send + Sync { + fn handle( + self: Box, + cx: &mut Context<'_, Data>, + message: Message, + ) -> Result>, Error>; + + fn export_keying_material( + &self, + _output: &mut [u8], + _label: &[u8], + _context: Option<&[u8]>, + ) -> Result<(), Error> { + Err(Error::HandshakeNotComplete) + } + + #[cfg(feature = "secret_extraction")] + fn extract_secrets(&self) -> Result { + Err(Error::HandshakeNotComplete) + } + + fn perhaps_write_key_update(&mut self, _cx: &mut CommonState) {} +} + +pub(crate) struct Context<'a, Data> { + pub(crate) common: &'a mut CommonState, + pub(crate) data: &'a mut Data, +} + +#[cfg(feature = "quic")] +pub(crate) struct Quic { + /// QUIC transport parameters received from the peer during the handshake + pub(crate) params: Option>, + pub(crate) alert: Option, + pub(crate) hs_queue: VecDeque<(bool, Vec)>, + pub(crate) early_secret: Option, + pub(crate) hs_secrets: Option, + pub(crate) traffic_secrets: Option, + /// Whether keys derived from traffic_secrets have been passed to the QUIC implementation + pub(crate) returned_traffic_keys: bool, +} + +#[cfg(feature = "quic")] +impl Quic { + fn new() -> Self { + Self { + params: None, + alert: None, + hs_queue: 
VecDeque::new(), + early_secret: None, + hs_secrets: None, + traffic_secrets: None, + returned_traffic_keys: false, + } + } +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub(crate) enum Side { + Client, + Server, +} + +/// Data specific to the peer's side (client or server). +pub trait SideData {} + +const DEFAULT_RECEIVED_PLAINTEXT_LIMIT: usize = 16 * 1024; +const DEFAULT_BUFFER_LIMIT: usize = 64 * 1024; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/enums.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/enums.rs new file mode 100644 index 0000000000000000000000000000000000000000..20c39ee4b0c4ab4c1770cfeaf8d6b08e5b54632c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/enums.rs @@ -0,0 +1,431 @@ +#![allow(non_camel_case_types)] +#![allow(missing_docs)] +use crate::msgs::codec::{Codec, Reader}; + +enum_builder! { + /// The `ProtocolVersion` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U16 + EnumName: ProtocolVersion; + EnumVal{ + SSLv2 => 0x0200, + SSLv3 => 0x0300, + TLSv1_0 => 0x0301, + TLSv1_1 => 0x0302, + TLSv1_2 => 0x0303, + TLSv1_3 => 0x0304, + DTLSv1_0 => 0xFEFF, + DTLSv1_2 => 0xFEFD, + DTLSv1_3 => 0xFEFC + } +} + +enum_builder! { + /// The `CipherSuite` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. 
+ @U16 + EnumName: CipherSuite; + EnumVal{ + TLS_NULL_WITH_NULL_NULL => 0x0000, + TLS_RSA_WITH_NULL_MD5 => 0x0001, + TLS_RSA_WITH_NULL_SHA => 0x0002, + TLS_RSA_EXPORT_WITH_RC4_40_MD5 => 0x0003, + TLS_RSA_WITH_RC4_128_MD5 => 0x0004, + TLS_RSA_WITH_RC4_128_SHA => 0x0005, + TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 => 0x0006, + TLS_RSA_WITH_IDEA_CBC_SHA => 0x0007, + TLS_RSA_EXPORT_WITH_DES40_CBC_SHA => 0x0008, + TLS_RSA_WITH_DES_CBC_SHA => 0x0009, + TLS_RSA_WITH_3DES_EDE_CBC_SHA => 0x000a, + TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA => 0x000b, + TLS_DH_DSS_WITH_DES_CBC_SHA => 0x000c, + TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA => 0x000d, + TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA => 0x000e, + TLS_DH_RSA_WITH_DES_CBC_SHA => 0x000f, + TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA => 0x0010, + TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA => 0x0011, + TLS_DHE_DSS_WITH_DES_CBC_SHA => 0x0012, + TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA => 0x0013, + TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA => 0x0014, + TLS_DHE_RSA_WITH_DES_CBC_SHA => 0x0015, + TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA => 0x0016, + TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 => 0x0017, + TLS_DH_anon_WITH_RC4_128_MD5 => 0x0018, + TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA => 0x0019, + TLS_DH_anon_WITH_DES_CBC_SHA => 0x001a, + TLS_DH_anon_WITH_3DES_EDE_CBC_SHA => 0x001b, + SSL_FORTEZZA_KEA_WITH_NULL_SHA => 0x001c, + SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA => 0x001d, + TLS_KRB5_WITH_DES_CBC_SHA_or_SSL_FORTEZZA_KEA_WITH_RC4_128_SHA => 0x001e, + TLS_KRB5_WITH_3DES_EDE_CBC_SHA => 0x001f, + TLS_KRB5_WITH_RC4_128_SHA => 0x0020, + TLS_KRB5_WITH_IDEA_CBC_SHA => 0x0021, + TLS_KRB5_WITH_DES_CBC_MD5 => 0x0022, + TLS_KRB5_WITH_3DES_EDE_CBC_MD5 => 0x0023, + TLS_KRB5_WITH_RC4_128_MD5 => 0x0024, + TLS_KRB5_WITH_IDEA_CBC_MD5 => 0x0025, + TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA => 0x0026, + TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA => 0x0027, + TLS_KRB5_EXPORT_WITH_RC4_40_SHA => 0x0028, + TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 => 0x0029, + TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 => 0x002a, + 
TLS_KRB5_EXPORT_WITH_RC4_40_MD5 => 0x002b, + TLS_PSK_WITH_NULL_SHA => 0x002c, + TLS_DHE_PSK_WITH_NULL_SHA => 0x002d, + TLS_RSA_PSK_WITH_NULL_SHA => 0x002e, + TLS_RSA_WITH_AES_128_CBC_SHA => 0x002f, + TLS_DH_DSS_WITH_AES_128_CBC_SHA => 0x0030, + TLS_DH_RSA_WITH_AES_128_CBC_SHA => 0x0031, + TLS_DHE_DSS_WITH_AES_128_CBC_SHA => 0x0032, + TLS_DHE_RSA_WITH_AES_128_CBC_SHA => 0x0033, + TLS_DH_anon_WITH_AES_128_CBC_SHA => 0x0034, + TLS_RSA_WITH_AES_256_CBC_SHA => 0x0035, + TLS_DH_DSS_WITH_AES_256_CBC_SHA => 0x0036, + TLS_DH_RSA_WITH_AES_256_CBC_SHA => 0x0037, + TLS_DHE_DSS_WITH_AES_256_CBC_SHA => 0x0038, + TLS_DHE_RSA_WITH_AES_256_CBC_SHA => 0x0039, + TLS_DH_anon_WITH_AES_256_CBC_SHA => 0x003a, + TLS_RSA_WITH_NULL_SHA256 => 0x003b, + TLS_RSA_WITH_AES_128_CBC_SHA256 => 0x003c, + TLS_RSA_WITH_AES_256_CBC_SHA256 => 0x003d, + TLS_DH_DSS_WITH_AES_128_CBC_SHA256 => 0x003e, + TLS_DH_RSA_WITH_AES_128_CBC_SHA256 => 0x003f, + TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 => 0x0040, + TLS_RSA_WITH_CAMELLIA_128_CBC_SHA => 0x0041, + TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA => 0x0042, + TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA => 0x0043, + TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA => 0x0044, + TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA => 0x0045, + TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA => 0x0046, + TLS_ECDH_ECDSA_WITH_NULL_SHA_draft => 0x0047, + TLS_ECDH_ECDSA_WITH_RC4_128_SHA_draft => 0x0048, + TLS_ECDH_ECDSA_WITH_DES_CBC_SHA_draft => 0x0049, + TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA_draft => 0x004a, + TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA_draft => 0x004b, + TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA_draft => 0x004c, + TLS_ECDH_ECNRA_WITH_DES_CBC_SHA_draft => 0x004d, + TLS_ECDH_ECNRA_WITH_3DES_EDE_CBC_SHA_draft => 0x004e, + TLS_ECMQV_ECDSA_NULL_SHA_draft => 0x004f, + TLS_ECMQV_ECDSA_WITH_RC4_128_SHA_draft => 0x0050, + TLS_ECMQV_ECDSA_WITH_DES_CBC_SHA_draft => 0x0051, + TLS_ECMQV_ECDSA_WITH_3DES_EDE_CBC_SHA_draft => 0x0052, + TLS_ECMQV_ECNRA_NULL_SHA_draft => 0x0053, + TLS_ECMQV_ECNRA_WITH_RC4_128_SHA_draft => 0x0054, + 
TLS_ECMQV_ECNRA_WITH_DES_CBC_SHA_draft => 0x0055, + TLS_ECMQV_ECNRA_WITH_3DES_EDE_CBC_SHA_draft => 0x0056, + TLS_ECDH_anon_NULL_WITH_SHA_draft => 0x0057, + TLS_ECDH_anon_WITH_RC4_128_SHA_draft => 0x0058, + TLS_ECDH_anon_WITH_DES_CBC_SHA_draft => 0x0059, + TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA_draft => 0x005a, + TLS_ECDH_anon_EXPORT_WITH_DES40_CBC_SHA_draft => 0x005b, + TLS_ECDH_anon_EXPORT_WITH_RC4_40_SHA_draft => 0x005c, + TLS_RSA_EXPORT1024_WITH_RC4_56_MD5 => 0x0060, + TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5 => 0x0061, + TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA => 0x0062, + TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA => 0x0063, + TLS_RSA_EXPORT1024_WITH_RC4_56_SHA => 0x0064, + TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA => 0x0065, + TLS_DHE_DSS_WITH_RC4_128_SHA => 0x0066, + TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 => 0x0067, + TLS_DH_DSS_WITH_AES_256_CBC_SHA256 => 0x0068, + TLS_DH_RSA_WITH_AES_256_CBC_SHA256 => 0x0069, + TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 => 0x006a, + TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 => 0x006b, + TLS_DH_anon_WITH_AES_128_CBC_SHA256 => 0x006c, + TLS_DH_anon_WITH_AES_256_CBC_SHA256 => 0x006d, + TLS_DHE_DSS_WITH_3DES_EDE_CBC_RMD => 0x0072, + TLS_DHE_DSS_WITH_AES_128_CBC_RMD => 0x0073, + TLS_DHE_DSS_WITH_AES_256_CBC_RMD => 0x0074, + TLS_DHE_RSA_WITH_3DES_EDE_CBC_RMD => 0x0077, + TLS_DHE_RSA_WITH_AES_128_CBC_RMD => 0x0078, + TLS_DHE_RSA_WITH_AES_256_CBC_RMD => 0x0079, + TLS_RSA_WITH_3DES_EDE_CBC_RMD => 0x007c, + TLS_RSA_WITH_AES_128_CBC_RMD => 0x007d, + TLS_RSA_WITH_AES_256_CBC_RMD => 0x007e, + TLS_GOSTR341094_WITH_28147_CNT_IMIT => 0x0080, + TLS_GOSTR341001_WITH_28147_CNT_IMIT => 0x0081, + TLS_GOSTR341094_WITH_NULL_GOSTR3411 => 0x0082, + TLS_GOSTR341001_WITH_NULL_GOSTR3411 => 0x0083, + TLS_RSA_WITH_CAMELLIA_256_CBC_SHA => 0x0084, + TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA => 0x0085, + TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA => 0x0086, + TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA => 0x0087, + TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA => 0x0088, + TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA 
=> 0x0089, + TLS_PSK_WITH_RC4_128_SHA => 0x008a, + TLS_PSK_WITH_3DES_EDE_CBC_SHA => 0x008b, + TLS_PSK_WITH_AES_128_CBC_SHA => 0x008c, + TLS_PSK_WITH_AES_256_CBC_SHA => 0x008d, + TLS_DHE_PSK_WITH_RC4_128_SHA => 0x008e, + TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA => 0x008f, + TLS_DHE_PSK_WITH_AES_128_CBC_SHA => 0x0090, + TLS_DHE_PSK_WITH_AES_256_CBC_SHA => 0x0091, + TLS_RSA_PSK_WITH_RC4_128_SHA => 0x0092, + TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA => 0x0093, + TLS_RSA_PSK_WITH_AES_128_CBC_SHA => 0x0094, + TLS_RSA_PSK_WITH_AES_256_CBC_SHA => 0x0095, + TLS_RSA_WITH_SEED_CBC_SHA => 0x0096, + TLS_DH_DSS_WITH_SEED_CBC_SHA => 0x0097, + TLS_DH_RSA_WITH_SEED_CBC_SHA => 0x0098, + TLS_DHE_DSS_WITH_SEED_CBC_SHA => 0x0099, + TLS_DHE_RSA_WITH_SEED_CBC_SHA => 0x009a, + TLS_DH_anon_WITH_SEED_CBC_SHA => 0x009b, + TLS_RSA_WITH_AES_128_GCM_SHA256 => 0x009c, + TLS_RSA_WITH_AES_256_GCM_SHA384 => 0x009d, + TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 => 0x009e, + TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 => 0x009f, + TLS_DH_RSA_WITH_AES_128_GCM_SHA256 => 0x00a0, + TLS_DH_RSA_WITH_AES_256_GCM_SHA384 => 0x00a1, + TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 => 0x00a2, + TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 => 0x00a3, + TLS_DH_DSS_WITH_AES_128_GCM_SHA256 => 0x00a4, + TLS_DH_DSS_WITH_AES_256_GCM_SHA384 => 0x00a5, + TLS_DH_anon_WITH_AES_128_GCM_SHA256 => 0x00a6, + TLS_DH_anon_WITH_AES_256_GCM_SHA384 => 0x00a7, + TLS_PSK_WITH_AES_128_GCM_SHA256 => 0x00a8, + TLS_PSK_WITH_AES_256_GCM_SHA384 => 0x00a9, + TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 => 0x00aa, + TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 => 0x00ab, + TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 => 0x00ac, + TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 => 0x00ad, + TLS_PSK_WITH_AES_128_CBC_SHA256 => 0x00ae, + TLS_PSK_WITH_AES_256_CBC_SHA384 => 0x00af, + TLS_PSK_WITH_NULL_SHA256 => 0x00b0, + TLS_PSK_WITH_NULL_SHA384 => 0x00b1, + TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 => 0x00b2, + TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 => 0x00b3, + TLS_DHE_PSK_WITH_NULL_SHA256 => 0x00b4, + TLS_DHE_PSK_WITH_NULL_SHA384 => 0x00b5, 
+ TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 => 0x00b6, + TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 => 0x00b7, + TLS_RSA_PSK_WITH_NULL_SHA256 => 0x00b8, + TLS_RSA_PSK_WITH_NULL_SHA384 => 0x00b9, + TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 => 0x00ba, + TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 => 0x00bb, + TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 => 0x00bc, + TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 => 0x00bd, + TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 => 0x00be, + TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 => 0x00bf, + TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 => 0x00c0, + TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 => 0x00c1, + TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 => 0x00c2, + TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 => 0x00c3, + TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 => 0x00c4, + TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 => 0x00c5, + TLS_EMPTY_RENEGOTIATION_INFO_SCSV => 0x00ff, + TLS13_AES_128_GCM_SHA256 => 0x1301, + TLS13_AES_256_GCM_SHA384 => 0x1302, + TLS13_CHACHA20_POLY1305_SHA256 => 0x1303, + TLS13_AES_128_CCM_SHA256 => 0x1304, + TLS13_AES_128_CCM_8_SHA256 => 0x1305, + TLS_ECDH_ECDSA_WITH_NULL_SHA => 0xc001, + TLS_ECDH_ECDSA_WITH_RC4_128_SHA => 0xc002, + TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA => 0xc003, + TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA => 0xc004, + TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA => 0xc005, + TLS_ECDHE_ECDSA_WITH_NULL_SHA => 0xc006, + TLS_ECDHE_ECDSA_WITH_RC4_128_SHA => 0xc007, + TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA => 0xc008, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA => 0xc009, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA => 0xc00a, + TLS_ECDH_RSA_WITH_NULL_SHA => 0xc00b, + TLS_ECDH_RSA_WITH_RC4_128_SHA => 0xc00c, + TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA => 0xc00d, + TLS_ECDH_RSA_WITH_AES_128_CBC_SHA => 0xc00e, + TLS_ECDH_RSA_WITH_AES_256_CBC_SHA => 0xc00f, + TLS_ECDHE_RSA_WITH_NULL_SHA => 0xc010, + TLS_ECDHE_RSA_WITH_RC4_128_SHA => 0xc011, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA => 0xc012, + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA => 0xc013, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA => 0xc014, + 
TLS_ECDH_anon_WITH_NULL_SHA => 0xc015, + TLS_ECDH_anon_WITH_RC4_128_SHA => 0xc016, + TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA => 0xc017, + TLS_ECDH_anon_WITH_AES_128_CBC_SHA => 0xc018, + TLS_ECDH_anon_WITH_AES_256_CBC_SHA => 0xc019, + TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA => 0xc01a, + TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA => 0xc01b, + TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA => 0xc01c, + TLS_SRP_SHA_WITH_AES_128_CBC_SHA => 0xc01d, + TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA => 0xc01e, + TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA => 0xc01f, + TLS_SRP_SHA_WITH_AES_256_CBC_SHA => 0xc020, + TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA => 0xc021, + TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA => 0xc022, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 => 0xc023, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 => 0xc024, + TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 => 0xc025, + TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 => 0xc026, + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 => 0xc027, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 => 0xc028, + TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 => 0xc029, + TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 => 0xc02a, + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 => 0xc02b, + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 => 0xc02c, + TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 => 0xc02d, + TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 => 0xc02e, + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 => 0xc02f, + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 => 0xc030, + TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 => 0xc031, + TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 => 0xc032, + TLS_ECDHE_PSK_WITH_RC4_128_SHA => 0xc033, + TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA => 0xc034, + TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA => 0xc035, + TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA => 0xc036, + TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 => 0xc037, + TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 => 0xc038, + TLS_ECDHE_PSK_WITH_NULL_SHA => 0xc039, + TLS_ECDHE_PSK_WITH_NULL_SHA256 => 0xc03a, + TLS_ECDHE_PSK_WITH_NULL_SHA384 => 0xc03b, + TLS_RSA_WITH_ARIA_128_CBC_SHA256 => 0xc03c, + TLS_RSA_WITH_ARIA_256_CBC_SHA384 => 
0xc03d, + TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 => 0xc03e, + TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 => 0xc03f, + TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 => 0xc040, + TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 => 0xc041, + TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 => 0xc042, + TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 => 0xc043, + TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 => 0xc044, + TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 => 0xc045, + TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 => 0xc046, + TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 => 0xc047, + TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 => 0xc048, + TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 => 0xc049, + TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 => 0xc04a, + TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 => 0xc04b, + TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 => 0xc04c, + TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 => 0xc04d, + TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 => 0xc04e, + TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 => 0xc04f, + TLS_RSA_WITH_ARIA_128_GCM_SHA256 => 0xc050, + TLS_RSA_WITH_ARIA_256_GCM_SHA384 => 0xc051, + TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 => 0xc052, + TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 => 0xc053, + TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 => 0xc054, + TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 => 0xc055, + TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 => 0xc056, + TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 => 0xc057, + TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 => 0xc058, + TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 => 0xc059, + TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 => 0xc05a, + TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 => 0xc05b, + TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 => 0xc05c, + TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 => 0xc05d, + TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 => 0xc05e, + TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 => 0xc05f, + TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 => 0xc060, + TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 => 0xc061, + TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 => 0xc062, + TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 => 0xc063, + TLS_PSK_WITH_ARIA_128_CBC_SHA256 => 0xc064, + 
TLS_PSK_WITH_ARIA_256_CBC_SHA384 => 0xc065, + TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 => 0xc066, + TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 => 0xc067, + TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 => 0xc068, + TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 => 0xc069, + TLS_PSK_WITH_ARIA_128_GCM_SHA256 => 0xc06a, + TLS_PSK_WITH_ARIA_256_GCM_SHA384 => 0xc06b, + TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 => 0xc06c, + TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 => 0xc06d, + TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 => 0xc06e, + TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 => 0xc06f, + TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 => 0xc070, + TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 => 0xc071, + TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 => 0xc072, + TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 => 0xc073, + TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 => 0xc074, + TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 => 0xc075, + TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 => 0xc076, + TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 => 0xc077, + TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 => 0xc078, + TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 => 0xc079, + TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 => 0xc07a, + TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 => 0xc07b, + TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 => 0xc07c, + TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 => 0xc07d, + TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 => 0xc07e, + TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 => 0xc07f, + TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 => 0xc080, + TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 => 0xc081, + TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 => 0xc082, + TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 => 0xc083, + TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 => 0xc084, + TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 => 0xc085, + TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 => 0xc086, + TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 => 0xc087, + TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 => 0xc088, + TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 => 0xc089, + 
TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 => 0xc08a, + TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 => 0xc08b, + TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 => 0xc08c, + TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 => 0xc08d, + TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 => 0xc08e, + TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 => 0xc08f, + TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 => 0xc090, + TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 => 0xc091, + TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 => 0xc092, + TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 => 0xc093, + TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 => 0xc094, + TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 => 0xc095, + TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 => 0xc096, + TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 => 0xc097, + TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 => 0xc098, + TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 => 0xc099, + TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 => 0xc09a, + TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 => 0xc09b, + TLS_RSA_WITH_AES_128_CCM => 0xc09c, + TLS_RSA_WITH_AES_256_CCM => 0xc09d, + TLS_DHE_RSA_WITH_AES_128_CCM => 0xc09e, + TLS_DHE_RSA_WITH_AES_256_CCM => 0xc09f, + TLS_RSA_WITH_AES_128_CCM_8 => 0xc0a0, + TLS_RSA_WITH_AES_256_CCM_8 => 0xc0a1, + TLS_DHE_RSA_WITH_AES_128_CCM_8 => 0xc0a2, + TLS_DHE_RSA_WITH_AES_256_CCM_8 => 0xc0a3, + TLS_PSK_WITH_AES_128_CCM => 0xc0a4, + TLS_PSK_WITH_AES_256_CCM => 0xc0a5, + TLS_DHE_PSK_WITH_AES_128_CCM => 0xc0a6, + TLS_DHE_PSK_WITH_AES_256_CCM => 0xc0a7, + TLS_PSK_WITH_AES_128_CCM_8 => 0xc0a8, + TLS_PSK_WITH_AES_256_CCM_8 => 0xc0a9, + TLS_PSK_DHE_WITH_AES_128_CCM_8 => 0xc0aa, + TLS_PSK_DHE_WITH_AES_256_CCM_8 => 0xc0ab, + TLS_ECDHE_ECDSA_WITH_AES_128_CCM => 0xc0ac, + TLS_ECDHE_ECDSA_WITH_AES_256_CCM => 0xc0ad, + TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 => 0xc0ae, + TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 => 0xc0af, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 => 0xcca8, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 => 0xcca9, + TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 => 0xccaa, + 
TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 => 0xccab, + TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 => 0xccac, + TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 => 0xccad, + TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 => 0xccae, + SSL_RSA_FIPS_WITH_DES_CBC_SHA => 0xfefe, + SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA => 0xfeff + } +} + +enum_builder! { + /// The `SignatureScheme` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U16 + EnumName: SignatureScheme; + EnumVal{ + RSA_PKCS1_SHA1 => 0x0201, + ECDSA_SHA1_Legacy => 0x0203, + RSA_PKCS1_SHA256 => 0x0401, + ECDSA_NISTP256_SHA256 => 0x0403, + RSA_PKCS1_SHA384 => 0x0501, + ECDSA_NISTP384_SHA384 => 0x0503, + RSA_PKCS1_SHA512 => 0x0601, + ECDSA_NISTP521_SHA512 => 0x0603, + RSA_PSS_SHA256 => 0x0804, + RSA_PSS_SHA384 => 0x0805, + RSA_PSS_SHA512 => 0x0806, + ED25519 => 0x0807, + ED448 => 0x0808 + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/error.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..32d49032e84389bb2e2bad70864bf5526824209d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/error.rs @@ -0,0 +1,245 @@ +use crate::msgs::enums::{AlertDescription, ContentType, HandshakeType}; +use crate::rand; + +use std::error::Error as StdError; +use std::fmt; +use std::time::SystemTimeError; + +/// rustls reports protocol errors using this type. +#[derive(Debug, PartialEq, Clone)] +pub enum Error { + /// We received a TLS message that isn't valid right now. + /// `expect_types` lists the message types we can expect right now. + /// `got_type` is the type we found. This error is typically + /// caused by a buggy TLS stack (the peer or this one), a broken + /// network, or an attack. 
+ InappropriateMessage { + /// Which types we expected + expect_types: Vec, + /// What type we received + got_type: ContentType, + }, + + /// We received a TLS handshake message that isn't valid right now. + /// `expect_types` lists the handshake message types we can expect + /// right now. `got_type` is the type we found. + InappropriateHandshakeMessage { + /// Which handshake type we expected + expect_types: Vec, + /// What handshake type we received + got_type: HandshakeType, + }, + + /// The peer sent us a syntactically incorrect TLS message. + CorruptMessage, + + /// The peer sent us a TLS message with invalid contents. + CorruptMessagePayload(ContentType), + + /// The peer didn't give us any certificates. + NoCertificatesPresented, + + /// The certificate verifier doesn't support the given type of name. + UnsupportedNameType, + + /// We couldn't decrypt a message. This is invariably fatal. + DecryptError, + + /// We couldn't encrypt a message because it was larger than the allowed message size. + /// This should never happen if the application is using valid record sizes. + EncryptError, + + /// The peer doesn't support a protocol version/feature we require. + /// The parameter gives a hint as to what version/feature it is. + PeerIncompatibleError(String), + + /// The peer deviated from the standard TLS protocol. + /// The parameter gives a hint where. + PeerMisbehavedError(String), + + /// We received a fatal alert. This means the peer is unhappy. + AlertReceived(AlertDescription), + + /// We received an invalidly encoded certificate from the peer. + InvalidCertificateEncoding, + + /// We received a certificate with invalid signature type. + InvalidCertificateSignatureType, + + /// We received a certificate with invalid signature. + InvalidCertificateSignature, + + /// We received a certificate which includes invalid data. + InvalidCertificateData(String), + + /// The presented SCT(s) were invalid. 
+ InvalidSct(sct::Error), + + /// A catch-all error for unlikely errors. + General(String), + + /// We failed to figure out what time it currently is. + FailedToGetCurrentTime, + + /// We failed to acquire random bytes from the system. + FailedToGetRandomBytes, + + /// This function doesn't work until the TLS handshake + /// is complete. + HandshakeNotComplete, + + /// The peer sent an oversized record/fragment. + PeerSentOversizedRecord, + + /// An incoming connection did not support any known application protocol. + NoApplicationProtocol, + + /// The `max_fragment_size` value supplied in configuration was too small, + /// or too large. + BadMaxFragmentSize, +} + +fn join(items: &[T]) -> String { + items + .iter() + .map(|x| format!("{:?}", x)) + .collect::>() + .join(" or ") +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Self::InappropriateMessage { + ref expect_types, + ref got_type, + } => write!( + f, + "received unexpected message: got {:?} when expecting {}", + got_type, + join::(expect_types) + ), + Self::InappropriateHandshakeMessage { + ref expect_types, + ref got_type, + } => write!( + f, + "received unexpected handshake message: got {:?} when expecting {}", + got_type, + join::(expect_types) + ), + Self::CorruptMessagePayload(ref typ) => { + write!(f, "received corrupt message of type {:?}", typ) + } + Self::PeerIncompatibleError(ref why) => write!(f, "peer is incompatible: {}", why), + Self::PeerMisbehavedError(ref why) => write!(f, "peer misbehaved: {}", why), + Self::AlertReceived(ref alert) => write!(f, "received fatal alert: {:?}", alert), + Self::InvalidCertificateEncoding => { + write!(f, "invalid peer certificate encoding") + } + Self::InvalidCertificateSignatureType => { + write!(f, "invalid peer certificate signature type") + } + Self::InvalidCertificateSignature => { + write!(f, "invalid peer certificate signature") + } + Self::InvalidCertificateData(ref reason) => { + write!(f, 
"invalid peer certificate contents: {}", reason) + } + Self::CorruptMessage => write!(f, "received corrupt message"), + Self::NoCertificatesPresented => write!(f, "peer sent no certificates"), + Self::UnsupportedNameType => write!(f, "presented server name type wasn't supported"), + Self::DecryptError => write!(f, "cannot decrypt peer's message"), + Self::EncryptError => write!(f, "cannot encrypt message"), + Self::PeerSentOversizedRecord => write!(f, "peer sent excess record size"), + Self::HandshakeNotComplete => write!(f, "handshake not complete"), + Self::NoApplicationProtocol => write!(f, "peer doesn't support any known protocol"), + Self::InvalidSct(ref err) => write!(f, "invalid certificate timestamp: {:?}", err), + Self::FailedToGetCurrentTime => write!(f, "failed to get current time"), + Self::FailedToGetRandomBytes => write!(f, "failed to get random bytes"), + Self::BadMaxFragmentSize => { + write!(f, "the supplied max_fragment_size was too small or large") + } + Self::General(ref err) => write!(f, "unexpected error: {}", err), + } + } +} + +impl From for Error { + #[inline] + fn from(_: SystemTimeError) -> Self { + Self::FailedToGetCurrentTime + } +} + +impl StdError for Error {} + +impl From for Error { + fn from(_: rand::GetRandomFailed) -> Self { + Self::FailedToGetRandomBytes + } +} + +#[cfg(test)] +mod tests { + use super::Error; + + #[test] + fn smoke() { + use crate::msgs::enums::{AlertDescription, ContentType, HandshakeType}; + use sct; + + let all = vec![ + Error::InappropriateMessage { + expect_types: vec![ContentType::Alert], + got_type: ContentType::Handshake, + }, + Error::InappropriateHandshakeMessage { + expect_types: vec![HandshakeType::ClientHello, HandshakeType::Finished], + got_type: HandshakeType::ServerHello, + }, + Error::CorruptMessage, + Error::CorruptMessagePayload(ContentType::Alert), + Error::NoCertificatesPresented, + Error::DecryptError, + Error::PeerIncompatibleError("no tls1.2".to_string()), + 
Error::PeerMisbehavedError("inconsistent something".to_string()), + Error::AlertReceived(AlertDescription::ExportRestriction), + Error::InvalidCertificateEncoding, + Error::InvalidCertificateSignatureType, + Error::InvalidCertificateSignature, + Error::InvalidCertificateData("Data".into()), + Error::InvalidSct(sct::Error::MalformedSct), + Error::General("undocumented error".to_string()), + Error::FailedToGetCurrentTime, + Error::FailedToGetRandomBytes, + Error::HandshakeNotComplete, + Error::PeerSentOversizedRecord, + Error::NoApplicationProtocol, + Error::BadMaxFragmentSize, + ]; + + for err in all { + println!("{:?}:", err); + println!(" fmt '{}'", err); + } + } + + #[test] + fn rand_error_mapping() { + use super::rand; + let err: Error = rand::GetRandomFailed.into(); + assert_eq!(err, Error::FailedToGetRandomBytes); + } + + #[test] + fn time_error_mapping() { + use std::time::SystemTime; + + let time_error = SystemTime::UNIX_EPOCH + .duration_since(SystemTime::now()) + .unwrap_err(); + let err: Error = time_error.into(); + assert_eq!(err, Error::FailedToGetCurrentTime); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/hash_hs.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/hash_hs.rs new file mode 100644 index 0000000000000000000000000000000000000000..3dd66b118c0001c440e50b9d6d5b773f73341808 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/hash_hs.rs @@ -0,0 +1,240 @@ +use crate::msgs::codec::Codec; +use crate::msgs::handshake::HandshakeMessagePayload; +use crate::msgs::message::{Message, MessagePayload}; +use ring::digest; +use std::mem; + +/// Early stage buffering of handshake payloads. +/// +/// Before we know the hash algorithm to use to verify the handshake, we just buffer the messages. +/// During the handshake, we may restart the transcript due to a HelloRetryRequest, reverting +/// from the `HandshakeHash` to a `HandshakeHashBuffer` again. 
+pub(crate) struct HandshakeHashBuffer { + buffer: Vec, + client_auth_enabled: bool, +} + +impl HandshakeHashBuffer { + pub(crate) fn new() -> Self { + Self { + buffer: Vec::new(), + client_auth_enabled: false, + } + } + + /// We might be doing client auth, so need to keep a full + /// log of the handshake. + pub(crate) fn set_client_auth_enabled(&mut self) { + self.client_auth_enabled = true; + } + + /// Hash/buffer a handshake message. + pub(crate) fn add_message(&mut self, m: &Message) { + if let MessagePayload::Handshake { encoded, .. } = &m.payload { + self.buffer + .extend_from_slice(&encoded.0); + } + } + + /// Hash or buffer a byte slice. + #[cfg(test)] + fn update_raw(&mut self, buf: &[u8]) { + self.buffer.extend_from_slice(buf); + } + + /// Get the hash value if we were to hash `extra` too. + pub(crate) fn get_hash_given( + &self, + hash: &'static digest::Algorithm, + extra: &[u8], + ) -> digest::Digest { + let mut ctx = digest::Context::new(hash); + ctx.update(&self.buffer); + ctx.update(extra); + ctx.finish() + } + + /// We now know what hash function the verify_data will use. + pub(crate) fn start_hash(self, alg: &'static digest::Algorithm) -> HandshakeHash { + let mut ctx = digest::Context::new(alg); + ctx.update(&self.buffer); + HandshakeHash { + ctx, + client_auth: match self.client_auth_enabled { + true => Some(self.buffer), + false => None, + }, + } + } +} + +/// This deals with keeping a running hash of the handshake +/// payloads. This is computed by buffering initially. Once +/// we know what hash function we need to use we switch to +/// incremental hashing. +/// +/// For client auth, we also need to buffer all the messages. +/// This is disabled in cases where client auth is not possible. +pub(crate) struct HandshakeHash { + /// None before we know what hash function we're using + ctx: digest::Context, + + /// buffer for client-auth. 
+ client_auth: Option>, +} + +impl HandshakeHash { + /// We decided not to do client auth after all, so discard + /// the transcript. + pub(crate) fn abandon_client_auth(&mut self) { + self.client_auth = None; + } + + /// Hash/buffer a handshake message. + pub(crate) fn add_message(&mut self, m: &Message) -> &mut Self { + if let MessagePayload::Handshake { encoded, .. } = &m.payload { + self.update_raw(&encoded.0); + } + self + } + + /// Hash or buffer a byte slice. + fn update_raw(&mut self, buf: &[u8]) -> &mut Self { + self.ctx.update(buf); + + if let Some(buffer) = &mut self.client_auth { + buffer.extend_from_slice(buf); + } + + self + } + + /// Get the hash value if we were to hash `extra` too, + /// using hash function `hash`. + pub(crate) fn get_hash_given(&self, extra: &[u8]) -> digest::Digest { + let mut ctx = self.ctx.clone(); + ctx.update(extra); + ctx.finish() + } + + pub(crate) fn into_hrr_buffer(self) -> HandshakeHashBuffer { + let old_hash = self.ctx.finish(); + let old_handshake_hash_msg = + HandshakeMessagePayload::build_handshake_hash(old_hash.as_ref()); + + HandshakeHashBuffer { + client_auth_enabled: self.client_auth.is_some(), + buffer: old_handshake_hash_msg.get_encoding(), + } + } + + /// Take the current hash value, and encapsulate it in a + /// 'handshake_hash' handshake message. Start this hash + /// again, with that message at the front. + pub(crate) fn rollup_for_hrr(&mut self) { + let ctx = &mut self.ctx; + + let old_ctx = mem::replace(ctx, digest::Context::new(ctx.algorithm())); + let old_hash = old_ctx.finish(); + let old_handshake_hash_msg = + HandshakeMessagePayload::build_handshake_hash(old_hash.as_ref()); + + self.update_raw(&old_handshake_hash_msg.get_encoding()); + } + + /// Get the current hash value. + pub(crate) fn get_current_hash(&self) -> digest::Digest { + self.ctx.clone().finish() + } + + /// Takes this object's buffer containing all handshake messages + /// so far. 
This method only works once; it resets the buffer + /// to empty. + #[cfg(feature = "tls12")] + pub(crate) fn take_handshake_buf(&mut self) -> Option> { + self.client_auth.take() + } + + /// The digest algorithm + pub(crate) fn algorithm(&self) -> &'static digest::Algorithm { + self.ctx.algorithm() + } +} + +#[cfg(test)] +mod test { + use super::HandshakeHashBuffer; + use ring::digest; + + #[test] + fn hashes_correctly() { + let mut hhb = HandshakeHashBuffer::new(); + hhb.update_raw(b"hello"); + assert_eq!(hhb.buffer.len(), 5); + let mut hh = hhb.start_hash(&digest::SHA256); + assert!(hh.client_auth.is_none()); + hh.update_raw(b"world"); + let h = hh.get_current_hash(); + let h = h.as_ref(); + assert_eq!(h[0], 0x93); + assert_eq!(h[1], 0x6a); + assert_eq!(h[2], 0x18); + assert_eq!(h[3], 0x5c); + } + + #[cfg(feature = "tls12")] + #[test] + fn buffers_correctly() { + let mut hhb = HandshakeHashBuffer::new(); + hhb.set_client_auth_enabled(); + hhb.update_raw(b"hello"); + assert_eq!(hhb.buffer.len(), 5); + let mut hh = hhb.start_hash(&digest::SHA256); + assert_eq!( + hh.client_auth + .as_ref() + .map(|buf| buf.len()), + Some(5) + ); + hh.update_raw(b"world"); + assert_eq!( + hh.client_auth + .as_ref() + .map(|buf| buf.len()), + Some(10) + ); + let h = hh.get_current_hash(); + let h = h.as_ref(); + assert_eq!(h[0], 0x93); + assert_eq!(h[1], 0x6a); + assert_eq!(h[2], 0x18); + assert_eq!(h[3], 0x5c); + let buf = hh.take_handshake_buf(); + assert_eq!(Some(b"helloworld".to_vec()), buf); + } + + #[test] + fn abandon() { + let mut hhb = HandshakeHashBuffer::new(); + hhb.set_client_auth_enabled(); + hhb.update_raw(b"hello"); + assert_eq!(hhb.buffer.len(), 5); + let mut hh = hhb.start_hash(&digest::SHA256); + assert_eq!( + hh.client_auth + .as_ref() + .map(|buf| buf.len()), + Some(5) + ); + hh.abandon_client_auth(); + assert_eq!(hh.client_auth, None); + hh.update_raw(b"world"); + assert_eq!(hh.client_auth, None); + let h = hh.get_current_hash(); + let h = h.as_ref(); + 
assert_eq!(h[0], 0x93); + assert_eq!(h[1], 0x6a); + assert_eq!(h[2], 0x18); + assert_eq!(h[3], 0x5c); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/key.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/key.rs new file mode 100644 index 0000000000000000000000000000000000000000..dbd4cc5470e8594077c33cd564d5cd7749fc3e4a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/key.rs @@ -0,0 +1,52 @@ +use std::fmt; + +/// This type contains a private key by value. +/// +/// The private key must be DER-encoded ASN.1 in either +/// PKCS#8 or PKCS#1 format. +/// +/// The `rustls-pemfile` crate can be used to extract +/// private keys from a PEM file in these formats. +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct PrivateKey(pub Vec); + +/// This type contains a single certificate by value. +/// +/// The certificate must be DER-encoded X.509. +/// +/// The `rustls-pemfile` crate can be used to parse a PEM file. +/// +/// ## Note +/// +/// If you are receiving certificates from an untrusted client or server, the contents +/// must be validated manually. 
+#[derive(Clone, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct Certificate(pub Vec); + +impl AsRef<[u8]> for Certificate { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +impl fmt::Debug for Certificate { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use super::bs_debug::BsDebug; + f.debug_tuple("Certificate") + .field(&BsDebug(&self.0)) + .finish() + } +} + +#[cfg(test)] +mod test { + use super::Certificate; + + #[test] + fn certificate_debug() { + assert_eq!( + "Certificate(b\"ab\")", + format!("{:?}", Certificate(b"ab".to_vec())) + ); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/key_log.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/key_log.rs new file mode 100644 index 0000000000000000000000000000000000000000..1b6b3fec709dd0a216cfc813668916b353c1b281 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/key_log.rs @@ -0,0 +1,55 @@ +/// This trait represents the ability to do something useful +/// with key material, such as logging it to a file for debugging. +/// +/// Naturally, secrets passed over the interface are *extremely* +/// sensitive and can break the security of past, present and +/// future sessions. +/// +/// You'll likely want some interior mutability in your +/// implementation to make this useful. +/// +/// See [`KeyLogFile`](crate::KeyLogFile) that implements the standard +/// `SSLKEYLOGFILE` environment variable behaviour. +pub trait KeyLog: Send + Sync { + /// Log the given `secret`. `client_random` is provided for + /// session identification. `label` describes precisely what + /// `secret` means: + /// + /// - `CLIENT_RANDOM`: `secret` is the master secret for a TLSv1.2 session. + /// - `CLIENT_EARLY_TRAFFIC_SECRET`: `secret` encrypts early data + /// transmitted by a client + /// - `SERVER_HANDSHAKE_TRAFFIC_SECRET`: `secret` encrypts + /// handshake messages from the server during a TLSv1.3 handshake. 
+ /// - `CLIENT_HANDSHAKE_TRAFFIC_SECRET`: `secret` encrypts + /// handshake messages from the client during a TLSv1.3 handshake. + /// - `SERVER_TRAFFIC_SECRET_0`: `secret` encrypts post-handshake data + /// from the server in a TLSv1.3 session. + /// - `CLIENT_TRAFFIC_SECRET_0`: `secret` encrypts post-handshake data + /// from the client in a TLSv1.3 session. + /// - `EXPORTER_SECRET`: `secret` is the post-handshake exporter secret + /// in a TLSv1.3 session. + /// + /// These strings are selected to match the NSS key log format: + /// + fn log(&self, label: &str, client_random: &[u8], secret: &[u8]); + + /// Indicates whether the secret with label `label` will be logged. + /// + /// If `will_log` returns true then `log` will be called with the secret. + /// Otherwise, `log` will not be called for the secret. This is a + /// performance optimization. + fn will_log(&self, _label: &str) -> bool { + true + } +} + +/// KeyLog that does exactly nothing. +pub struct NoKeyLog; + +impl KeyLog for NoKeyLog { + fn log(&self, _: &str, _: &[u8], _: &[u8]) {} + #[inline] + fn will_log(&self, _label: &str) -> bool { + false + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/key_log_file.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/key_log_file.rs new file mode 100644 index 0000000000000000000000000000000000000000..79071131bcf6933f873c54c977ce0c345578906b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/key_log_file.rs @@ -0,0 +1,154 @@ +#[cfg(feature = "logging")] +use crate::log::warn; +use crate::KeyLog; +use std::env; +use std::fs::{File, OpenOptions}; +use std::io; +use std::io::Write; +use std::path::Path; +use std::sync::Mutex; + +// Internal mutable state for KeyLogFile +struct KeyLogFileInner { + file: Option, + buf: Vec, +} + +impl KeyLogFileInner { + fn new(var: Result) -> Self { + let path = match var { + Ok(ref s) => Path::new(s), + 
Err(env::VarError::NotUnicode(ref s)) => Path::new(s), + Err(env::VarError::NotPresent) => { + return Self { + file: None, + buf: Vec::new(), + }; + } + }; + + #[cfg_attr(not(feature = "logging"), allow(unused_variables))] + let file = match OpenOptions::new() + .append(true) + .create(true) + .open(path) + { + Ok(f) => Some(f), + Err(e) => { + warn!("unable to create key log file {:?}: {}", path, e); + None + } + }; + + Self { + file, + buf: Vec::new(), + } + } + + fn try_write(&mut self, label: &str, client_random: &[u8], secret: &[u8]) -> io::Result<()> { + let mut file = match self.file { + None => { + return Ok(()); + } + Some(ref f) => f, + }; + + self.buf.truncate(0); + write!(self.buf, "{} ", label)?; + for b in client_random.iter() { + write!(self.buf, "{:02x}", b)?; + } + write!(self.buf, " ")?; + for b in secret.iter() { + write!(self.buf, "{:02x}", b)?; + } + writeln!(self.buf)?; + file.write_all(&self.buf) + } +} + +/// [`KeyLog`] implementation that opens a file whose name is +/// given by the `SSLKEYLOGFILE` environment variable, and writes +/// keys into it. +/// +/// If `SSLKEYLOGFILE` is not set, this does nothing. +/// +/// If such a file cannot be opened, or cannot be written then +/// this does nothing but logs errors at warning-level. +pub struct KeyLogFile(Mutex); + +impl KeyLogFile { + /// Makes a new `KeyLogFile`. The environment variable is + /// inspected and the named file is opened during this call. 
+ pub fn new() -> Self { + let var = env::var("SSLKEYLOGFILE"); + Self(Mutex::new(KeyLogFileInner::new(var))) + } +} + +impl KeyLog for KeyLogFile { + fn log(&self, label: &str, client_random: &[u8], secret: &[u8]) { + #[cfg_attr(not(feature = "logging"), allow(unused_variables))] + match self + .0 + .lock() + .unwrap() + .try_write(label, client_random, secret) + { + Ok(()) => {} + Err(e) => { + warn!("error writing to key log file: {}", e); + } + } + } +} + +#[cfg(all(test, target_os = "linux"))] +mod test { + use super::*; + + fn init() { + let _ = env_logger::builder() + .is_test(true) + .try_init(); + } + + #[test] + fn test_env_var_is_not_unicode() { + init(); + let mut inner = KeyLogFileInner::new(Err(env::VarError::NotUnicode( + "/tmp/keylogfileinnertest".into(), + ))); + assert!(inner + .try_write("label", b"random", b"secret") + .is_ok()); + } + + #[test] + fn test_env_var_is_not_set() { + init(); + let mut inner = KeyLogFileInner::new(Err(env::VarError::NotPresent)); + assert!(inner + .try_write("label", b"random", b"secret") + .is_ok()); + } + + #[test] + fn test_env_var_cannot_be_opened() { + init(); + let mut inner = KeyLogFileInner::new(Ok("/dev/does-not-exist".into())); + assert!(inner + .try_write("label", b"random", b"secret") + .is_ok()); + } + + #[test] + fn test_env_var_cannot_be_written() { + init(); + let mut inner = KeyLogFileInner::new(Ok("/dev/full".into())); + assert!(inner + .try_write("label", b"random", b"secret") + .is_err()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/kx.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/kx.rs new file mode 100644 index 0000000000000000000000000000000000000000..4736570bd4478b1169b3ce56662b2c92e97f1f33 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/kx.rs @@ -0,0 +1,100 @@ +use std::fmt; + +use crate::error::Error; +use crate::msgs::enums::NamedGroup; + +/// An in-progress key exchange. 
This has the algorithm, +/// our private key, and our public key. +pub(crate) struct KeyExchange { + skxg: &'static SupportedKxGroup, + privkey: ring::agreement::EphemeralPrivateKey, + pub(crate) pubkey: ring::agreement::PublicKey, +} + +impl KeyExchange { + /// Choose a SupportedKxGroup by name, from a list of supported groups. + pub(crate) fn choose( + name: NamedGroup, + supported: &[&'static SupportedKxGroup], + ) -> Option<&'static SupportedKxGroup> { + supported + .iter() + .find(|skxg| skxg.name == name) + .cloned() + } + + /// Start a key exchange, using the given SupportedKxGroup. + /// + /// This generates an ephemeral key pair and stores it in the returned KeyExchange object. + pub(crate) fn start(skxg: &'static SupportedKxGroup) -> Option { + let rng = ring::rand::SystemRandom::new(); + let ours = + ring::agreement::EphemeralPrivateKey::generate(skxg.agreement_algorithm, &rng).ok()?; + + let pubkey = ours.compute_public_key().ok()?; + + Some(Self { + skxg, + privkey: ours, + pubkey, + }) + } + + /// Return the group being used. + pub(crate) fn group(&self) -> NamedGroup { + self.skxg.name + } + + /// Completes the key exchange, given the peer's public key. + /// + /// The shared secret is passed into the closure passed down in `f`, and the result of calling + /// `f` is returned to the caller. + pub(crate) fn complete( + self, + peer: &[u8], + f: impl FnOnce(&[u8]) -> Result, + ) -> Result { + let peer_key = ring::agreement::UnparsedPublicKey::new(self.skxg.agreement_algorithm, peer); + ring::agreement::agree_ephemeral(self.privkey, &peer_key, (), f) + .map_err(|()| Error::PeerMisbehavedError("key agreement failed".to_string())) + } +} + +/// A key-exchange group supported by rustls. +/// +/// All possible instances of this class are provided by the library in +/// the `ALL_KX_GROUPS` array. 
+pub struct SupportedKxGroup { + /// The IANA "TLS Supported Groups" name of the group + pub name: NamedGroup, + + /// The corresponding ring agreement::Algorithm + agreement_algorithm: &'static ring::agreement::Algorithm, +} + +impl fmt::Debug for SupportedKxGroup { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.name.fmt(f) + } +} + +/// Ephemeral ECDH on curve25519 (see RFC7748) +pub static X25519: SupportedKxGroup = SupportedKxGroup { + name: NamedGroup::X25519, + agreement_algorithm: &ring::agreement::X25519, +}; + +/// Ephemeral ECDH on secp256r1 (aka NIST-P256) +pub static SECP256R1: SupportedKxGroup = SupportedKxGroup { + name: NamedGroup::secp256r1, + agreement_algorithm: &ring::agreement::ECDH_P256, +}; + +/// Ephemeral ECDH on secp384r1 (aka NIST-P384) +pub static SECP384R1: SupportedKxGroup = SupportedKxGroup { + name: NamedGroup::secp384r1, + agreement_algorithm: &ring::agreement::ECDH_P384, +}; + +/// A list of all the key exchange groups supported by rustls. +pub static ALL_KX_GROUPS: [&SupportedKxGroup; 3] = [&X25519, &SECP256R1, &SECP384R1]; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..9ad18489ea94861e939ab0136b6a4e3c8ef2a4f0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/lib.rs @@ -0,0 +1,529 @@ +//! # Rustls - a modern TLS library +//! Rustls is a TLS library that aims to provide a good level of cryptographic security, +//! requires no configuration to achieve that security, and provides no unsafe features or +//! obsolete cryptography. +//! +//! ## Current features +//! +//! * TLS1.2 and TLS1.3. +//! * ECDSA, Ed25519 or RSA server authentication by clients. +//! * ECDSA, Ed25519 or RSA server authentication by servers. +//! 
* Forward secrecy using ECDHE; with curve25519, nistp256 or nistp384 curves. +//! * AES128-GCM and AES256-GCM bulk encryption, with safe nonces. +//! * ChaCha20-Poly1305 bulk encryption ([RFC7905](https://tools.ietf.org/html/rfc7905)). +//! * ALPN support. +//! * SNI support. +//! * Tunable fragment size to make TLS messages match size of underlying transport. +//! * Optional use of vectored IO to minimise system calls. +//! * TLS1.2 session resumption. +//! * TLS1.2 resumption via tickets ([RFC5077](https://tools.ietf.org/html/rfc5077)). +//! * TLS1.3 resumption via tickets or session storage. +//! * TLS1.3 0-RTT data for clients. +//! * TLS1.3 0-RTT data for servers. +//! * Client authentication by clients. +//! * Client authentication by servers. +//! * Extended master secret support ([RFC7627](https://tools.ietf.org/html/rfc7627)). +//! * Exporters ([RFC5705](https://tools.ietf.org/html/rfc5705)). +//! * OCSP stapling by servers. +//! * SCT stapling by servers. +//! * SCT verification by clients. +//! +//! ## Possible future features +//! +//! * PSK support. +//! * OCSP verification by clients. +//! * Certificate pinning. +//! +//! ## Non-features +//! +//! For reasons [explained in the manual](manual), +//! rustls does not and will not support: +//! +//! * SSL1, SSL2, SSL3, TLS1 or TLS1.1. +//! * RC4. +//! * DES or triple DES. +//! * EXPORT ciphersuites. +//! * MAC-then-encrypt ciphersuites. +//! * Ciphersuites without forward secrecy. +//! * Renegotiation. +//! * Kerberos. +//! * Compression. +//! * Discrete-log Diffie-Hellman. +//! * Automatic protocol version downgrade. +//! +//! There are plenty of other libraries that provide these features should you +//! need them. +//! +//! ### Platform support +//! +//! Rustls uses [`ring`](https://crates.io/crates/ring) for implementing the +//! cryptography in TLS. As a result, rustls only runs on platforms +//! [supported by `ring`](https://github.com/briansmith/ring#online-automated-testing). +//! 
At the time of writing this means x86, x86-64, armv7, and aarch64. +//! +//! ## Design Overview +//! ### Rustls does not take care of network IO +//! It doesn't make or accept TCP connections, or do DNS, or read or write files. +//! +//! There's example client and server code which uses mio to do all needed network +//! IO. +//! +//! ### Rustls provides encrypted pipes +//! These are the [`ServerConnection`] and [`ClientConnection`] types. You supply raw TLS traffic +//! on the left (via the [`read_tls()`] and [`write_tls()`] methods) and then read/write the +//! plaintext on the right: +//! +//! [`read_tls()`]: Connection::read_tls +//! [`write_tls()`]: Connection::read_tls +//! +//! ```text +//! TLS Plaintext +//! === ========= +//! read_tls() +-----------------------+ reader() as io::Read +//! | | +//! +---------> ClientConnection +---------> +//! | or | +//! <---------+ ServerConnection <---------+ +//! | | +//! write_tls() +-----------------------+ writer() as io::Write +//! ``` +//! +//! ### Rustls takes care of server certificate verification +//! You do not need to provide anything other than a set of root certificates to trust. +//! Certificate verification cannot be turned off or disabled in the main API. +//! +//! ## Getting started +//! This is the minimum you need to do to make a TLS client connection. +//! +//! First we load some root certificates. These are used to authenticate the server. +//! The recommended way is to depend on the `webpki_roots` crate which contains +//! the Mozilla set of root certificates. +//! +//! ```rust,no_run +//! let mut root_store = rustls::RootCertStore::empty(); +//! root_store.add_server_trust_anchors( +//! webpki_roots::TLS_SERVER_ROOTS +//! .0 +//! .iter() +//! .map(|ta| { +//! rustls::OwnedTrustAnchor::from_subject_spki_name_constraints( +//! ta.subject, +//! ta.spki, +//! ta.name_constraints, +//! ) +//! }) +//! ); +//! ``` +//! +//! Next, we make a `ClientConfig`. 
You're likely to make one of these per process, +//! and use it for all connections made by that process. +//! +//! ```rust,no_run +//! # let root_store: rustls::RootCertStore = panic!(); +//! let config = rustls::ClientConfig::builder() +//! .with_safe_defaults() +//! .with_root_certificates(root_store) +//! .with_no_client_auth(); +//! ``` +//! +//! Now we can make a connection. You need to provide the server's hostname so we +//! know what to expect to find in the server's certificate. +//! +//! ```rust +//! # use rustls; +//! # use webpki; +//! # use std::sync::Arc; +//! # use std::convert::TryInto; +//! # let mut root_store = rustls::RootCertStore::empty(); +//! # root_store.add_server_trust_anchors( +//! # webpki_roots::TLS_SERVER_ROOTS +//! # .0 +//! # .iter() +//! # .map(|ta| { +//! # rustls::OwnedTrustAnchor::from_subject_spki_name_constraints( +//! # ta.subject, +//! # ta.spki, +//! # ta.name_constraints, +//! # ) +//! # }) +//! # ); +//! # let config = rustls::ClientConfig::builder() +//! # .with_safe_defaults() +//! # .with_root_certificates(root_store) +//! # .with_no_client_auth(); +//! let rc_config = Arc::new(config); +//! let example_com = "example.com".try_into().unwrap(); +//! let mut client = rustls::ClientConnection::new(rc_config, example_com); +//! ``` +//! +//! Now you should do appropriate IO for the `client` object. If `client.wants_read()` yields +//! true, you should call `client.read_tls()` when the underlying connection has data. +//! Likewise, if `client.wants_write()` yields true, you should call `client.write_tls()` +//! when the underlying connection is able to send data. You should continue doing this +//! as long as the connection is valid. +//! +//! The return types of `read_tls()` and `write_tls()` only tell you if the IO worked. No +//! parsing or processing of the TLS messages is done. After each `read_tls()` you should +//! therefore call `client.process_new_packets()` which parses and processes the messages. +//! 
Any error returned from `process_new_packets` is fatal to the connection, and will tell you +//! why. For example, if the server's certificate is expired `process_new_packets` will +//! return `Err(WebPkiError(CertExpired, ValidateServerCert))`. From this point on, +//! `process_new_packets` will not do any new work and will return that error continually. +//! +//! You can extract newly received data by calling `client.reader()` (which implements the +//! `io::Read` trait). You can send data to the peer by calling `client.writer()` (which +//! implements `io::Write` trait). Note that `client.writer().write()` buffers data you +//! send if the TLS connection is not yet established: this is useful for writing (say) a +//! HTTP request, but this is buffered so avoid large amounts of data. +//! +//! The following code uses a fictional socket IO API for illustration, and does not handle +//! errors. +//! +//! ```rust,no_run +//! # let mut client = rustls::ClientConnection::new(panic!(), panic!()).unwrap(); +//! # struct Socket { } +//! # impl Socket { +//! # fn ready_for_write(&self) -> bool { false } +//! # fn ready_for_read(&self) -> bool { false } +//! # fn wait_for_something_to_happen(&self) { } +//! # } +//! # +//! # use std::io::{Read, Write, Result}; +//! # impl Read for Socket { +//! # fn read(&mut self, buf: &mut [u8]) -> Result { panic!() } +//! # } +//! # impl Write for Socket { +//! # fn write(&mut self, buf: &[u8]) -> Result { panic!() } +//! # fn flush(&mut self) -> Result<()> { panic!() } +//! # } +//! # +//! # fn connect(_address: &str, _port: u16) -> Socket { +//! # panic!(); +//! # } +//! use std::io; +//! use rustls::Connection; +//! +//! client.writer().write(b"GET / HTTP/1.0\r\n\r\n").unwrap(); +//! let mut socket = connect("example.com", 443); +//! loop { +//! if client.wants_read() && socket.ready_for_read() { +//! client.read_tls(&mut socket).unwrap(); +//! client.process_new_packets().unwrap(); +//! +//! let mut plaintext = Vec::new(); +//! 
client.reader().read_to_end(&mut plaintext).unwrap(); +//! io::stdout().write(&plaintext).unwrap(); +//! } +//! +//! if client.wants_write() && socket.ready_for_write() { +//! client.write_tls(&mut socket).unwrap(); +//! } +//! +//! socket.wait_for_something_to_happen(); +//! } +//! ``` +//! +//! # Examples +//! [`tlsserver`](https://github.com/rustls/rustls/blob/main/examples/src/bin/tlsserver-mio.rs) +//! and [`tlsclient`](https://github.com/rustls/rustls/blob/main/examples/src/bin/tlsclient-mio.rs) +//! are full worked examples. These both use mio. +//! +//! # Crate features +//! Here's a list of what features are exposed by the rustls crate and what +//! they mean. +//! +//! - `logging`: this makes the rustls crate depend on the `log` crate. +//! rustls outputs interesting protocol-level messages at `trace!` and `debug!` +//! level, and protocol-level errors at `warn!` and `error!` level. The log +//! messages do not contain secret key data, and so are safe to archive without +//! affecting session security. This feature is in the default set. +//! +//! - `dangerous_configuration`: this feature enables a `dangerous()` method on +//! `ClientConfig` and `ServerConfig` that allows setting inadvisable options, +//! such as replacing the certificate verification process. Applications +//! requesting this feature should be reviewed carefully. +//! +//! - `quic`: this feature exposes additional constructors and functions +//! for using rustls as a TLS library for QUIC. See the `quic` module for +//! details of these. You will only need this if you're writing a QUIC +//! implementation. +//! +//! - `tls12`: enables support for TLS version 1.2. This feature is in the default +//! set. Note that, due to the additive nature of Cargo features and because it +//! is enabled by default, other crates in your dependency graph could re-enable +//! it for your application. If you want to disable TLS 1.2 for security reasons, +//! 
consider explicitly enabling TLS 1.3 only in the config builder API. +//! +//! - `read_buf`: When building with Rust Nightly, adds support for the unstable +//! `std::io::ReadBuf` and related APIs. This reduces costs from initializing +//! buffers. Will do nothing on non-Nightly releases. + +// Require docs for public APIs, deny unsafe code, etc. +#![forbid(unsafe_code, unused_must_use)] +#![cfg_attr(not(read_buf), forbid(unstable_features))] +#![deny( + clippy::clone_on_ref_ptr, + clippy::use_self, + trivial_casts, + trivial_numeric_casts, + missing_docs, + unreachable_pub, + unused_import_braces, + unused_extern_crates, + unused_qualifications +)] +// Relax these clippy lints: +// - ptr_arg: this triggers on references to type aliases that are Vec +// underneath. +// - too_many_arguments: some things just need a lot of state, wrapping it +// doesn't necessarily make it easier to follow what's going on +// - new_ret_no_self: we sometimes return `Arc`, which seems fine +// - single_component_path_imports: our top-level `use log` import causes +// a false positive, https://github.com/rust-lang/rust-clippy/issues/5210 +// - new_without_default: for internal constructors, the indirection is not +// helpful +#![allow( + clippy::too_many_arguments, + clippy::new_ret_no_self, + clippy::ptr_arg, + clippy::single_component_path_imports, + clippy::new_without_default +)] +// Enable documentation for all features on docs.rs +#![cfg_attr(docsrs, feature(doc_cfg))] +// XXX: Because of https://github.com/rust-lang/rust/issues/54726, we cannot +// write `#![rustversion::attr(nightly, feature(read_buf))]` here. Instead, +// build.rs set `read_buf` for (only) Rust Nightly to get the same effect. +// +// All the other conditional logic in the crate could use +// `#[rustversion::nightly]` instead of `#[cfg(read_buf)]`; `#[cfg(read_buf)]` +// is used to avoid needing `rustversion` to be compiled twice during +// cross-compiling. 
+#![cfg_attr(read_buf, feature(read_buf))] + +// log for logging (optional). +#[cfg(feature = "logging")] +use log; + +#[cfg(not(feature = "logging"))] +#[macro_use] +mod log { + macro_rules! trace ( ($($tt:tt)*) => {{}} ); + macro_rules! debug ( ($($tt:tt)*) => {{}} ); + macro_rules! warn ( ($($tt:tt)*) => {{}} ); + macro_rules! error ( ($($tt:tt)*) => {{}} ); +} + +#[macro_use] +mod msgs; +mod anchors; +mod cipher; +mod conn; +mod error; +mod hash_hs; +mod limited_cache; +mod rand; +mod record_layer; +mod stream; +#[cfg(feature = "tls12")] +mod tls12; +mod tls13; +mod vecbuf; +mod verify; +#[cfg(test)] +mod verifybench; +mod x509; +#[macro_use] +mod check; +mod bs_debug; +mod builder; +mod enums; +mod key; +mod key_log; +mod key_log_file; +mod kx; +mod suites; +mod ticketer; +mod versions; + +/// Internal classes which may be useful outside the library. +/// The contents of this section DO NOT form part of the stable interface. +pub mod internal { + /// Low-level TLS message parsing and encoding functions. + pub mod msgs { + pub use crate::msgs::*; + } + /// Low-level TLS message decryption functions. 
+ pub mod cipher { + pub use crate::cipher::MessageDecrypter; + } +} + +// The public interface is: +pub use crate::anchors::{OwnedTrustAnchor, RootCertStore}; +pub use crate::builder::{ + ConfigBuilder, ConfigSide, WantsCipherSuites, WantsKxGroups, WantsVerifier, WantsVersions, +}; +pub use crate::conn::{ + CommonState, Connection, ConnectionCommon, IoState, Reader, SideData, Writer, +}; +pub use crate::enums::{CipherSuite, ProtocolVersion, SignatureScheme}; +pub use crate::error::Error; +pub use crate::key::{Certificate, PrivateKey}; +pub use crate::key_log::{KeyLog, NoKeyLog}; +pub use crate::key_log_file::KeyLogFile; +pub use crate::kx::{SupportedKxGroup, ALL_KX_GROUPS}; +pub use crate::msgs::enums::{ + AlertDescription, ContentType, HandshakeType, NamedGroup, SignatureAlgorithm, +}; +pub use crate::msgs::handshake::{DigitallySignedStruct, DistinguishedNames}; +pub use crate::stream::{Stream, StreamOwned}; +pub use crate::suites::{ + BulkAlgorithm, SupportedCipherSuite, ALL_CIPHER_SUITES, DEFAULT_CIPHER_SUITES, +}; +#[cfg(feature = "secret_extraction")] +pub use crate::suites::{ConnectionTrafficSecrets, ExtractedSecrets}; +pub use crate::ticketer::Ticketer; +#[cfg(feature = "tls12")] +pub use crate::tls12::Tls12CipherSuite; +pub use crate::tls13::Tls13CipherSuite; +pub use crate::versions::{SupportedProtocolVersion, ALL_VERSIONS, DEFAULT_VERSIONS}; + +/// Items for use in a client. 
+pub mod client { + pub(super) mod builder; + mod client_conn; + mod common; + pub(super) mod handy; + mod hs; + #[cfg(feature = "tls12")] + mod tls12; + mod tls13; + + pub use builder::{WantsClientCert, WantsTransparencyPolicyOrClientCert}; + #[cfg(feature = "quic")] + pub use client_conn::ClientQuicExt; + pub use client_conn::InvalidDnsNameError; + pub use client_conn::ResolvesClientCert; + pub use client_conn::ServerName; + pub use client_conn::StoresClientSessions; + pub use client_conn::{ClientConfig, ClientConnection, ClientConnectionData, WriteEarlyData}; + pub use handy::{ClientSessionMemoryCache, NoClientSessionStorage}; + + #[cfg(feature = "dangerous_configuration")] + pub use crate::verify::{ + CertificateTransparencyPolicy, HandshakeSignatureValid, ServerCertVerified, + ServerCertVerifier, WebPkiVerifier, + }; + #[cfg(feature = "dangerous_configuration")] + pub use client_conn::danger::DangerousClientConfig; +} + +pub use client::{ClientConfig, ClientConnection, ServerName}; + +/// Items for use in a server. 
+pub mod server { + pub(crate) mod builder; + mod common; + pub(crate) mod handy; + mod hs; + mod server_conn; + #[cfg(feature = "tls12")] + mod tls12; + mod tls13; + + pub use crate::verify::{ + AllowAnyAnonymousOrAuthenticatedClient, AllowAnyAuthenticatedClient, NoClientAuth, + }; + pub use builder::WantsServerCert; + pub use handy::ResolvesServerCertUsingSni; + pub use handy::{NoServerSessionStorage, ServerSessionMemoryCache}; + #[cfg(feature = "quic")] + pub use server_conn::ServerQuicExt; + pub use server_conn::StoresServerSessions; + pub use server_conn::{ + Accepted, Acceptor, ReadEarlyData, ServerConfig, ServerConnection, ServerConnectionData, + }; + pub use server_conn::{ClientHello, ProducesTickets, ResolvesServerCert}; + + #[cfg(feature = "dangerous_configuration")] + pub use crate::verify::{ClientCertVerified, ClientCertVerifier, DnsName}; +} + +pub use server::{ServerConfig, ServerConnection}; + +/// All defined ciphersuites appear in this module. +/// +/// [`ALL_CIPHER_SUITES`] is provided as an array of all of these values. +pub mod cipher_suite { + pub use crate::suites::CipherSuiteCommon; + #[cfg(feature = "tls12")] + pub use crate::tls12::TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256; + #[cfg(feature = "tls12")] + pub use crate::tls12::TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384; + #[cfg(feature = "tls12")] + pub use crate::tls12::TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256; + #[cfg(feature = "tls12")] + pub use crate::tls12::TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256; + #[cfg(feature = "tls12")] + pub use crate::tls12::TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384; + #[cfg(feature = "tls12")] + pub use crate::tls12::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256; + pub use crate::tls13::TLS13_AES_128_GCM_SHA256; + pub use crate::tls13::TLS13_AES_256_GCM_SHA384; + pub use crate::tls13::TLS13_CHACHA20_POLY1305_SHA256; +} + +/// All defined protocol versions appear in this module. +/// +/// ALL_VERSIONS is a provided as an array of all of these values. 
+pub mod version { + #[cfg(feature = "tls12")] + pub use crate::versions::TLS12; + pub use crate::versions::TLS13; +} + +/// All defined key exchange groups appear in this module. +/// +/// ALL_KX_GROUPS is provided as an array of all of these values. +pub mod kx_group { + pub use crate::kx::SECP256R1; + pub use crate::kx::SECP384R1; + pub use crate::kx::X25519; +} + +/// Message signing interfaces and implementations. +pub mod sign; + +#[cfg(feature = "quic")] +#[cfg_attr(docsrs, doc(cfg(feature = "quic")))] +/// APIs for implementing QUIC TLS +pub mod quic; + +/// This is the rustls manual. +pub mod manual; + +/** Type renames. */ +#[allow(clippy::upper_case_acronyms)] +#[doc(hidden)] +#[deprecated(since = "0.20.0", note = "Use ResolvesServerCertUsingSni")] +pub type ResolvesServerCertUsingSNI = server::ResolvesServerCertUsingSni; +#[allow(clippy::upper_case_acronyms)] +#[cfg(feature = "dangerous_configuration")] +#[doc(hidden)] +#[deprecated(since = "0.20.0", note = "Use client::WebPkiVerifier")] +pub type WebPKIVerifier = client::WebPkiVerifier; +#[allow(clippy::upper_case_acronyms)] +#[doc(hidden)] +#[deprecated(since = "0.20.0", note = "Use Error")] +pub type TLSError = Error; +#[doc(hidden)] +#[deprecated(since = "0.20.0", note = "Use ClientConnection")] +pub type ClientSession = ClientConnection; +#[doc(hidden)] +#[deprecated(since = "0.20.0", note = "Use ServerConnection")] +pub type ServerSession = ServerConnection; + +/* Apologies: would make a trait alias here, but those remain unstable. 
+pub trait Session = Connection; +*/ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/limited_cache.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/limited_cache.rs new file mode 100644 index 0000000000000000000000000000000000000000..6994d881d2c9fa728a118de63d1a7cc3fac52b0c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/limited_cache.rs @@ -0,0 +1,175 @@ +use std::borrow::Borrow; +use std::collections::hash_map::Entry; +use std::collections::{HashMap, VecDeque}; +use std::hash::Hash; + +/// A HashMap-alike, which never gets larger than a specified +/// capacity, and evicts the oldest insertion to maintain this. +/// +/// The requested capacity may be rounded up by the underlying +/// collections. This implementation uses all the allocated +/// storage. +/// +/// This is inefficient: it stores keys twice. +pub(crate) struct LimitedCache { + map: HashMap, + + // first item is the oldest key + oldest: VecDeque, +} + +impl LimitedCache +where + K: Eq + Hash + Clone + std::fmt::Debug, +{ + /// Create a new LimitedCache with the given rough capacity. + pub(crate) fn new(capacity_order_of_magnitude: usize) -> Self { + Self { + map: HashMap::with_capacity(capacity_order_of_magnitude), + oldest: VecDeque::with_capacity(capacity_order_of_magnitude), + } + } + + pub(crate) fn insert(&mut self, k: K, v: V) { + let inserted_new_item = match self.map.entry(k) { + Entry::Occupied(mut old) => { + // nb. 
does not freshen entry in `oldest` + old.insert(v); + false + } + + entry @ Entry::Vacant(_) => { + self.oldest + .push_back(entry.key().clone()); + entry.or_insert(v); + true + } + }; + + // ensure next insert() does not require a realloc + if inserted_new_item && self.oldest.capacity() == self.oldest.len() { + if let Some(oldest_key) = self.oldest.pop_front() { + self.map.remove(&oldest_key); + } + } + } + + pub(crate) fn get(&self, k: &Q) -> Option<&V> + where + K: Borrow, + Q: Hash + Eq, + { + self.map.get(k) + } + + pub(crate) fn remove(&mut self, k: &Q) -> Option + where + K: Borrow, + Q: Hash + Eq, + { + if let Some(value) = self.map.remove(k) { + // O(N) search, followed by O(N) removal + if let Some(index) = self + .oldest + .iter() + .position(|item| item.borrow() == k) + { + self.oldest.remove(index); + } + Some(value) + } else { + None + } + } +} + +#[cfg(test)] +mod test { + type Test = super::LimitedCache; + + #[test] + fn test_updates_existing_item() { + let mut t = Test::new(3); + t.insert("abc".into(), 1); + t.insert("abc".into(), 2); + assert_eq!(t.get("abc"), Some(&2)); + } + + #[test] + fn test_evicts_oldest_item() { + let mut t = Test::new(3); + t.insert("abc".into(), 1); + t.insert("def".into(), 2); + t.insert("ghi".into(), 3); + + assert_eq!(t.get("abc"), None); + assert_eq!(t.get("def"), Some(&2)); + assert_eq!(t.get("ghi"), Some(&3)); + } + + #[test] + fn test_evicts_second_oldest_item_if_first_removed() { + let mut t = Test::new(3); + t.insert("abc".into(), 1); + t.insert("def".into(), 2); + + assert_eq!(t.remove("abc"), Some(1)); + + t.insert("ghi".into(), 3); + t.insert("jkl".into(), 4); + + assert_eq!(t.get("abc"), None); + assert_eq!(t.get("def"), None); + assert_eq!(t.get("ghi"), Some(&3)); + assert_eq!(t.get("jkl"), Some(&4)); + } + + #[test] + fn test_evicts_after_second_oldest_item_removed() { + let mut t = Test::new(3); + t.insert("abc".into(), 1); + t.insert("def".into(), 2); + + assert_eq!(t.remove("def"), Some(2)); + 
assert_eq!(t.get("abc"), Some(&1)); + + t.insert("ghi".into(), 3); + t.insert("jkl".into(), 4); + + assert_eq!(t.get("abc"), None); + assert_eq!(t.get("def"), None); + assert_eq!(t.get("ghi"), Some(&3)); + assert_eq!(t.get("jkl"), Some(&4)); + } + + #[test] + fn test_removes_all_items() { + let mut t = Test::new(3); + t.insert("abc".into(), 1); + t.insert("def".into(), 2); + + assert_eq!(t.remove("def"), Some(2)); + assert_eq!(t.remove("abc"), Some(1)); + + t.insert("ghi".into(), 3); + t.insert("jkl".into(), 4); + t.insert("mno".into(), 5); + + assert_eq!(t.get("abc"), None); + assert_eq!(t.get("def"), None); + assert_eq!(t.get("ghi"), None); + assert_eq!(t.get("jkl"), Some(&4)); + assert_eq!(t.get("mno"), Some(&5)); + } + + #[test] + fn test_inserts_many_items() { + let mut t = Test::new(3); + + for _ in 0..10000 { + t.insert("abc".into(), 1); + t.insert("def".into(), 2); + t.insert("ghi".into(), 3); + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/defaults.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/defaults.rs new file mode 100644 index 0000000000000000000000000000000000000000..aa15863c16305e9fe9b60f3a3ee287066652e5cb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/defaults.rs @@ -0,0 +1,29 @@ +/*! + +## Rationale for defaults + +### Why is AES-256 preferred over AES-128? + +This is a trade-off between: + +1. classical security level: searching a 2^128 key space is as implausible as 2^256. +2. post-quantum security level: the difference is more meaningful, and AES-256 seems like the conservative choice. +3. performance: AES-256 is around 40% slower than AES-128, though hardware acceleration typically narrows this gap. + +The choice is frankly quite marginal. + +### Why is AES-GCM preferred over chacha20-poly1305? 
+ +Hardware support for accelerating AES-GCM is widespread, and hardware-accelerated AES-GCM +is quicker than un-accelerated chacha20-poly1305. + +However, if you know your application will run on a platform without that, you should +_definitely_ change the default order to prefer chacha20-poly1305: both the performance and +the implementation security will be improved. We think this is an uncommon case. + +### Why is x25519 preferred for key exchange over nistp256? + +Both provide roughly the same classical security level, but x25519 has better performance and +it's _much_ more likely that both peers will have good quality implementations. + +*/ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/features.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/features.rs new file mode 100644 index 0000000000000000000000000000000000000000..639a23184c9bd6940f9ee7c18704e5b9882d89a6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/features.rs @@ -0,0 +1,50 @@ +/*! + +## Current features + +* TLS1.2 and TLS1.3. +* ECDSA, Ed25519 or RSA server authentication by clients. +* ECDSA, Ed25519 or RSA server authentication by servers. +* Forward secrecy using ECDHE; with curve25519, nistp256 or nistp384 curves. +* AES128-GCM and AES256-GCM bulk encryption, with safe nonces. +* ChaCha20-Poly1305 bulk encryption ([RFC7905](https://tools.ietf.org/html/rfc7905)). +* ALPN support. +* SNI support. +* Tunable MTU to make TLS messages match size of underlying transport. +* Optional use of vectored IO to minimise system calls. +* TLS1.2 session resumption. +* TLS1.2 resumption via tickets (RFC5077). +* TLS1.3 resumption via tickets or session storage. +* TLS1.3 0-RTT data for clients. +* Client authentication by clients. +* Client authentication by servers. +* Extended master secret support (RFC7627). +* Exporters (RFC5705). +* OCSP stapling by servers. 
+* SCT stapling by servers. +* SCT verification by clients. + +## Possible future features + +* PSK support. +* OCSP verification by clients. +* Certificate pinning. + +## Non-features + +For reasons explained in the other sections of this manual, rustls does not +and will not support: + +* SSL1, SSL2, SSL3, TLS1 or TLS1.1. +* RC4. +* DES or triple DES. +* EXPORT ciphersuites. +* MAC-then-encrypt ciphersuites. +* Ciphersuites without forward secrecy. +* Renegotiation. +* Kerberos. +* Compression. +* Discrete-log Diffie-Hellman. +* Automatic protocol version downgrade. + +*/ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/howto.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/howto.rs new file mode 100644 index 0000000000000000000000000000000000000000..aa68a1e390bc5363b3cc92dfa18087d50aa92f67 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/howto.rs @@ -0,0 +1,36 @@ +/*! # Customising private key usage + +By default rustls supports PKCS#8-format[^1] RSA or ECDSA keys, plus PKCS#1-format RSA keys. + +However, if your private key resides in a HSM, or in another process, or perhaps +another machine, rustls has some extension points to support this: + +The main trait you must implement is [`sign::SigningKey`][signing_key]. The primary method here +is [`choose_scheme`][choose_scheme] where you are given a set of [`SignatureScheme`s][sig_scheme] the client says +it supports: you must choose one (or return `None` -- this aborts the handshake). Having +done that, you return an implementation of the [`sign::Signer`][signer] trait. +The [`sign()`][sign_method] performs the signature and returns it. + +(Unfortunately this is currently designed for keys with low latency access, like in a +PKCS#11 provider, Microsoft CryptoAPI, etc. so is blocking rather than asynchronous. +It's a TODO to make these and other extension points async.) 
+ +Once you have these two pieces, configuring a server to use them involves, briefly: + +- packaging your `sign::SigningKey` with the matching certificate chain into a [`sign::CertifiedKey`][certified_key] +- making a [`ResolvesServerCertUsingSni`][cert_using_sni] and feeding in your `sign::CertifiedKey` for all SNI hostnames you want to use it for, +- setting that as your `ServerConfig`'s [`cert_resolver`][cert_resolver] + +[signing_key]: ../../sign/trait.SigningKey.html +[choose_scheme]: ../../sign/trait.SigningKey.html#tymethod.choose_scheme +[sig_scheme]: ../../enum.SignatureScheme.html +[signer]: ../../sign/trait.Signer.html +[sign_method]: ../../sign/trait.Signer.html#tymethod.sign +[certified_key]: ../../sign/struct.CertifiedKey.html +[cert_using_sni]: ../../struct.ResolvesServerCertUsingSni.html +[cert_resolver]: ../../struct.ServerConfig.html#structfield.cert_resolver + +[^1]: For PKCS#8 it does not support password encryption -- there's not a meaningful threat + model addressed by this, and the encryption supported is typically extremely poor. + +*/ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/implvulns.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/implvulns.rs new file mode 100644 index 0000000000000000000000000000000000000000..d08e1100aeda0808ed07e9ab0460c2dc9367d32d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/implvulns.rs @@ -0,0 +1,104 @@ +/*! # A review of TLS Implementation Vulnerabilities + +An important part of engineering involves studying and learning from the mistakes of the past. +It would be tremendously unfortunate to spend effort re-discovering and re-fixing the same +vulnerabilities that were discovered in the past. + +## Memory safety + +Being written entirely in the safe-subset of Rust immediately offers us freedom from the entire +class of memory safety vulnerabilities. 
There are too many to exhaustively list, and there will +certainly be more in the future. + +Examples: + +- Heartbleed [CVE-2014-0160](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-0160) (OpenSSL) +- Memory corruption in ASN.1 decoder [CVE-2016-2108](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-2108) (OpenSSL) +- Buffer overflow in read_server_hello [CVE-2014-3466](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-3466) (GnuTLS) + +## `goto fail` + +This is the name of a vulnerability in Apple Secure Transport [CVE-2014-1266](https://nvd.nist.gov/vuln/detail/CVE-2014-1266). +This boiled down to the following code, which validates the server's signature on the key exchange: + +```c + if ((err = SSLHashSHA1.update(&hashCtx, &serverRandom)) != 0) + goto fail; + if ((err = SSLHashSHA1.update(&hashCtx, &signedParams)) != 0) + goto fail; +> goto fail; + if ((err = SSLHashSHA1.final(&hashCtx, &hashOut)) != 0) + goto fail; +``` + +The marked line was duplicated, likely accidentally during a merge. This meant +the remaining part of the function (including the actual signature validation) +was unconditionally skipped. + +Ultimately the one countermeasure to this type of bug is basic testing: that a +valid signature returns success, and that an invalid one does not. rustls +has such testing, but this is really table stakes for security code. + +Further than this, though, we could consider that the *lack* of an error from +this function is a poor indicator that the signature was valid. rustls, instead, +has zero-size and non-copyable types that indicate a particular signature validation +has been performed. These types can be thought of as *capabilities* originated only +by designated signature verification functions -- such functions can then be a focus +of manual code review. Like capabilities, values of these types are otherwise unforgeable, +and are communicable only by Rust's move semantics. 
+ +Values of these types are threaded through the protocol state machine, leading to terminal +states that look like: + +```ignore +struct ExpectTraffic { + (...) + _cert_verified: verify::ServerCertVerified, + _sig_verified: verify::HandshakeSignatureValid, + _fin_verified: verify::FinishedMessageVerified, +} +``` + +Since this state requires a value of these types, it will be a compile-time error to +reach that state without performing the requisite security-critical operations. + +This approach is not infallible, but it has zero runtime cost. + +## State machine attacks: EarlyCCS and SMACK/SKIP/FREAK + +EarlyCCS [CVE-2014-0224](https://nvd.nist.gov/vuln/detail/CVE-2014-0224) was a vulnerability in OpenSSL +found in 2014. The TLS `ChangeCipherSpec` message would be processed at inappropriate times, leading +to data being encrypted with the wrong keys (specifically, keys which were not secret). This resulted +from OpenSSL taking a *reactive* strategy to incoming messages ("when I get a message X, I should do Y") +which allows it to diverge from the proper state machine under attacker control. + +[SMACK](https://mitls.org/pages/attacks/SMACK) is a similar suite of vulnerabilities found in JSSE, +CyaSSL, OpenSSL, Mono and axTLS. "SKIP-TLS" demonstrated that some implementations allowed handshake +messages (and in one case, the entire handshake!) to be skipped leading to breaks in security. "FREAK" +found that some implementations incorrectly allowed export-only state transitions (i.e., transitions that +were only valid when an export ciphersuite was in use). + +rustls represents its protocol state machine carefully to avoid these defects. We model the handshake, +CCS and application data subprotocols in the same single state machine. Each state in this machine is +represented with a single struct, and transitions are modelled as functions that consume the current state +plus one TLS message[^1] and return a struct representing the next state. 
These functions fully validate +the message type before further operations. + +A sample sequence for a full TLSv1.2 handshake by a client looks like: + +- `hs::ExpectServerHello` (nb. ClientHello is logically sent before this state); transition to `tls12::ExpectCertificate` +- `tls12::ExpectCertificate`; transition to `tls12::ExpectServerKX` +- `tls12::ExpectServerKX`; transition to `tls12::ExpectServerDoneOrCertReq` +- `tls12::ExpectServerDoneOrCertReq`; delegates to `tls12::ExpectCertificateRequest` or `tls12::ExpectServerDone` depending on incoming message. + - `tls12::ExpectServerDone`; transition to `tls12::ExpectCCS` +- `tls12::ExpectCCS`; transition to `tls12::ExpectFinished` +- `tls12::ExpectFinished`; transition to `tls12::ExpectTraffic` +- `tls12::ExpectTraffic`; terminal state; transitions to `tls12::ExpectTraffic` + +In the future we plan to formally prove that all possible transitions modelled in this system of types +are correct with respect to the standard(s). At the moment we rely merely on exhaustive testing. + +[^1]: a logical TLS message: post-decryption, post-fragmentation. + + +*/ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..778d24b280448844cc478f05af2dad5af8bb1b3c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/mod.rs @@ -0,0 +1,30 @@ +/*! + +This documentation primarily aims to explain design decisions taken in rustls. + +It does this from a few aspects: how rustls attempts to avoid construction errors +that occurred in other TLS libraries, how rustls attempts to avoid past TLS +protocol vulnerabilities, and assorted advice for achieving common tasks with rustls. 
+*/ +#![allow(non_snake_case)] + +/// This section discusses vulnerabilities in other TLS implementations, theorising their +/// root cause and how we aim to avoid them in rustls. +#[path = "implvulns.rs"] +pub mod _01_impl_vulnerabilities; + +/// This section discusses vulnerabilities and design errors in the TLS protocol. +#[path = "tlsvulns.rs"] +pub mod _02_tls_vulnerabilities; + +/// This section collects together goal-oriented documentation. +#[path = "howto.rs"] +pub mod _03_howto; + +/// This section documents rustls itself: what protocol features are and are not implemented. +#[path = "features.rs"] +pub mod _04_features; + +/// This section provides rationale for the defaults in rustls. +#[path = "defaults.rs"] +pub mod _05_defaults; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/tlsvulns.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/tlsvulns.rs new file mode 100644 index 0000000000000000000000000000000000000000..77d5510270af9c1a08378139b1f9d5b7d79f57fa --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/manual/tlsvulns.rs @@ -0,0 +1,173 @@ +/*! # A review of protocol vulnerabilities + +## CBC MAC-then-encrypt ciphersuites + +Back in 2000 [Bellare and Namprempre](https://eprint.iacr.org/2000/025) discussed how to make authenticated +encryption by composing separate encryption and authentication primitives. 
That paper included this table: + +| Composition Method | Privacy || Integrity || +|--------------------|---------||-----------|| +|| IND-CPA | IND-CCA | NM-CPA | INT-PTXT | INT-CTXT | +| Encrypt-and-MAC | insecure | insecure | insecure | secure | insecure | +| MAC-then-encrypt | secure | insecure | insecure | secure | insecure | +| Encrypt-then-MAC | secure | secure | secure | secure | secure | + +One may assume from this fairly clear result that encrypt-and-MAC and MAC-then-encrypt compositions would be quickly abandoned +in favour of the remaining proven-secure option. But that didn't happen, not in TLSv1.1 (2006) nor in TLSv1.2 (2008). Worse, +both RFCs included incorrect advice on countermeasures for implementers, suggesting that the flaw was "not believed to be large +enough to be exploitable". + +[Lucky 13](http://www.isg.rhul.ac.uk/tls/Lucky13.html) (2013) exploited this flaw and affected all implementations, including +those written [after discovery](https://aws.amazon.com/blogs/security/s2n-and-lucky-13/). OpenSSL even had a +[memory safety vulnerability in the fix for Lucky 13](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-2107), which +gives a flavour of the kind of complexity required to remove the side channel. + +rustls does not implement CBC MAC-then-encrypt ciphersuites for these reasons. TLSv1.3 removed support for these +ciphersuites in 2018. + +There are some further rejected options worth mentioning: [RFC7366](https://tools.ietf.org/html/rfc7366) defines +Encrypt-then-MAC for TLS, but unfortunately cannot be negotiated without also supporting MAC-then-encrypt +(clients cannot express "I offer CBC, but only EtM and not MtE"). + +## RSA PKCS#1 encryption + +"RSA key exchange" in TLS involves the client choosing a large random value and encrypting it using the server's +public key. This has two overall problems: + +1. 
It provides no _forward secrecy_: later compromise of the server's private key breaks confidentiality of + *all* past sessions using that key. This is a crucial property in the presence of software that is often + [poor at keeping a secret](http://heartbleed.com/). +2. The padding used in practice in TLS ("PKCS#1", or fully "RSAES-PKCS1-v1_5") has been known to be broken since + [1998](http://archiv.infsec.ethz.ch/education/fs08/secsem/bleichenbacher98.pdf). + +In a similar pattern to the MAC-then-encrypt problem discussed above, TLSv1.0 (1999), TLSv1.1 (2006) and TLSv1.2 (2008) +continued to specify use of PKCS#1 encryption, again with incrementally more complex and incorrect advice on countermeasures. + +[ROBOT](https://robotattack.org/) (2018) showed that implementations were still vulnerable to these attacks twenty years later. + +rustls does not support RSA key exchange. TLSv1.3 also removed support. + +## BEAST + +[BEAST](https://vnhacker.blogspot.com/2011/09/beast.html) ([CVE-2011-3389](https://nvd.nist.gov/vuln/detail/CVE-2011-3389)) +was demonstrated in 2011 by Thai Duong and Juliano Rizzo, +and was another vulnerability in CBC-based ciphersuites in SSLv3.0 and TLSv1.0. CBC mode is vulnerable to adaptive +chosen-plaintext attacks if the IV is predictable. In the case of these protocol versions, the IV was the previous +block of ciphertext (as if the entire TLS session was one CBC ciphertext, albeit revealed incrementally). This was +obviously predictable, since it was published on the wire. + +OpenSSL contained a countermeasure for this problem from 2002 onwards: it encrypts an empty message before each real +one, so that the IV used in the real message is unpredictable. This was turned off by default due to bugs in IE6. + +TLSv1.1 fix this vulnerability, but not any of the other deficiencies of CBC mode (see above). + +rustls does not support these ciphersuites. 
+ +## CRIME + +In 2002 [John Kelsey](https://www.iacr.org/cryptodb/archive/2002/FSE/3091/3091.pdf) discussed the length side channel +as applied to compression of combined secret and attacker-chosen strings. + +Compression continued to be an option in TLSv1.1 (2006) and in TLSv1.2 (2008). Support in libraries was widespread. + +[CRIME](http://netifera.com/research/crime/CRIME_ekoparty2012.pdf) ([CVE-2012-4929](https://nvd.nist.gov/vuln/detail/CVE-2012-4929)) +was demonstrated in 2012, again by Thai Duong and Juliano Rizzo. It attacked several protocols offering transparent +compression of application data, allowing quick adaptive chosen-plaintext attacks against secret values like cookies. + +rustls does not implement compression. TLSv1.3 also removed support. + +## Logjam / FREAK + +Way back when SSL was first being born, circa 1995, the US government considered cryptography a munition requiring +export control. SSL contained specific ciphersuites with dramatically small key sizes that were not subject +to export control. These controls were dropped in 2000. + +Since the "export-grade" ciphersuites no longer fulfilled any purpose, and because they were actively harmful to users, +one may have expected software support to disappear quickly. This did not happen. + +In 2015 [the FREAK attack](https://mitls.org/pages/attacks/SMACK#freak) ([CVE-2015-0204](https://nvd.nist.gov/vuln/detail/CVE-2015-0204)) +and [the Logjam attack](https://weakdh.org/) ([CVE-2015-4000](https://nvd.nist.gov/vuln/detail/CVE-2015-4000)) both +demonstrated total breaks of security in the presence of servers that accepted export ciphersuites. FREAK factored +512-bit RSA keys, while Logjam optimised solving discrete logs in the 512-bit group used by many different servers. + +Naturally, rustls does not implement any of these ciphersuites. 
+ +## SWEET32 + +Block ciphers are vulnerable to birthday attacks, where the probability of repeating a block increases dramatically +once a particular key has been used for many blocks. For block ciphers with 64-bit blocks, this becomes probable +once a given key encrypts the order of 32GB of data. + +[Sweet32](https://sweet32.info/) ([CVE-2016-2183](https://nvd.nist.gov/vuln/detail/CVE-2016-2183)) attacked this fact +in the context of TLS support for 3DES, breaking confidentiality by analysing a large amount of attacker-induced traffic +in one session. + +rustls does not support any 64-bit block ciphers. + +## DROWN + +[DROWN](https://drownattack.com/) ([CVE-2016-0800](https://nvd.nist.gov/vuln/detail/CVE-2016-0800)) is a cross-protocol +attack that breaks the security of TLSv1.2 and earlier (when used with RSA key exchange) by using SSLv2. It is required +that the server uses the same key for both protocol versions. + +rustls naturally does not support SSLv2, but most importantly does not support RSA key exchange for TLSv1.2. + +## Poodle + +[POODLE](https://www.openssl.org/~bodo/ssl-poodle.pdf) ([CVE-2014-3566](https://nvd.nist.gov/vuln/detail/CVE-2014-3566)) +is an attack against CBC mode ciphersuites in SSLv3. This was possible in most cases because some clients willingly +downgraded to SSLv3 after failed handshakes for later versions. + +rustls does not support CBC mode ciphersuites, or SSLv3. Note that rustls does not need to implement `TLS_FALLBACK_SCSV` +introduced as a countermeasure because it contains no ability to downgrade to earlier protocol versions. + +## GCM nonces + +[RFC5288](https://tools.ietf.org/html/rfc5288) introduced GCM-based ciphersuites for use in TLS. Unfortunately +the design was poor; it reused design for an unrelated security setting proposed in RFC5116. + +GCM is a typical nonce-based AEAD: it requires a unique (but not necessarily unpredictable) 96-bit nonce for each encryption +with a given key. 
The design specified by RFC5288 left two-thirds of the nonce construction up to implementations: + +- wasting 8 bytes per TLS ciphertext, +- meaning correct operation cannot be tested for (e.g., in protocol-level test vectors). + +There were no trade-offs here: TLS has a 64-bit sequence number that is not allowed to wrap and would make an ideal nonce. + +As a result, a [2016 study](https://eprint.iacr.org/2016/475.pdf) found: + +- implementations from IBM, A10 and Citrix used randomly-chosen nonces, which are unlikely to be unique over long connections, +- an implementation from Radware used the same nonce for the first two messages. + +rustls uses a counter from a random starting point for GCM nonces. TLSv1.3 and the Chacha20-Poly1305 TLSv1.2 ciphersuite +standardise this method. + +## Renegotiation + +In 2009 Marsh Ray and Steve Dispensa [discovered](https://kryptera.se/Renegotiating%20TLS.pdf) that the renegotiation +feature of all versions of TLS allows a MitM to splice a request of their choice onto the front of the client's real HTTP +request. A countermeasure was proposed and widely implemented to bind renegotiations to their previous negotiations; +unfortunately this was insufficient. + +rustls does not support renegotiation in TLSv1.2. TLSv1.3 also no longer supports renegotiation. + +## 3SHAKE + +[3SHAKE](https://www.mitls.org/pages/attacks/3SHAKE) (2014) described a complex attack that broke the "Secure Renegotiation" extension +introduced as a countermeasure to the previous protocol flaw. + +rustls does not support renegotiation for TLSv1.2 connections, or RSA key exchange, and both are required for this attack +to work. rustls implements the "Extended Master Secret" (RFC7627) extension for TLSv1.2 which was standardised as a countermeasure. + +TLSv1.3 no longer supports renegotiation and RSA key exchange. It also effectively incorporates the improvements made in RFC7627. 
+ +## KCI + +[This vulnerability](https://kcitls.org/) makes use of TLS ciphersuites (those offering static DH) which were standardised +yet not widely used. However, they were implemented by libraries, and as a result enabled for various clients. It coupled +this with misconfigured certificates (on services including facebook.com) which allowed their misuse to MitM connections. + +rustls does not support static DH/EC-DH ciphersuites. We assert that it is misissuance to sign an EC certificate +with the keyUsage extension allowing both signatures and key exchange. That it isn't is probably a failure +of CAB Forum baseline requirements. +*/ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/alert.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/alert.rs new file mode 100644 index 0000000000000000000000000000000000000000..2e12e6e04a3bad62dd10e342826af4795f604b5d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/alert.rs @@ -0,0 +1,22 @@ +use crate::msgs::codec::{Codec, Reader}; +use crate::msgs::enums::{AlertDescription, AlertLevel}; + +#[derive(Debug)] +pub struct AlertMessagePayload { + pub level: AlertLevel, + pub description: AlertDescription, +} + +impl Codec for AlertMessagePayload { + fn encode(&self, bytes: &mut Vec) { + self.level.encode(bytes); + self.description.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let level = AlertLevel::read(r)?; + let description = AlertDescription::read(r)?; + + Some(Self { level, description }) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/base.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/base.rs new file mode 100644 index 0000000000000000000000000000000000000000..8ae0f6d2d3b7b5db461fbeeeeb0231c5a280d80f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/base.rs @@ -0,0 
+1,170 @@ +use std::fmt; + +use crate::key; +use crate::msgs::codec; +use crate::msgs::codec::{Codec, Reader}; + +/// An externally length'd payload +#[derive(Clone, Eq, PartialEq)] +pub struct Payload(pub Vec); + +impl Codec for Payload { + fn encode(&self, bytes: &mut Vec) { + bytes.extend_from_slice(&self.0); + } + + fn read(r: &mut Reader) -> Option { + Some(Self::read(r)) + } +} + +impl Payload { + pub fn new(bytes: impl Into>) -> Self { + Self(bytes.into()) + } + + pub fn empty() -> Self { + Self::new(Vec::new()) + } + + pub fn read(r: &mut Reader) -> Self { + Self(r.rest().to_vec()) + } +} + +impl Codec for key::Certificate { + fn encode(&self, bytes: &mut Vec) { + codec::u24(self.0.len() as u32).encode(bytes); + bytes.extend_from_slice(&self.0); + } + + fn read(r: &mut Reader) -> Option { + let len = codec::u24::read(r)?.0 as usize; + let mut sub = r.sub(len)?; + let body = sub.rest().to_vec(); + Some(Self(body)) + } +} + +impl fmt::Debug for Payload { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + hex(f, &self.0) + } +} + +/// An arbitrary, unknown-content, u24-length-prefixed payload +#[derive(Clone, Eq, PartialEq)] +pub struct PayloadU24(pub Vec); + +impl PayloadU24 { + pub fn new(bytes: Vec) -> Self { + Self(bytes) + } +} + +impl Codec for PayloadU24 { + fn encode(&self, bytes: &mut Vec) { + codec::u24(self.0.len() as u32).encode(bytes); + bytes.extend_from_slice(&self.0); + } + + fn read(r: &mut Reader) -> Option { + let len = codec::u24::read(r)?.0 as usize; + let mut sub = r.sub(len)?; + let body = sub.rest().to_vec(); + Some(Self(body)) + } +} + +impl fmt::Debug for PayloadU24 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + hex(f, &self.0) + } +} + +/// An arbitrary, unknown-content, u16-length-prefixed payload +#[derive(Clone, Eq, PartialEq)] +pub struct PayloadU16(pub Vec); + +impl PayloadU16 { + pub fn new(bytes: Vec) -> Self { + Self(bytes) + } + + pub fn empty() -> Self { + Self::new(Vec::new()) + } + + pub fn 
encode_slice(slice: &[u8], bytes: &mut Vec) { + (slice.len() as u16).encode(bytes); + bytes.extend_from_slice(slice); + } +} + +impl Codec for PayloadU16 { + fn encode(&self, bytes: &mut Vec) { + Self::encode_slice(&self.0, bytes); + } + + fn read(r: &mut Reader) -> Option { + let len = u16::read(r)? as usize; + let mut sub = r.sub(len)?; + let body = sub.rest().to_vec(); + Some(Self(body)) + } +} + +impl fmt::Debug for PayloadU16 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + hex(f, &self.0) + } +} + +/// An arbitrary, unknown-content, u8-length-prefixed payload +#[derive(Clone, Eq, PartialEq)] +pub struct PayloadU8(pub Vec); + +impl PayloadU8 { + pub fn new(bytes: Vec) -> Self { + Self(bytes) + } + + pub fn empty() -> Self { + Self(Vec::new()) + } + + pub fn into_inner(self) -> Vec { + self.0 + } +} + +impl Codec for PayloadU8 { + fn encode(&self, bytes: &mut Vec) { + (self.0.len() as u8).encode(bytes); + bytes.extend_from_slice(&self.0); + } + + fn read(r: &mut Reader) -> Option { + let len = u8::read(r)? as usize; + let mut sub = r.sub(len)?; + let body = sub.rest().to_vec(); + Some(Self(body)) + } +} + +impl fmt::Debug for PayloadU8 { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + hex(f, &self.0) + } +} + +// Format an iterator of u8 into a hex string +pub(super) fn hex<'a>( + f: &mut fmt::Formatter<'_>, + payload: impl IntoIterator, +) -> fmt::Result { + for b in payload { + write!(f, "{:02x}", b)? 
+ } + Ok(()) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/ccs.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/ccs.rs new file mode 100644 index 0000000000000000000000000000000000000000..d9b0eb3b4f99f5920a045806e5b591f138decbaa --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/ccs.rs @@ -0,0 +1,20 @@ +use crate::msgs::codec::{Codec, Reader}; + +#[derive(Debug)] +pub struct ChangeCipherSpecPayload; + +impl Codec for ChangeCipherSpecPayload { + fn encode(&self, bytes: &mut Vec) { + 1u8.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let typ = u8::read(r)?; + + if typ == 1 && !r.any_left() { + Some(Self {}) + } else { + None + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/codec.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/codec.rs new file mode 100644 index 0000000000000000000000000000000000000000..9ad1b128b7bf2e9856f2030d4741041699c48d7a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/codec.rs @@ -0,0 +1,290 @@ +use std::convert::TryInto; +use std::fmt::Debug; + +/// Wrapper over a slice of bytes that allows reading chunks from +/// with the current position state held using a cursor. +/// +/// A new reader for a sub section of the the buffer can be created +/// using the `sub` function or a section of a certain length can +/// be obtained using the `take` function +pub struct Reader<'a> { + /// The underlying buffer storing the readers content + buffer: &'a [u8], + /// Stores the current reading position for the buffer + cursor: usize, +} + +impl<'a> Reader<'a> { + /// Creates a new Reader of the provided `bytes` slice with + /// the initial cursor position of zero. 
+ pub fn init(bytes: &[u8]) -> Reader { + Reader { + buffer: bytes, + cursor: 0, + } + } + + /// Attempts to create a new Reader on a sub section of this + /// readers bytes by taking a slice of the provided `length` + /// will return None if there is not enough bytes + pub fn sub(&mut self, length: usize) -> Option { + self.take(length).map(Reader::init) + } + + /// Borrows a slice of all the remaining bytes + /// that appear after the cursor position. + /// + /// Moves the cursor to the end of the buffer length. + pub fn rest(&mut self) -> &[u8] { + let rest = &self.buffer[self.cursor..]; + self.cursor = self.buffer.len(); + rest + } + + /// Attempts to borrow a slice of bytes from the current + /// cursor position of `length` if there is not enough + /// bytes remaining after the cursor to take the length + /// then None is returned instead. + pub fn take(&mut self, length: usize) -> Option<&[u8]> { + if self.left() < length { + return None; + } + let current = self.cursor; + self.cursor += length; + Some(&self.buffer[current..current + length]) + } + + /// Used to check whether the reader has any content left + /// after the cursor (cursor has not reached end of buffer) + pub fn any_left(&self) -> bool { + self.cursor < self.buffer.len() + } + + /// Returns the cursor position which is also the number + /// of bytes that have been read from the buffer. + pub fn used(&self) -> usize { + self.cursor + } + + /// Returns the number of bytes that are still able to be + /// read (The number of remaining takes) + pub fn left(&self) -> usize { + self.buffer.len() - self.cursor + } +} + +/// Trait for implementing encoding and decoding functionality +/// on something. +pub trait Codec: Debug + Sized { + /// Function for encoding itself by appending itself to + /// the provided vec of bytes. + fn encode(&self, bytes: &mut Vec); + + /// Function for decoding itself from the provided reader + /// will return Some if the decoding was successful or + /// None if it was not. 
+ fn read(_: &mut Reader) -> Option; + + /// Convenience function for encoding the implementation + /// into a vec and returning it + fn get_encoding(&self) -> Vec { + let mut bytes = Vec::new(); + self.encode(&mut bytes); + bytes + } + + /// Function for wrapping a call to the read function in + /// a Reader for the slice of bytes provided + fn read_bytes(bytes: &[u8]) -> Option { + let mut reader = Reader::init(bytes); + Self::read(&mut reader) + } +} + +fn decode_u8(bytes: &[u8]) -> Option { + let [value]: [u8; 1] = bytes.try_into().ok()?; + Some(value) +} + +impl Codec for u8 { + fn encode(&self, bytes: &mut Vec) { + bytes.push(*self); + } + + fn read(r: &mut Reader) -> Option { + r.take(1).and_then(decode_u8) + } +} + +pub fn put_u16(v: u16, out: &mut [u8]) { + let out: &mut [u8; 2] = (&mut out[..2]).try_into().unwrap(); + *out = u16::to_be_bytes(v); +} + +pub fn decode_u16(bytes: &[u8]) -> Option { + Some(u16::from_be_bytes(bytes.try_into().ok()?)) +} + +impl Codec for u16 { + fn encode(&self, bytes: &mut Vec) { + let mut b16 = [0u8; 2]; + put_u16(*self, &mut b16); + bytes.extend_from_slice(&b16); + } + + fn read(r: &mut Reader) -> Option { + r.take(2).and_then(decode_u16) + } +} + +// Make a distinct type for u24, even though it's a u32 underneath +#[allow(non_camel_case_types)] +#[derive(Debug, Copy, Clone)] +pub struct u24(pub u32); + +impl u24 { + pub fn decode(bytes: &[u8]) -> Option { + let [a, b, c]: [u8; 3] = bytes.try_into().ok()?; + Some(Self(u32::from_be_bytes([0, a, b, c]))) + } +} + +#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] +impl From for usize { + #[inline] + fn from(v: u24) -> Self { + v.0 as Self + } +} + +impl Codec for u24 { + fn encode(&self, bytes: &mut Vec) { + let be_bytes = u32::to_be_bytes(self.0); + bytes.extend_from_slice(&be_bytes[1..]) + } + + fn read(r: &mut Reader) -> Option { + r.take(3).and_then(Self::decode) + } +} + +pub fn decode_u32(bytes: &[u8]) -> Option { + 
Some(u32::from_be_bytes(bytes.try_into().ok()?)) +} + +impl Codec for u32 { + fn encode(&self, bytes: &mut Vec) { + bytes.extend(Self::to_be_bytes(*self)) + } + + fn read(r: &mut Reader) -> Option { + r.take(4).and_then(decode_u32) + } +} + +pub fn put_u64(v: u64, bytes: &mut [u8]) { + let bytes: &mut [u8; 8] = (&mut bytes[..8]).try_into().unwrap(); + *bytes = u64::to_be_bytes(v) +} + +pub fn decode_u64(bytes: &[u8]) -> Option { + Some(u64::from_be_bytes(bytes.try_into().ok()?)) +} + +impl Codec for u64 { + fn encode(&self, bytes: &mut Vec) { + let mut b64 = [0u8; 8]; + put_u64(*self, &mut b64); + bytes.extend_from_slice(&b64); + } + + fn read(r: &mut Reader) -> Option { + r.take(8).and_then(decode_u64) + } +} + +pub fn encode_vec_u8(bytes: &mut Vec, items: &[T]) { + let len_offset = bytes.len(); + bytes.push(0); + + for i in items { + i.encode(bytes); + } + + let len = bytes.len() - len_offset - 1; + debug_assert!(len <= 0xff); + bytes[len_offset] = len as u8; +} + +pub fn encode_vec_u16(bytes: &mut Vec, items: &[T]) { + let len_offset = bytes.len(); + bytes.extend([0, 0]); + + for i in items { + i.encode(bytes); + } + + let len = bytes.len() - len_offset - 2; + debug_assert!(len <= 0xffff); + let out: &mut [u8; 2] = (&mut bytes[len_offset..len_offset + 2]) + .try_into() + .unwrap(); + *out = u16::to_be_bytes(len as u16); +} + +pub fn encode_vec_u24(bytes: &mut Vec, items: &[T]) { + let len_offset = bytes.len(); + bytes.extend([0, 0, 0]); + + for i in items { + i.encode(bytes); + } + + let len = bytes.len() - len_offset - 3; + debug_assert!(len <= 0xff_ffff); + let len_bytes = u32::to_be_bytes(len as u32); + let out: &mut [u8; 3] = (&mut bytes[len_offset..len_offset + 3]) + .try_into() + .unwrap(); + out.copy_from_slice(&len_bytes[1..]); +} + +pub fn read_vec_u8(r: &mut Reader) -> Option> { + let mut ret: Vec = Vec::new(); + let len = usize::from(u8::read(r)?); + let mut sub = r.sub(len)?; + + while sub.any_left() { + ret.push(T::read(&mut sub)?); + } + + 
Some(ret) +} + +pub fn read_vec_u16(r: &mut Reader) -> Option> { + let mut ret: Vec = Vec::new(); + let len = usize::from(u16::read(r)?); + let mut sub = r.sub(len)?; + + while sub.any_left() { + ret.push(T::read(&mut sub)?); + } + + Some(ret) +} + +pub fn read_vec_u24_limited(r: &mut Reader, max_bytes: usize) -> Option> { + let mut ret: Vec = Vec::new(); + let len = u24::read(r)?.0 as usize; + if len > max_bytes { + return None; + } + + let mut sub = r.sub(len)?; + + while sub.any_left() { + ret.push(T::read(&mut sub)?); + } + + Some(ret) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/deframer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/deframer.rs new file mode 100644 index 0000000000000000000000000000000000000000..2ada2d81b077ad7faa8e2a92a8638cc449c37661 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/deframer.rs @@ -0,0 +1,425 @@ +use std::collections::VecDeque; +use std::io; + +use crate::error::Error; +use crate::msgs::codec; +use crate::msgs::message::{MessageError, OpaqueMessage}; + +/// This deframer works to reconstruct TLS messages +/// from arbitrary-sized reads, buffering as necessary. +/// The input is `read()`, get the output from `pop()`. +pub struct MessageDeframer { + /// Completed frames for output. + frames: VecDeque, + + /// Set to true if the peer is not talking TLS, but some other + /// protocol. The caller should abort the connection, because + /// the deframer cannot recover. + desynced: bool, + + /// A fixed-size buffer containing the currently-accumulating + /// TLS message. + buf: Box<[u8; OpaqueMessage::MAX_WIRE_SIZE]>, + + /// What size prefix of `buf` is used. 
+ used: usize, +} + +impl Default for MessageDeframer { + fn default() -> Self { + Self::new() + } +} + +impl MessageDeframer { + pub fn new() -> Self { + Self { + frames: VecDeque::new(), + desynced: false, + buf: Box::new([0u8; OpaqueMessage::MAX_WIRE_SIZE]), + used: 0, + } + } + + /// Return any complete messages that the deframer has been able to parse. + /// + /// Returns an `Error` if the deframer failed to parse some message contents, + /// `Ok(None)` if no full message is buffered, and `Ok(Some(_))` if a valid message was found. + pub fn pop(&mut self) -> Result, Error> { + if self.desynced { + return Err(Error::CorruptMessage); + } else if let Some(msg) = self.frames.pop_front() { + return Ok(Some(msg)); + } + + let mut taken = 0; + loop { + // Does our `buf` contain a full message? It does if it is big enough to + // contain a header, and that header has a length which falls within `buf`. + // If so, deframe it and place the message onto the frames output queue. + let mut rd = codec::Reader::init(&self.buf[taken..self.used]); + let m = match OpaqueMessage::read(&mut rd) { + Ok(m) => m, + Err(MessageError::TooShortForHeader | MessageError::TooShortForLength) => break, + Err(_) => { + self.desynced = true; + return Err(Error::CorruptMessage); + } + }; + + taken += rd.used(); + self.frames.push_back(m); + } + + #[allow(clippy::comparison_chain)] + if taken < self.used { + /* Before: + * +----------+----------+----------+ + * | taken | pending |xxxxxxxxxx| + * +----------+----------+----------+ + * 0 ^ taken ^ self.used + * + * After: + * +----------+----------+----------+ + * | pending |xxxxxxxxxxxxxxxxxxxxx| + * +----------+----------+----------+ + * 0 ^ self.used + */ + + self.buf + .copy_within(taken..self.used, 0); + self.used -= taken; + } else if taken == self.used { + self.used = 0; + } + + Ok(self.frames.pop_front()) + } + + /// Read some bytes from `rd`, and add them to our internal buffer. 
+ #[allow(clippy::comparison_chain)] + pub fn read(&mut self, rd: &mut dyn io::Read) -> io::Result { + if self.used == OpaqueMessage::MAX_WIRE_SIZE { + return Err(io::Error::new(io::ErrorKind::Other, "message buffer full")); + } + + // Try to do the largest reads possible. Note that if + // we get a message with a length field out of range here, + // we do a zero length read. That looks like an EOF to + // the next layer up, which is fine. + debug_assert!(self.used <= OpaqueMessage::MAX_WIRE_SIZE); + let new_bytes = rd.read(&mut self.buf[self.used..])?; + self.used += new_bytes; + Ok(new_bytes) + } + + /// Returns true if we have messages for the caller + /// to process, either whole messages in our output + /// queue or partial messages in our buffer. + pub fn has_pending(&self) -> bool { + !self.frames.is_empty() || self.used > 0 + } +} + +#[cfg(test)] +mod tests { + use super::MessageDeframer; + use crate::msgs::message::{Message, OpaqueMessage}; + use crate::{msgs, Error}; + use std::convert::TryFrom; + use std::io; + + const FIRST_MESSAGE: &[u8] = include_bytes!("../testdata/deframer-test.1.bin"); + const SECOND_MESSAGE: &[u8] = include_bytes!("../testdata/deframer-test.2.bin"); + + const EMPTY_APPLICATIONDATA_MESSAGE: &[u8] = + include_bytes!("../testdata/deframer-empty-applicationdata.bin"); + + const INVALID_EMPTY_MESSAGE: &[u8] = include_bytes!("../testdata/deframer-invalid-empty.bin"); + const INVALID_CONTENTTYPE_MESSAGE: &[u8] = + include_bytes!("../testdata/deframer-invalid-contenttype.bin"); + const INVALID_VERSION_MESSAGE: &[u8] = + include_bytes!("../testdata/deframer-invalid-version.bin"); + const INVALID_LENGTH_MESSAGE: &[u8] = include_bytes!("../testdata/deframer-invalid-length.bin"); + + struct ByteRead<'a> { + buf: &'a [u8], + offs: usize, + } + + impl<'a> ByteRead<'a> { + fn new(bytes: &'a [u8]) -> Self { + ByteRead { + buf: bytes, + offs: 0, + } + } + } + + impl<'a> io::Read for ByteRead<'a> { + fn read(&mut self, buf: &mut [u8]) -> io::Result 
{ + let mut len = 0; + + while len < buf.len() && len < self.buf.len() - self.offs { + buf[len] = self.buf[self.offs + len]; + len += 1; + } + + self.offs += len; + + Ok(len) + } + } + + fn input_bytes(d: &mut MessageDeframer, bytes: &[u8]) -> io::Result { + let mut rd = ByteRead::new(bytes); + d.read(&mut rd) + } + + fn input_bytes_concat( + d: &mut MessageDeframer, + bytes1: &[u8], + bytes2: &[u8], + ) -> io::Result { + let mut bytes = vec![0u8; bytes1.len() + bytes2.len()]; + bytes[..bytes1.len()].clone_from_slice(bytes1); + bytes[bytes1.len()..].clone_from_slice(bytes2); + let mut rd = ByteRead::new(&bytes); + d.read(&mut rd) + } + + struct ErrorRead { + error: Option, + } + + impl ErrorRead { + fn new(error: io::Error) -> Self { + Self { error: Some(error) } + } + } + + impl io::Read for ErrorRead { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + for (i, b) in buf.iter_mut().enumerate() { + *b = i as u8; + } + + let error = self.error.take().unwrap(); + Err(error) + } + } + + fn input_error(d: &mut MessageDeframer) { + let error = io::Error::from(io::ErrorKind::TimedOut); + let mut rd = ErrorRead::new(error); + d.read(&mut rd) + .expect_err("error not propagated"); + } + + fn input_whole_incremental(d: &mut MessageDeframer, bytes: &[u8]) { + let before = d.used; + + for i in 0..bytes.len() { + assert_len(1, input_bytes(d, &bytes[i..i + 1])); + assert!(d.has_pending()); + } + + assert_eq!(before + bytes.len(), d.used); + } + + fn assert_len(want: usize, got: io::Result) { + if let Ok(gotval) = got { + assert_eq!(gotval, want); + } else { + panic!("read failed, expected {:?} bytes", want); + } + } + + fn pop_first(d: &mut MessageDeframer) { + let m = d.pop().unwrap().unwrap(); + assert_eq!(m.typ, msgs::enums::ContentType::Handshake); + Message::try_from(m.into_plain_message()).unwrap(); + } + + fn pop_second(d: &mut MessageDeframer) { + let m = d.pop().unwrap().unwrap(); + assert_eq!(m.typ, msgs::enums::ContentType::Alert); + 
Message::try_from(m.into_plain_message()).unwrap(); + } + + #[test] + fn check_incremental() { + let mut d = MessageDeframer::new(); + assert!(!d.has_pending()); + input_whole_incremental(&mut d, FIRST_MESSAGE); + assert!(d.has_pending()); + assert_eq!(0, d.frames.len()); + pop_first(&mut d); + assert!(!d.has_pending()); + assert!(!d.desynced); + } + + #[test] + fn check_incremental_2() { + let mut d = MessageDeframer::new(); + assert!(!d.has_pending()); + input_whole_incremental(&mut d, FIRST_MESSAGE); + assert!(d.has_pending()); + input_whole_incremental(&mut d, SECOND_MESSAGE); + assert!(d.has_pending()); + assert_eq!(0, d.frames.len()); + pop_first(&mut d); + assert!(d.has_pending()); + assert_eq!(1, d.frames.len()); + pop_second(&mut d); + assert!(!d.has_pending()); + assert!(!d.desynced); + } + + #[test] + fn check_whole() { + let mut d = MessageDeframer::new(); + assert!(!d.has_pending()); + assert_len(FIRST_MESSAGE.len(), input_bytes(&mut d, FIRST_MESSAGE)); + assert!(d.has_pending()); + assert_eq!(d.frames.len(), 0); + pop_first(&mut d); + assert!(!d.has_pending()); + assert!(!d.desynced); + } + + #[test] + fn check_whole_2() { + let mut d = MessageDeframer::new(); + assert!(!d.has_pending()); + assert_len(FIRST_MESSAGE.len(), input_bytes(&mut d, FIRST_MESSAGE)); + assert_len(SECOND_MESSAGE.len(), input_bytes(&mut d, SECOND_MESSAGE)); + assert_eq!(d.frames.len(), 0); + pop_first(&mut d); + assert_eq!(d.frames.len(), 1); + pop_second(&mut d); + assert!(!d.has_pending()); + assert!(!d.desynced); + } + + #[test] + fn test_two_in_one_read() { + let mut d = MessageDeframer::new(); + assert!(!d.has_pending()); + assert_len( + FIRST_MESSAGE.len() + SECOND_MESSAGE.len(), + input_bytes_concat(&mut d, FIRST_MESSAGE, SECOND_MESSAGE), + ); + assert_eq!(d.frames.len(), 0); + pop_first(&mut d); + assert_eq!(d.frames.len(), 1); + pop_second(&mut d); + assert!(!d.has_pending()); + assert!(!d.desynced); + } + + #[test] + fn test_two_in_one_read_shortest_first() { + let mut 
d = MessageDeframer::new(); + assert!(!d.has_pending()); + assert_len( + FIRST_MESSAGE.len() + SECOND_MESSAGE.len(), + input_bytes_concat(&mut d, SECOND_MESSAGE, FIRST_MESSAGE), + ); + assert_eq!(d.frames.len(), 0); + pop_second(&mut d); + assert_eq!(d.frames.len(), 1); + pop_first(&mut d); + assert!(!d.has_pending()); + assert!(!d.desynced); + } + + #[test] + fn test_incremental_with_nonfatal_read_error() { + let mut d = MessageDeframer::new(); + assert_len(3, input_bytes(&mut d, &FIRST_MESSAGE[..3])); + input_error(&mut d); + assert_len( + FIRST_MESSAGE.len() - 3, + input_bytes(&mut d, &FIRST_MESSAGE[3..]), + ); + assert_eq!(d.frames.len(), 0); + pop_first(&mut d); + assert!(!d.has_pending()); + assert!(!d.desynced); + } + + #[test] + fn test_invalid_contenttype_errors() { + let mut d = MessageDeframer::new(); + assert_len( + INVALID_CONTENTTYPE_MESSAGE.len(), + input_bytes(&mut d, INVALID_CONTENTTYPE_MESSAGE), + ); + assert_eq!(d.pop().unwrap_err(), Error::CorruptMessage); + } + + #[test] + fn test_invalid_version_errors() { + let mut d = MessageDeframer::new(); + assert_len( + INVALID_VERSION_MESSAGE.len(), + input_bytes(&mut d, INVALID_VERSION_MESSAGE), + ); + assert_eq!(d.pop().unwrap_err(), Error::CorruptMessage); + } + + #[test] + fn test_invalid_length_errors() { + let mut d = MessageDeframer::new(); + assert_len( + INVALID_LENGTH_MESSAGE.len(), + input_bytes(&mut d, INVALID_LENGTH_MESSAGE), + ); + assert_eq!(d.pop().unwrap_err(), Error::CorruptMessage); + } + + #[test] + fn test_empty_applicationdata() { + let mut d = MessageDeframer::new(); + assert_len( + EMPTY_APPLICATIONDATA_MESSAGE.len(), + input_bytes(&mut d, EMPTY_APPLICATIONDATA_MESSAGE), + ); + let m = d.pop().unwrap().unwrap(); + assert_eq!(m.typ, msgs::enums::ContentType::ApplicationData); + assert_eq!(m.payload.0.len(), 0); + assert!(!d.has_pending()); + assert!(!d.desynced); + } + + #[test] + fn test_invalid_empty_errors() { + let mut d = MessageDeframer::new(); + assert_len( + 
INVALID_EMPTY_MESSAGE.len(), + input_bytes(&mut d, INVALID_EMPTY_MESSAGE), + ); + assert_eq!(d.pop().unwrap_err(), Error::CorruptMessage); + // CorruptMessage has been fused + assert_eq!(d.pop().unwrap_err(), Error::CorruptMessage); + } + + #[test] + fn test_limited_buffer() { + const PAYLOAD_LEN: usize = 16_384; + let mut message = Vec::with_capacity(8192); + message.push(0x17); // ApplicationData + message.extend(&[0x03, 0x04]); // ProtocolVersion + message.extend((PAYLOAD_LEN as u16).to_be_bytes()); // payload length + message.extend(&[0; PAYLOAD_LEN]); + + let mut d = MessageDeframer::new(); + assert_len(message.len(), input_bytes(&mut d, &message)); + assert_len( + OpaqueMessage::MAX_WIRE_SIZE - 16_389, + input_bytes(&mut d, &message), + ); + assert!(input_bytes(&mut d, &message).is_err()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/enums.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/enums.rs new file mode 100644 index 0000000000000000000000000000000000000000..333db73319831893f7e4d86b50476acfa7396c9a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/enums.rs @@ -0,0 +1,375 @@ +#![allow(clippy::upper_case_acronyms)] +#![allow(non_camel_case_types)] +/// This file is autogenerated. See https://github.com/ctz/tls-hacking/ +use crate::msgs::codec::{Codec, Reader}; + +enum_builder! { + /// The `HashAlgorithm` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: HashAlgorithm; + EnumVal{ + NONE => 0x00, + MD5 => 0x01, + SHA1 => 0x02, + SHA224 => 0x03, + SHA256 => 0x04, + SHA384 => 0x05, + SHA512 => 0x06 + } +} + +enum_builder! { + /// The `SignatureAlgorithm` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. 
+ /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: SignatureAlgorithm; + EnumVal{ + Anonymous => 0x00, + RSA => 0x01, + DSA => 0x02, + ECDSA => 0x03, + ED25519 => 0x07, + ED448 => 0x08 + } +} + +enum_builder! { + /// The `ClientCertificateType` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: ClientCertificateType; + EnumVal{ + RSASign => 0x01, + DSSSign => 0x02, + RSAFixedDH => 0x03, + DSSFixedDH => 0x04, + RSAEphemeralDH => 0x05, + DSSEphemeralDH => 0x06, + FortezzaDMS => 0x14, + ECDSASign => 0x40, + RSAFixedECDH => 0x41, + ECDSAFixedECDH => 0x42 + } +} + +enum_builder! { + /// The `Compression` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: Compression; + EnumVal{ + Null => 0x00, + Deflate => 0x01, + LSZ => 0x40 + } +} + +enum_builder! { + /// The `ContentType` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: ContentType; + EnumVal{ + ChangeCipherSpec => 0x14, + Alert => 0x15, + Handshake => 0x16, + ApplicationData => 0x17, + Heartbeat => 0x18 + } +} + +enum_builder! { + /// The `HandshakeType` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. 
+ @U8 + EnumName: HandshakeType; + EnumVal{ + HelloRequest => 0x00, + ClientHello => 0x01, + ServerHello => 0x02, + HelloVerifyRequest => 0x03, + NewSessionTicket => 0x04, + EndOfEarlyData => 0x05, + HelloRetryRequest => 0x06, + EncryptedExtensions => 0x08, + Certificate => 0x0b, + ServerKeyExchange => 0x0c, + CertificateRequest => 0x0d, + ServerHelloDone => 0x0e, + CertificateVerify => 0x0f, + ClientKeyExchange => 0x10, + Finished => 0x14, + CertificateURL => 0x15, + CertificateStatus => 0x16, + KeyUpdate => 0x18, + MessageHash => 0xfe + } +} + +enum_builder! { + /// The `AlertLevel` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: AlertLevel; + EnumVal{ + Warning => 0x01, + Fatal => 0x02 + } +} + +enum_builder! { + /// The `AlertDescription` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. 
+ @U8 + EnumName: AlertDescription; + EnumVal{ + CloseNotify => 0x00, + UnexpectedMessage => 0x0a, + BadRecordMac => 0x14, + DecryptionFailed => 0x15, + RecordOverflow => 0x16, + DecompressionFailure => 0x1e, + HandshakeFailure => 0x28, + NoCertificate => 0x29, + BadCertificate => 0x2a, + UnsupportedCertificate => 0x2b, + CertificateRevoked => 0x2c, + CertificateExpired => 0x2d, + CertificateUnknown => 0x2e, + IllegalParameter => 0x2f, + UnknownCA => 0x30, + AccessDenied => 0x31, + DecodeError => 0x32, + DecryptError => 0x33, + ExportRestriction => 0x3c, + ProtocolVersion => 0x46, + InsufficientSecurity => 0x47, + InternalError => 0x50, + InappropriateFallback => 0x56, + UserCanceled => 0x5a, + NoRenegotiation => 0x64, + MissingExtension => 0x6d, + UnsupportedExtension => 0x6e, + CertificateUnobtainable => 0x6f, + UnrecognisedName => 0x70, + BadCertificateStatusResponse => 0x71, + BadCertificateHashValue => 0x72, + UnknownPSKIdentity => 0x73, + CertificateRequired => 0x74, + NoApplicationProtocol => 0x78 + } +} + +enum_builder! { + /// The `HeartbeatMessageType` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: HeartbeatMessageType; + EnumVal{ + Request => 0x01, + Response => 0x02 + } +} + +enum_builder! { + /// The `ExtensionType` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. 
+ @U16 + EnumName: ExtensionType; + EnumVal{ + ServerName => 0x0000, + MaxFragmentLength => 0x0001, + ClientCertificateUrl => 0x0002, + TrustedCAKeys => 0x0003, + TruncatedHMAC => 0x0004, + StatusRequest => 0x0005, + UserMapping => 0x0006, + ClientAuthz => 0x0007, + ServerAuthz => 0x0008, + CertificateType => 0x0009, + EllipticCurves => 0x000a, + ECPointFormats => 0x000b, + SRP => 0x000c, + SignatureAlgorithms => 0x000d, + UseSRTP => 0x000e, + Heartbeat => 0x000f, + ALProtocolNegotiation => 0x0010, + SCT => 0x0012, + Padding => 0x0015, + ExtendedMasterSecret => 0x0017, + SessionTicket => 0x0023, + PreSharedKey => 0x0029, + EarlyData => 0x002a, + SupportedVersions => 0x002b, + Cookie => 0x002c, + PSKKeyExchangeModes => 0x002d, + TicketEarlyDataInfo => 0x002e, + CertificateAuthorities => 0x002f, + OIDFilters => 0x0030, + PostHandshakeAuth => 0x0031, + SignatureAlgorithmsCert => 0x0032, + KeyShare => 0x0033, + TransportParameters => 0x0039, + NextProtocolNegotiation => 0x3374, + ChannelId => 0x754f, + RenegotiationInfo => 0xff01, + TransportParametersDraft => 0xffa5 + } +} + +enum_builder! { + /// The `ServerNameType` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: ServerNameType; + EnumVal{ + HostName => 0x00 + } +} + +enum_builder! { + /// The `NamedCurve` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. 
+ @U16 + EnumName: NamedCurve; + EnumVal{ + sect163k1 => 0x0001, + sect163r1 => 0x0002, + sect163r2 => 0x0003, + sect193r1 => 0x0004, + sect193r2 => 0x0005, + sect233k1 => 0x0006, + sect233r1 => 0x0007, + sect239k1 => 0x0008, + sect283k1 => 0x0009, + sect283r1 => 0x000a, + sect409k1 => 0x000b, + sect409r1 => 0x000c, + sect571k1 => 0x000d, + sect571r1 => 0x000e, + secp160k1 => 0x000f, + secp160r1 => 0x0010, + secp160r2 => 0x0011, + secp192k1 => 0x0012, + secp192r1 => 0x0013, + secp224k1 => 0x0014, + secp224r1 => 0x0015, + secp256k1 => 0x0016, + secp256r1 => 0x0017, + secp384r1 => 0x0018, + secp521r1 => 0x0019, + brainpoolp256r1 => 0x001a, + brainpoolp384r1 => 0x001b, + brainpoolp512r1 => 0x001c, + X25519 => 0x001d, + X448 => 0x001e, + arbitrary_explicit_prime_curves => 0xff01, + arbitrary_explicit_char2_curves => 0xff02 + } +} + +enum_builder! { + /// The `NamedGroup` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U16 + EnumName: NamedGroup; + EnumVal{ + secp256r1 => 0x0017, + secp384r1 => 0x0018, + secp521r1 => 0x0019, + X25519 => 0x001d, + X448 => 0x001e, + FFDHE2048 => 0x0100, + FFDHE3072 => 0x0101, + FFDHE4096 => 0x0102, + FFDHE6144 => 0x0103, + FFDHE8192 => 0x0104 + } +} + +enum_builder! { + /// The `ECPointFormat` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: ECPointFormat; + EnumVal{ + Uncompressed => 0x00, + ANSIX962CompressedPrime => 0x01, + ANSIX962CompressedChar2 => 0x02 + } +} + +enum_builder! { + /// The `HeartbeatMode` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. 
+ @U8 + EnumName: HeartbeatMode; + EnumVal{ + PeerAllowedToSend => 0x01, + PeerNotAllowedToSend => 0x02 + } +} + +enum_builder! { + /// The `ECCurveType` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: ECCurveType; + EnumVal{ + ExplicitPrime => 0x01, + ExplicitChar2 => 0x02, + NamedCurve => 0x03 + } +} + +enum_builder! { + /// The `PSKKeyExchangeMode` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: PSKKeyExchangeMode; + EnumVal{ + PSK_KE => 0x00, + PSK_DHE_KE => 0x01 + } +} + +enum_builder! { + /// The `KeyUpdateRequest` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: KeyUpdateRequest; + EnumVal{ + UpdateNotRequested => 0x00, + UpdateRequested => 0x01 + } +} + +enum_builder! { + /// The `CertificateStatusType` TLS protocol enum. Values in this enum are taken + /// from the various RFCs covering TLS, and are listed by IANA. + /// The `Unknown` item is used when processing unrecognised ordinals. + @U8 + EnumName: CertificateStatusType; + EnumVal{ + OCSP => 0x01 + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/enums_test.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/enums_test.rs new file mode 100644 index 0000000000000000000000000000000000000000..220b5aa513f91b39ba2ab2e22054c5758d56afac --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/enums_test.rs @@ -0,0 +1,88 @@ +/// These tests are intended to provide coverage and +/// check panic-safety of relatively unused values. 
+use super::codec::Codec; +use super::enums::*; + +fn get8(enum_value: &T) -> u8 { + let enc = enum_value.get_encoding(); + assert_eq!(enc.len(), 1); + enc[0] +} + +fn get16(enum_value: &T) -> u16 { + let enc = enum_value.get_encoding(); + assert_eq!(enc.len(), 2); + (enc[0] as u16 >> 8) | (enc[1] as u16) +} + +fn test_enum16(first: T, last: T) { + let first_v = get16(&first); + let last_v = get16(&last); + + for val in first_v..last_v + 1 { + let mut buf = Vec::new(); + val.encode(&mut buf); + assert_eq!(buf.len(), 2); + + let t = T::read_bytes(&buf).unwrap(); + assert_eq!(val, get16(&t)); + } +} + +fn test_enum8(first: T, last: T) { + let first_v = get8(&first); + let last_v = get8(&last); + + for val in first_v..last_v + 1 { + let mut buf = Vec::new(); + val.encode(&mut buf); + assert_eq!(buf.len(), 1); + + let t = T::read_bytes(&buf).unwrap(); + assert_eq!(val, get8(&t)); + } +} + +#[test] +fn test_enums() { + test_enum8::(HashAlgorithm::NONE, HashAlgorithm::SHA512); + test_enum8::(SignatureAlgorithm::Anonymous, SignatureAlgorithm::ECDSA); + test_enum8::( + ClientCertificateType::RSASign, + ClientCertificateType::ECDSAFixedECDH, + ); + test_enum8::(Compression::Null, Compression::LSZ); + test_enum8::(ContentType::ChangeCipherSpec, ContentType::Heartbeat); + test_enum8::(HandshakeType::HelloRequest, HandshakeType::MessageHash); + test_enum8::(AlertLevel::Warning, AlertLevel::Fatal); + test_enum8::( + AlertDescription::CloseNotify, + AlertDescription::NoApplicationProtocol, + ); + test_enum8::( + HeartbeatMessageType::Request, + HeartbeatMessageType::Response, + ); + test_enum16::(ExtensionType::ServerName, ExtensionType::RenegotiationInfo); + test_enum8::(ServerNameType::HostName, ServerNameType::HostName); + test_enum16::( + NamedCurve::sect163k1, + NamedCurve::arbitrary_explicit_char2_curves, + ); + test_enum16::(NamedGroup::secp256r1, NamedGroup::FFDHE8192); + test_enum8::( + ECPointFormat::Uncompressed, + ECPointFormat::ANSIX962CompressedChar2, + ); + 
test_enum8::( + HeartbeatMode::PeerAllowedToSend, + HeartbeatMode::PeerNotAllowedToSend, + ); + test_enum8::(ECCurveType::ExplicitPrime, ECCurveType::NamedCurve); + test_enum8::(PSKKeyExchangeMode::PSK_KE, PSKKeyExchangeMode::PSK_DHE_KE); + test_enum8::( + KeyUpdateRequest::UpdateNotRequested, + KeyUpdateRequest::UpdateRequested, + ); + test_enum8::(CertificateStatusType::OCSP, CertificateStatusType::OCSP); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/fragmenter.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/fragmenter.rs new file mode 100644 index 0000000000000000000000000000000000000000..b649bcfd3290ef04d48d240be8dd69ea0dfa85bc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/fragmenter.rs @@ -0,0 +1,162 @@ +use crate::enums::ProtocolVersion; +use crate::msgs::enums::ContentType; +use crate::msgs::message::{BorrowedPlainMessage, PlainMessage}; +use crate::Error; +pub const MAX_FRAGMENT_LEN: usize = 16384; +pub const PACKET_OVERHEAD: usize = 1 + 2 + 2; +pub const MAX_FRAGMENT_SIZE: usize = MAX_FRAGMENT_LEN + PACKET_OVERHEAD; + +pub struct MessageFragmenter { + max_frag: usize, +} + +impl Default for MessageFragmenter { + fn default() -> Self { + Self { + max_frag: MAX_FRAGMENT_LEN, + } + } +} + +impl MessageFragmenter { + /// Take the Message `msg` and re-fragment it into new + /// messages whose fragment is no more than max_frag. + /// Return an iterator across those messages. + /// Payloads are borrowed. + pub fn fragment_message<'a>( + &self, + msg: &'a PlainMessage, + ) -> impl Iterator> + 'a { + self.fragment_slice(msg.typ, msg.version, &msg.payload.0) + } + + /// Enqueue borrowed fragments of (version, typ, payload) which + /// are no longer than max_frag onto the `out` deque. 
+ pub fn fragment_slice<'a>( + &self, + typ: ContentType, + version: ProtocolVersion, + payload: &'a [u8], + ) -> impl Iterator> + 'a { + payload + .chunks(self.max_frag) + .map(move |c| BorrowedPlainMessage { + typ, + version, + payload: c, + }) + } + + /// Set the maximum fragment size that will be produced. + /// + /// This includes overhead. A `max_fragment_size` of 10 will produce TLS fragments + /// up to 10 bytes long. + /// + /// A `max_fragment_size` of `None` sets the highest allowable fragment size. + /// + /// Returns BadMaxFragmentSize if the size is smaller than 32 or larger than 16389. + pub fn set_max_fragment_size(&mut self, max_fragment_size: Option) -> Result<(), Error> { + self.max_frag = match max_fragment_size { + Some(sz @ 32..=MAX_FRAGMENT_SIZE) => sz - PACKET_OVERHEAD, + None => MAX_FRAGMENT_LEN, + _ => return Err(Error::BadMaxFragmentSize), + }; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::{MessageFragmenter, PACKET_OVERHEAD}; + use crate::enums::ProtocolVersion; + use crate::msgs::base::Payload; + use crate::msgs::enums::ContentType; + use crate::msgs::message::{BorrowedPlainMessage, PlainMessage}; + + fn msg_eq( + m: &BorrowedPlainMessage, + total_len: usize, + typ: &ContentType, + version: &ProtocolVersion, + bytes: &[u8], + ) { + assert_eq!(&m.typ, typ); + assert_eq!(&m.version, version); + assert_eq!(m.payload, bytes); + + let buf = m.to_unencrypted_opaque().encode(); + + assert_eq!(total_len, buf.len()); + } + + #[test] + fn smoke() { + let typ = ContentType::Handshake; + let version = ProtocolVersion::TLSv1_2; + let data: Vec = (1..70u8).collect(); + let m = PlainMessage { + typ, + version, + payload: Payload::new(data), + }; + + let mut frag = MessageFragmenter::default(); + frag.set_max_fragment_size(Some(32)) + .unwrap(); + let q = frag + .fragment_message(&m) + .collect::>(); + assert_eq!(q.len(), 3); + msg_eq( + &q[0], + 32, + &typ, + &version, + &[ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 
19, 20, 21, 22, 23, + 24, 25, 26, 27, + ], + ); + msg_eq( + &q[1], + 32, + &typ, + &version, + &[ + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, + ], + ); + msg_eq( + &q[2], + 20, + &typ, + &version, + &[55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69], + ); + } + + #[test] + fn non_fragment() { + let m = PlainMessage { + typ: ContentType::Handshake, + version: ProtocolVersion::TLSv1_2, + payload: Payload::new(b"\x01\x02\x03\x04\x05\x06\x07\x08".to_vec()), + }; + + let mut frag = MessageFragmenter::default(); + frag.set_max_fragment_size(Some(32)) + .unwrap(); + let q = frag + .fragment_message(&m) + .collect::>(); + assert_eq!(q.len(), 1); + msg_eq( + &q[0], + PACKET_OVERHEAD + 8, + &ContentType::Handshake, + &ProtocolVersion::TLSv1_2, + b"\x01\x02\x03\x04\x05\x06\x07\x08", + ); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/handshake.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/handshake.rs new file mode 100644 index 0000000000000000000000000000000000000000..43bd791f1557882ebb9d738e57083d1037a1832c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/handshake.rs @@ -0,0 +1,2371 @@ +#![allow(non_camel_case_types)] +use crate::enums::{CipherSuite, ProtocolVersion, SignatureScheme}; +use crate::key; +use crate::msgs::base::{Payload, PayloadU16, PayloadU24, PayloadU8}; +use crate::msgs::codec; +use crate::msgs::codec::{Codec, Reader}; +use crate::msgs::enums::{ + CertificateStatusType, ClientCertificateType, Compression, ECCurveType, ECPointFormat, + ExtensionType, HandshakeType, HashAlgorithm, KeyUpdateRequest, NamedGroup, PSKKeyExchangeMode, + ServerNameType, SignatureAlgorithm, +}; +use crate::rand; + +#[cfg(feature = "logging")] +use crate::log::warn; + +use std::collections; +use std::fmt; + +macro_rules! 
declare_u8_vec( + ($name:ident, $itemtype:ty) => { + pub type $name = Vec<$itemtype>; + + impl Codec for $name { + fn encode(&self, bytes: &mut Vec) { + codec::encode_vec_u8(bytes, self); + } + + fn read(r: &mut Reader) -> Option { + codec::read_vec_u8::<$itemtype>(r) + } + } + } +); + +macro_rules! declare_u16_vec( + ($name:ident, $itemtype:ty) => { + pub type $name = Vec<$itemtype>; + + impl Codec for $name { + fn encode(&self, bytes: &mut Vec) { + codec::encode_vec_u16(bytes, self); + } + + fn read(r: &mut Reader) -> Option { + codec::read_vec_u16::<$itemtype>(r) + } + } + } +); + +declare_u16_vec!(VecU16OfPayloadU8, PayloadU8); +declare_u16_vec!(VecU16OfPayloadU16, PayloadU16); + +#[derive(Clone, Copy, Eq, PartialEq)] +pub struct Random(pub [u8; 32]); + +impl fmt::Debug for Random { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + super::base::hex(f, &self.0) + } +} + +static HELLO_RETRY_REQUEST_RANDOM: Random = Random([ + 0xcf, 0x21, 0xad, 0x74, 0xe5, 0x9a, 0x61, 0x11, 0xbe, 0x1d, 0x8c, 0x02, 0x1e, 0x65, 0xb8, 0x91, + 0xc2, 0xa2, 0x11, 0x16, 0x7a, 0xbb, 0x8c, 0x5e, 0x07, 0x9e, 0x09, 0xe2, 0xc8, 0xa8, 0x33, 0x9c, +]); + +static ZERO_RANDOM: Random = Random([0u8; 32]); + +impl Codec for Random { + fn encode(&self, bytes: &mut Vec) { + bytes.extend_from_slice(&self.0); + } + + fn read(r: &mut Reader) -> Option { + let bytes = r.take(32)?; + let mut opaque = [0; 32]; + opaque.clone_from_slice(bytes); + + Some(Self(opaque)) + } +} + +impl Random { + pub fn new() -> Result { + let mut data = [0u8; 32]; + rand::fill_random(&mut data)?; + Ok(Self(data)) + } + + pub fn write_slice(&self, bytes: &mut [u8]) { + let buf = self.get_encoding(); + bytes.copy_from_slice(&buf); + } +} + +impl From<[u8; 32]> for Random { + #[inline] + fn from(bytes: [u8; 32]) -> Self { + Self(bytes) + } +} + +#[derive(Copy, Clone)] +pub struct SessionID { + len: usize, + data: [u8; 32], +} + +impl fmt::Debug for SessionID { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
fmt::Result { + super::base::hex(f, &self.data[..self.len]) + } +} + +impl PartialEq for SessionID { + fn eq(&self, other: &Self) -> bool { + if self.len != other.len { + return false; + } + + let mut diff = 0u8; + for i in 0..self.len { + diff |= self.data[i] ^ other.data[i] + } + + diff == 0u8 + } +} + +impl Codec for SessionID { + fn encode(&self, bytes: &mut Vec) { + debug_assert!(self.len <= 32); + bytes.push(self.len as u8); + bytes.extend_from_slice(&self.data[..self.len]); + } + + fn read(r: &mut Reader) -> Option { + let len = u8::read(r)? as usize; + if len > 32 { + return None; + } + + let bytes = r.take(len)?; + let mut out = [0u8; 32]; + out[..len].clone_from_slice(&bytes[..len]); + + Some(Self { data: out, len }) + } +} + +impl SessionID { + pub fn random() -> Result { + let mut data = [0u8; 32]; + rand::fill_random(&mut data)?; + Ok(Self { data, len: 32 }) + } + + pub fn empty() -> Self { + Self { + data: [0u8; 32], + len: 0, + } + } + + pub fn len(&self) -> usize { + self.len + } + + pub fn is_empty(&self) -> bool { + self.len == 0 + } +} + +#[derive(Clone, Debug)] +pub struct UnknownExtension { + pub typ: ExtensionType, + pub payload: Payload, +} + +impl UnknownExtension { + fn encode(&self, bytes: &mut Vec) { + self.payload.encode(bytes); + } + + fn read(typ: ExtensionType, r: &mut Reader) -> Self { + let payload = Payload::read(r); + Self { typ, payload } + } +} + +declare_u8_vec!(ECPointFormatList, ECPointFormat); + +pub trait SupportedPointFormats { + fn supported() -> ECPointFormatList; +} + +impl SupportedPointFormats for ECPointFormatList { + fn supported() -> ECPointFormatList { + vec![ECPointFormat::Uncompressed] + } +} + +declare_u16_vec!(NamedGroups, NamedGroup); + +declare_u16_vec!(SupportedSignatureSchemes, SignatureScheme); + +pub trait DecomposedSignatureScheme { + fn sign(&self) -> SignatureAlgorithm; + fn make(alg: SignatureAlgorithm, hash: HashAlgorithm) -> SignatureScheme; +} + +impl DecomposedSignatureScheme for SignatureScheme 
{ + fn sign(&self) -> SignatureAlgorithm { + match *self { + Self::RSA_PKCS1_SHA1 + | Self::RSA_PKCS1_SHA256 + | Self::RSA_PKCS1_SHA384 + | Self::RSA_PKCS1_SHA512 + | Self::RSA_PSS_SHA256 + | Self::RSA_PSS_SHA384 + | Self::RSA_PSS_SHA512 => SignatureAlgorithm::RSA, + Self::ECDSA_NISTP256_SHA256 + | Self::ECDSA_NISTP384_SHA384 + | Self::ECDSA_NISTP521_SHA512 => SignatureAlgorithm::ECDSA, + _ => SignatureAlgorithm::Unknown(0), + } + } + + fn make(alg: SignatureAlgorithm, hash: HashAlgorithm) -> SignatureScheme { + use crate::msgs::enums::HashAlgorithm::{SHA1, SHA256, SHA384, SHA512}; + use crate::msgs::enums::SignatureAlgorithm::{ECDSA, RSA}; + + match (alg, hash) { + (RSA, SHA1) => Self::RSA_PKCS1_SHA1, + (RSA, SHA256) => Self::RSA_PKCS1_SHA256, + (RSA, SHA384) => Self::RSA_PKCS1_SHA384, + (RSA, SHA512) => Self::RSA_PKCS1_SHA512, + (ECDSA, SHA256) => Self::ECDSA_NISTP256_SHA256, + (ECDSA, SHA384) => Self::ECDSA_NISTP384_SHA384, + (ECDSA, SHA512) => Self::ECDSA_NISTP521_SHA512, + (_, _) => unreachable!(), + } + } +} + +#[derive(Clone, Debug)] +pub enum ServerNamePayload { + // Stored twice, bytes so we can round-trip, and DnsName for use + HostName((PayloadU16, webpki::DnsName)), + Unknown(Payload), +} + +impl ServerNamePayload { + pub fn new_hostname(hostname: webpki::DnsName) -> Self { + let raw = { + let s: &str = hostname.as_ref().into(); + PayloadU16::new(s.as_bytes().into()) + }; + Self::HostName((raw, hostname)) + } + + fn read_hostname(r: &mut Reader) -> Option { + let raw = PayloadU16::read(r)?; + + let dns_name = { + match webpki::DnsNameRef::try_from_ascii(&raw.0) { + Ok(dns_name) => dns_name.into(), + Err(_) => { + warn!("Illegal SNI hostname received {:?}", raw.0); + return None; + } + } + }; + Some(Self::HostName((raw, dns_name))) + } + + fn encode(&self, bytes: &mut Vec) { + match *self { + Self::HostName((ref r, _)) => r.encode(bytes), + Self::Unknown(ref r) => r.encode(bytes), + } + } +} + +#[derive(Clone, Debug)] +pub struct ServerName { + pub typ: 
ServerNameType, + pub payload: ServerNamePayload, +} + +impl Codec for ServerName { + fn encode(&self, bytes: &mut Vec) { + self.typ.encode(bytes); + self.payload.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let typ = ServerNameType::read(r)?; + + let payload = match typ { + ServerNameType::HostName => ServerNamePayload::read_hostname(r)?, + _ => ServerNamePayload::Unknown(Payload::read(r)), + }; + + Some(Self { typ, payload }) + } +} + +declare_u16_vec!(ServerNameRequest, ServerName); + +pub trait ConvertServerNameList { + fn has_duplicate_names_for_type(&self) -> bool; + fn get_single_hostname(&self) -> Option; +} + +impl ConvertServerNameList for ServerNameRequest { + /// RFC6066: "The ServerNameList MUST NOT contain more than one name of the same name_type." + fn has_duplicate_names_for_type(&self) -> bool { + let mut seen = collections::HashSet::new(); + + for name in self { + if !seen.insert(name.typ.get_u8()) { + return true; + } + } + + false + } + + fn get_single_hostname(&self) -> Option { + fn only_dns_hostnames(name: &ServerName) -> Option { + if let ServerNamePayload::HostName((_, ref dns)) = name.payload { + Some(dns.as_ref()) + } else { + None + } + } + + self.iter() + .filter_map(only_dns_hostnames) + .next() + } +} + +pub type ProtocolNameList = VecU16OfPayloadU8; + +pub trait ConvertProtocolNameList { + fn from_slices(names: &[&[u8]]) -> Self; + fn to_slices(&self) -> Vec<&[u8]>; + fn as_single_slice(&self) -> Option<&[u8]>; +} + +impl ConvertProtocolNameList for ProtocolNameList { + fn from_slices(names: &[&[u8]]) -> Self { + let mut ret = Self::new(); + + for name in names { + ret.push(PayloadU8::new(name.to_vec())); + } + + ret + } + + fn to_slices(&self) -> Vec<&[u8]> { + self.iter() + .map(|proto| -> &[u8] { &proto.0 }) + .collect::>() + } + + fn as_single_slice(&self) -> Option<&[u8]> { + if self.len() == 1 { + Some(&self[0].0) + } else { + None + } + } +} + +// --- TLS 1.3 Key shares --- +#[derive(Clone, Debug)] +pub struct 
KeyShareEntry { + pub group: NamedGroup, + pub payload: PayloadU16, +} + +impl KeyShareEntry { + pub fn new(group: NamedGroup, payload: &[u8]) -> Self { + Self { + group, + payload: PayloadU16::new(payload.to_vec()), + } + } +} + +impl Codec for KeyShareEntry { + fn encode(&self, bytes: &mut Vec) { + self.group.encode(bytes); + self.payload.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let group = NamedGroup::read(r)?; + let payload = PayloadU16::read(r)?; + + Some(Self { group, payload }) + } +} + +// --- TLS 1.3 PresharedKey offers --- +#[derive(Clone, Debug)] +pub struct PresharedKeyIdentity { + pub identity: PayloadU16, + pub obfuscated_ticket_age: u32, +} + +impl PresharedKeyIdentity { + pub fn new(id: Vec, age: u32) -> Self { + Self { + identity: PayloadU16::new(id), + obfuscated_ticket_age: age, + } + } +} + +impl Codec for PresharedKeyIdentity { + fn encode(&self, bytes: &mut Vec) { + self.identity.encode(bytes); + self.obfuscated_ticket_age.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + Some(Self { + identity: PayloadU16::read(r)?, + obfuscated_ticket_age: u32::read(r)?, + }) + } +} + +declare_u16_vec!(PresharedKeyIdentities, PresharedKeyIdentity); +pub type PresharedKeyBinder = PayloadU8; +pub type PresharedKeyBinders = VecU16OfPayloadU8; + +#[derive(Clone, Debug)] +pub struct PresharedKeyOffer { + pub identities: PresharedKeyIdentities, + pub binders: PresharedKeyBinders, +} + +impl PresharedKeyOffer { + /// Make a new one with one entry. 
+ pub fn new(id: PresharedKeyIdentity, binder: Vec) -> Self { + Self { + identities: vec![id], + binders: vec![PresharedKeyBinder::new(binder)], + } + } +} + +impl Codec for PresharedKeyOffer { + fn encode(&self, bytes: &mut Vec) { + self.identities.encode(bytes); + self.binders.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + Some(Self { + identities: PresharedKeyIdentities::read(r)?, + binders: PresharedKeyBinders::read(r)?, + }) + } +} + +// --- RFC6066 certificate status request --- +type ResponderIDs = VecU16OfPayloadU16; + +#[derive(Clone, Debug)] +pub struct OCSPCertificateStatusRequest { + pub responder_ids: ResponderIDs, + pub extensions: PayloadU16, +} + +impl Codec for OCSPCertificateStatusRequest { + fn encode(&self, bytes: &mut Vec) { + CertificateStatusType::OCSP.encode(bytes); + self.responder_ids.encode(bytes); + self.extensions.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + Some(Self { + responder_ids: ResponderIDs::read(r)?, + extensions: PayloadU16::read(r)?, + }) + } +} + +#[derive(Clone, Debug)] +pub enum CertificateStatusRequest { + OCSP(OCSPCertificateStatusRequest), + Unknown((CertificateStatusType, Payload)), +} + +impl Codec for CertificateStatusRequest { + fn encode(&self, bytes: &mut Vec) { + match self { + Self::OCSP(ref r) => r.encode(bytes), + Self::Unknown((typ, payload)) => { + typ.encode(bytes); + payload.encode(bytes); + } + } + } + + fn read(r: &mut Reader) -> Option { + let typ = CertificateStatusType::read(r)?; + + match typ { + CertificateStatusType::OCSP => { + let ocsp_req = OCSPCertificateStatusRequest::read(r)?; + Some(Self::OCSP(ocsp_req)) + } + _ => { + let data = Payload::read(r); + Some(Self::Unknown((typ, data))) + } + } + } +} + +impl CertificateStatusRequest { + pub fn build_ocsp() -> Self { + let ocsp = OCSPCertificateStatusRequest { + responder_ids: ResponderIDs::new(), + extensions: PayloadU16::empty(), + }; + Self::OCSP(ocsp) + } +} + +// --- +// SCTs + +pub type SCTList = 
VecU16OfPayloadU16; + +// --- + +declare_u8_vec!(PSKKeyExchangeModes, PSKKeyExchangeMode); +declare_u16_vec!(KeyShareEntries, KeyShareEntry); +declare_u8_vec!(ProtocolVersions, ProtocolVersion); + +#[derive(Clone, Debug)] +pub enum ClientExtension { + ECPointFormats(ECPointFormatList), + NamedGroups(NamedGroups), + SignatureAlgorithms(SupportedSignatureSchemes), + ServerName(ServerNameRequest), + SessionTicket(ClientSessionTicket), + Protocols(ProtocolNameList), + SupportedVersions(ProtocolVersions), + KeyShare(KeyShareEntries), + PresharedKeyModes(PSKKeyExchangeModes), + PresharedKey(PresharedKeyOffer), + Cookie(PayloadU16), + ExtendedMasterSecretRequest, + CertificateStatusRequest(CertificateStatusRequest), + SignedCertificateTimestampRequest, + TransportParameters(Vec), + TransportParametersDraft(Vec), + EarlyData, + Unknown(UnknownExtension), +} + +impl ClientExtension { + pub fn get_type(&self) -> ExtensionType { + match *self { + Self::ECPointFormats(_) => ExtensionType::ECPointFormats, + Self::NamedGroups(_) => ExtensionType::EllipticCurves, + Self::SignatureAlgorithms(_) => ExtensionType::SignatureAlgorithms, + Self::ServerName(_) => ExtensionType::ServerName, + Self::SessionTicket(_) => ExtensionType::SessionTicket, + Self::Protocols(_) => ExtensionType::ALProtocolNegotiation, + Self::SupportedVersions(_) => ExtensionType::SupportedVersions, + Self::KeyShare(_) => ExtensionType::KeyShare, + Self::PresharedKeyModes(_) => ExtensionType::PSKKeyExchangeModes, + Self::PresharedKey(_) => ExtensionType::PreSharedKey, + Self::Cookie(_) => ExtensionType::Cookie, + Self::ExtendedMasterSecretRequest => ExtensionType::ExtendedMasterSecret, + Self::CertificateStatusRequest(_) => ExtensionType::StatusRequest, + Self::SignedCertificateTimestampRequest => ExtensionType::SCT, + Self::TransportParameters(_) => ExtensionType::TransportParameters, + Self::TransportParametersDraft(_) => ExtensionType::TransportParametersDraft, + Self::EarlyData => ExtensionType::EarlyData, + 
Self::Unknown(ref r) => r.typ, + } + } +} + +impl Codec for ClientExtension { + fn encode(&self, bytes: &mut Vec) { + self.get_type().encode(bytes); + + let mut sub: Vec = Vec::new(); + match *self { + Self::ECPointFormats(ref r) => r.encode(&mut sub), + Self::NamedGroups(ref r) => r.encode(&mut sub), + Self::SignatureAlgorithms(ref r) => r.encode(&mut sub), + Self::ServerName(ref r) => r.encode(&mut sub), + Self::SessionTicket(ClientSessionTicket::Request) + | Self::ExtendedMasterSecretRequest + | Self::SignedCertificateTimestampRequest + | Self::EarlyData => {} + Self::SessionTicket(ClientSessionTicket::Offer(ref r)) => r.encode(&mut sub), + Self::Protocols(ref r) => r.encode(&mut sub), + Self::SupportedVersions(ref r) => r.encode(&mut sub), + Self::KeyShare(ref r) => r.encode(&mut sub), + Self::PresharedKeyModes(ref r) => r.encode(&mut sub), + Self::PresharedKey(ref r) => r.encode(&mut sub), + Self::Cookie(ref r) => r.encode(&mut sub), + Self::CertificateStatusRequest(ref r) => r.encode(&mut sub), + Self::TransportParameters(ref r) | Self::TransportParametersDraft(ref r) => { + sub.extend_from_slice(r) + } + Self::Unknown(ref r) => r.encode(&mut sub), + } + + (sub.len() as u16).encode(bytes); + bytes.append(&mut sub); + } + + fn read(r: &mut Reader) -> Option { + let typ = ExtensionType::read(r)?; + let len = u16::read(r)? as usize; + let mut sub = r.sub(len)?; + + let ext = match typ { + ExtensionType::ECPointFormats => { + Self::ECPointFormats(ECPointFormatList::read(&mut sub)?) 
+ } + ExtensionType::EllipticCurves => Self::NamedGroups(NamedGroups::read(&mut sub)?), + ExtensionType::SignatureAlgorithms => { + let schemes = SupportedSignatureSchemes::read(&mut sub)?; + Self::SignatureAlgorithms(schemes) + } + ExtensionType::ServerName => Self::ServerName(ServerNameRequest::read(&mut sub)?), + ExtensionType::SessionTicket => { + if sub.any_left() { + let contents = Payload::read(&mut sub); + Self::SessionTicket(ClientSessionTicket::Offer(contents)) + } else { + Self::SessionTicket(ClientSessionTicket::Request) + } + } + ExtensionType::ALProtocolNegotiation => { + Self::Protocols(ProtocolNameList::read(&mut sub)?) + } + ExtensionType::SupportedVersions => { + Self::SupportedVersions(ProtocolVersions::read(&mut sub)?) + } + ExtensionType::KeyShare => Self::KeyShare(KeyShareEntries::read(&mut sub)?), + ExtensionType::PSKKeyExchangeModes => { + Self::PresharedKeyModes(PSKKeyExchangeModes::read(&mut sub)?) + } + ExtensionType::PreSharedKey => Self::PresharedKey(PresharedKeyOffer::read(&mut sub)?), + ExtensionType::Cookie => Self::Cookie(PayloadU16::read(&mut sub)?), + ExtensionType::ExtendedMasterSecret if !sub.any_left() => { + Self::ExtendedMasterSecretRequest + } + ExtensionType::StatusRequest => { + let csr = CertificateStatusRequest::read(&mut sub)?; + Self::CertificateStatusRequest(csr) + } + ExtensionType::SCT if !sub.any_left() => Self::SignedCertificateTimestampRequest, + ExtensionType::TransportParameters => Self::TransportParameters(sub.rest().to_vec()), + ExtensionType::TransportParametersDraft => { + Self::TransportParametersDraft(sub.rest().to_vec()) + } + ExtensionType::EarlyData if !sub.any_left() => Self::EarlyData, + _ => Self::Unknown(UnknownExtension::read(typ, &mut sub)), + }; + + if sub.any_left() { + None + } else { + Some(ext) + } + } +} + +fn trim_hostname_trailing_dot_for_sni(dns_name: webpki::DnsNameRef) -> webpki::DnsName { + let dns_name_str: &str = dns_name.into(); + + // RFC6066: "The hostname is represented as a 
byte string using + // ASCII encoding without a trailing dot" + if dns_name_str.ends_with('.') { + let trimmed = &dns_name_str[0..dns_name_str.len() - 1]; + webpki::DnsNameRef::try_from_ascii_str(trimmed) + .unwrap() + .to_owned() + } else { + dns_name.to_owned() + } +} + +impl ClientExtension { + /// Make a basic SNI ServerNameRequest quoting `hostname`. + pub fn make_sni(dns_name: webpki::DnsNameRef) -> Self { + let name = ServerName { + typ: ServerNameType::HostName, + payload: ServerNamePayload::new_hostname(trim_hostname_trailing_dot_for_sni(dns_name)), + }; + + Self::ServerName(vec![name]) + } +} + +#[derive(Clone, Debug)] +pub enum ClientSessionTicket { + Request, + Offer(Payload), +} + +#[derive(Clone, Debug)] +pub enum ServerExtension { + ECPointFormats(ECPointFormatList), + ServerNameAck, + SessionTicketAck, + RenegotiationInfo(PayloadU8), + Protocols(ProtocolNameList), + KeyShare(KeyShareEntry), + PresharedKey(u16), + ExtendedMasterSecretAck, + CertificateStatusAck, + SignedCertificateTimestamp(SCTList), + SupportedVersions(ProtocolVersion), + TransportParameters(Vec), + TransportParametersDraft(Vec), + EarlyData, + Unknown(UnknownExtension), +} + +impl ServerExtension { + pub fn get_type(&self) -> ExtensionType { + match *self { + Self::ECPointFormats(_) => ExtensionType::ECPointFormats, + Self::ServerNameAck => ExtensionType::ServerName, + Self::SessionTicketAck => ExtensionType::SessionTicket, + Self::RenegotiationInfo(_) => ExtensionType::RenegotiationInfo, + Self::Protocols(_) => ExtensionType::ALProtocolNegotiation, + Self::KeyShare(_) => ExtensionType::KeyShare, + Self::PresharedKey(_) => ExtensionType::PreSharedKey, + Self::ExtendedMasterSecretAck => ExtensionType::ExtendedMasterSecret, + Self::CertificateStatusAck => ExtensionType::StatusRequest, + Self::SignedCertificateTimestamp(_) => ExtensionType::SCT, + Self::SupportedVersions(_) => ExtensionType::SupportedVersions, + Self::TransportParameters(_) => ExtensionType::TransportParameters, + 
Self::TransportParametersDraft(_) => ExtensionType::TransportParametersDraft, + Self::EarlyData => ExtensionType::EarlyData, + Self::Unknown(ref r) => r.typ, + } + } +} + +impl Codec for ServerExtension { + fn encode(&self, bytes: &mut Vec) { + self.get_type().encode(bytes); + + let mut sub: Vec = Vec::new(); + match *self { + Self::ECPointFormats(ref r) => r.encode(&mut sub), + Self::ServerNameAck + | Self::SessionTicketAck + | Self::ExtendedMasterSecretAck + | Self::CertificateStatusAck + | Self::EarlyData => {} + Self::RenegotiationInfo(ref r) => r.encode(&mut sub), + Self::Protocols(ref r) => r.encode(&mut sub), + Self::KeyShare(ref r) => r.encode(&mut sub), + Self::PresharedKey(r) => r.encode(&mut sub), + Self::SignedCertificateTimestamp(ref r) => r.encode(&mut sub), + Self::SupportedVersions(ref r) => r.encode(&mut sub), + Self::TransportParameters(ref r) | Self::TransportParametersDraft(ref r) => { + sub.extend_from_slice(r) + } + Self::Unknown(ref r) => r.encode(&mut sub), + } + + (sub.len() as u16).encode(bytes); + bytes.append(&mut sub); + } + + fn read(r: &mut Reader) -> Option { + let typ = ExtensionType::read(r)?; + let len = u16::read(r)? as usize; + let mut sub = r.sub(len)?; + + let ext = match typ { + ExtensionType::ECPointFormats => { + Self::ECPointFormats(ECPointFormatList::read(&mut sub)?) + } + ExtensionType::ServerName => Self::ServerNameAck, + ExtensionType::SessionTicket => Self::SessionTicketAck, + ExtensionType::StatusRequest => Self::CertificateStatusAck, + ExtensionType::RenegotiationInfo => Self::RenegotiationInfo(PayloadU8::read(&mut sub)?), + ExtensionType::ALProtocolNegotiation => { + Self::Protocols(ProtocolNameList::read(&mut sub)?) 
+ } + ExtensionType::KeyShare => Self::KeyShare(KeyShareEntry::read(&mut sub)?), + ExtensionType::PreSharedKey => Self::PresharedKey(u16::read(&mut sub)?), + ExtensionType::ExtendedMasterSecret => Self::ExtendedMasterSecretAck, + ExtensionType::SCT => { + let scts = SCTList::read(&mut sub)?; + Self::SignedCertificateTimestamp(scts) + } + ExtensionType::SupportedVersions => { + Self::SupportedVersions(ProtocolVersion::read(&mut sub)?) + } + ExtensionType::TransportParameters => Self::TransportParameters(sub.rest().to_vec()), + ExtensionType::TransportParametersDraft => { + Self::TransportParametersDraft(sub.rest().to_vec()) + } + ExtensionType::EarlyData => Self::EarlyData, + _ => Self::Unknown(UnknownExtension::read(typ, &mut sub)), + }; + + if sub.any_left() { + None + } else { + Some(ext) + } + } +} + +impl ServerExtension { + pub fn make_alpn(proto: &[&[u8]]) -> Self { + Self::Protocols(ProtocolNameList::from_slices(proto)) + } + + pub fn make_empty_renegotiation_info() -> Self { + let empty = Vec::new(); + Self::RenegotiationInfo(PayloadU8::new(empty)) + } + + pub fn make_sct(sctl: Vec) -> Self { + let scts = SCTList::read_bytes(&sctl).expect("invalid SCT list"); + Self::SignedCertificateTimestamp(scts) + } +} + +#[derive(Debug)] +pub struct ClientHelloPayload { + pub client_version: ProtocolVersion, + pub random: Random, + pub session_id: SessionID, + pub cipher_suites: Vec, + pub compression_methods: Vec, + pub extensions: Vec, +} + +impl Codec for ClientHelloPayload { + fn encode(&self, bytes: &mut Vec) { + self.client_version.encode(bytes); + self.random.encode(bytes); + self.session_id.encode(bytes); + codec::encode_vec_u16(bytes, &self.cipher_suites); + codec::encode_vec_u8(bytes, &self.compression_methods); + + if !self.extensions.is_empty() { + codec::encode_vec_u16(bytes, &self.extensions); + } + } + + fn read(r: &mut Reader) -> Option { + let mut ret = Self { + client_version: ProtocolVersion::read(r)?, + random: Random::read(r)?, + session_id: 
SessionID::read(r)?, + cipher_suites: codec::read_vec_u16::(r)?, + compression_methods: codec::read_vec_u8::(r)?, + extensions: Vec::new(), + }; + + if r.any_left() { + ret.extensions = codec::read_vec_u16::(r)?; + } + + if r.any_left() || ret.extensions.is_empty() { + None + } else { + Some(ret) + } + } +} + +impl ClientHelloPayload { + /// Returns true if there is more than one extension of a given + /// type. + pub fn has_duplicate_extension(&self) -> bool { + let mut seen = collections::HashSet::new(); + + for ext in &self.extensions { + let typ = ext.get_type().get_u16(); + + if seen.contains(&typ) { + return true; + } + seen.insert(typ); + } + + false + } + + pub fn find_extension(&self, ext: ExtensionType) -> Option<&ClientExtension> { + self.extensions + .iter() + .find(|x| x.get_type() == ext) + } + + pub fn get_sni_extension(&self) -> Option<&ServerNameRequest> { + let ext = self.find_extension(ExtensionType::ServerName)?; + match *ext { + ClientExtension::ServerName(ref req) => Some(req), + _ => None, + } + } + + pub fn get_sigalgs_extension(&self) -> Option<&SupportedSignatureSchemes> { + let ext = self.find_extension(ExtensionType::SignatureAlgorithms)?; + match *ext { + ClientExtension::SignatureAlgorithms(ref req) => Some(req), + _ => None, + } + } + + pub fn get_namedgroups_extension(&self) -> Option<&NamedGroups> { + let ext = self.find_extension(ExtensionType::EllipticCurves)?; + match *ext { + ClientExtension::NamedGroups(ref req) => Some(req), + _ => None, + } + } + + pub fn get_ecpoints_extension(&self) -> Option<&ECPointFormatList> { + let ext = self.find_extension(ExtensionType::ECPointFormats)?; + match *ext { + ClientExtension::ECPointFormats(ref req) => Some(req), + _ => None, + } + } + + pub fn get_alpn_extension(&self) -> Option<&ProtocolNameList> { + let ext = self.find_extension(ExtensionType::ALProtocolNegotiation)?; + match *ext { + ClientExtension::Protocols(ref req) => Some(req), + _ => None, + } + } + + pub fn 
get_quic_params_extension(&self) -> Option> { + let ext = self + .find_extension(ExtensionType::TransportParameters) + .or_else(|| self.find_extension(ExtensionType::TransportParametersDraft))?; + match *ext { + ClientExtension::TransportParameters(ref bytes) + | ClientExtension::TransportParametersDraft(ref bytes) => Some(bytes.to_vec()), + _ => None, + } + } + + pub fn get_ticket_extension(&self) -> Option<&ClientExtension> { + self.find_extension(ExtensionType::SessionTicket) + } + + pub fn get_versions_extension(&self) -> Option<&ProtocolVersions> { + let ext = self.find_extension(ExtensionType::SupportedVersions)?; + match *ext { + ClientExtension::SupportedVersions(ref vers) => Some(vers), + _ => None, + } + } + + pub fn get_keyshare_extension(&self) -> Option<&KeyShareEntries> { + let ext = self.find_extension(ExtensionType::KeyShare)?; + match *ext { + ClientExtension::KeyShare(ref shares) => Some(shares), + _ => None, + } + } + + pub fn has_keyshare_extension_with_duplicates(&self) -> bool { + if let Some(entries) = self.get_keyshare_extension() { + let mut seen = collections::HashSet::new(); + + for kse in entries { + let grp = kse.group.get_u16(); + + if !seen.insert(grp) { + return true; + } + } + } + + false + } + + pub fn get_psk(&self) -> Option<&PresharedKeyOffer> { + let ext = self.find_extension(ExtensionType::PreSharedKey)?; + match *ext { + ClientExtension::PresharedKey(ref psk) => Some(psk), + _ => None, + } + } + + pub fn check_psk_ext_is_last(&self) -> bool { + self.extensions + .last() + .map_or(false, |ext| ext.get_type() == ExtensionType::PreSharedKey) + } + + pub fn get_psk_modes(&self) -> Option<&PSKKeyExchangeModes> { + let ext = self.find_extension(ExtensionType::PSKKeyExchangeModes)?; + match *ext { + ClientExtension::PresharedKeyModes(ref psk_modes) => Some(psk_modes), + _ => None, + } + } + + pub fn psk_mode_offered(&self, mode: PSKKeyExchangeMode) -> bool { + self.get_psk_modes() + .map(|modes| modes.contains(&mode)) + 
.unwrap_or(false) + } + + pub fn set_psk_binder(&mut self, binder: impl Into>) { + let last_extension = self.extensions.last_mut(); + if let Some(ClientExtension::PresharedKey(ref mut offer)) = last_extension { + offer.binders[0] = PresharedKeyBinder::new(binder.into()); + } + } + + pub fn ems_support_offered(&self) -> bool { + self.find_extension(ExtensionType::ExtendedMasterSecret) + .is_some() + } + + pub fn early_data_extension_offered(&self) -> bool { + self.find_extension(ExtensionType::EarlyData) + .is_some() + } +} + +#[derive(Debug)] +pub enum HelloRetryExtension { + KeyShare(NamedGroup), + Cookie(PayloadU16), + SupportedVersions(ProtocolVersion), + Unknown(UnknownExtension), +} + +impl HelloRetryExtension { + pub fn get_type(&self) -> ExtensionType { + match *self { + Self::KeyShare(_) => ExtensionType::KeyShare, + Self::Cookie(_) => ExtensionType::Cookie, + Self::SupportedVersions(_) => ExtensionType::SupportedVersions, + Self::Unknown(ref r) => r.typ, + } + } +} + +impl Codec for HelloRetryExtension { + fn encode(&self, bytes: &mut Vec) { + self.get_type().encode(bytes); + + let mut sub: Vec = Vec::new(); + match *self { + Self::KeyShare(ref r) => r.encode(&mut sub), + Self::Cookie(ref r) => r.encode(&mut sub), + Self::SupportedVersions(ref r) => r.encode(&mut sub), + Self::Unknown(ref r) => r.encode(&mut sub), + } + + (sub.len() as u16).encode(bytes); + bytes.append(&mut sub); + } + + fn read(r: &mut Reader) -> Option { + let typ = ExtensionType::read(r)?; + let len = u16::read(r)? as usize; + let mut sub = r.sub(len)?; + + let ext = match typ { + ExtensionType::KeyShare => Self::KeyShare(NamedGroup::read(&mut sub)?), + ExtensionType::Cookie => Self::Cookie(PayloadU16::read(&mut sub)?), + ExtensionType::SupportedVersions => { + Self::SupportedVersions(ProtocolVersion::read(&mut sub)?) 
+ } + _ => Self::Unknown(UnknownExtension::read(typ, &mut sub)), + }; + + if sub.any_left() { + None + } else { + Some(ext) + } + } +} + +#[derive(Debug)] +pub struct HelloRetryRequest { + pub legacy_version: ProtocolVersion, + pub session_id: SessionID, + pub cipher_suite: CipherSuite, + pub extensions: Vec, +} + +impl Codec for HelloRetryRequest { + fn encode(&self, bytes: &mut Vec) { + self.legacy_version.encode(bytes); + HELLO_RETRY_REQUEST_RANDOM.encode(bytes); + self.session_id.encode(bytes); + self.cipher_suite.encode(bytes); + Compression::Null.encode(bytes); + codec::encode_vec_u16(bytes, &self.extensions); + } + + fn read(r: &mut Reader) -> Option { + let session_id = SessionID::read(r)?; + let cipher_suite = CipherSuite::read(r)?; + let compression = Compression::read(r)?; + + if compression != Compression::Null { + return None; + } + + Some(Self { + legacy_version: ProtocolVersion::Unknown(0), + session_id, + cipher_suite, + extensions: codec::read_vec_u16::(r)?, + }) + } +} + +impl HelloRetryRequest { + /// Returns true if there is more than one extension of a given + /// type. 
+ pub fn has_duplicate_extension(&self) -> bool { + let mut seen = collections::HashSet::new(); + + for ext in &self.extensions { + let typ = ext.get_type().get_u16(); + + if seen.contains(&typ) { + return true; + } + seen.insert(typ); + } + + false + } + + pub fn has_unknown_extension(&self) -> bool { + self.extensions.iter().any(|ext| { + ext.get_type() != ExtensionType::KeyShare + && ext.get_type() != ExtensionType::SupportedVersions + && ext.get_type() != ExtensionType::Cookie + }) + } + + fn find_extension(&self, ext: ExtensionType) -> Option<&HelloRetryExtension> { + self.extensions + .iter() + .find(|x| x.get_type() == ext) + } + + pub fn get_requested_key_share_group(&self) -> Option { + let ext = self.find_extension(ExtensionType::KeyShare)?; + match *ext { + HelloRetryExtension::KeyShare(grp) => Some(grp), + _ => None, + } + } + + pub fn get_cookie(&self) -> Option<&PayloadU16> { + let ext = self.find_extension(ExtensionType::Cookie)?; + match *ext { + HelloRetryExtension::Cookie(ref ck) => Some(ck), + _ => None, + } + } + + pub fn get_supported_versions(&self) -> Option { + let ext = self.find_extension(ExtensionType::SupportedVersions)?; + match *ext { + HelloRetryExtension::SupportedVersions(ver) => Some(ver), + _ => None, + } + } +} + +#[derive(Debug)] +pub struct ServerHelloPayload { + pub legacy_version: ProtocolVersion, + pub random: Random, + pub session_id: SessionID, + pub cipher_suite: CipherSuite, + pub compression_method: Compression, + pub extensions: Vec, +} + +impl Codec for ServerHelloPayload { + fn encode(&self, bytes: &mut Vec) { + self.legacy_version.encode(bytes); + self.random.encode(bytes); + + self.session_id.encode(bytes); + self.cipher_suite.encode(bytes); + self.compression_method.encode(bytes); + + if !self.extensions.is_empty() { + codec::encode_vec_u16(bytes, &self.extensions); + } + } + + // minus version and random, which have already been read. 
+ fn read(r: &mut Reader) -> Option { + let session_id = SessionID::read(r)?; + let suite = CipherSuite::read(r)?; + let compression = Compression::read(r)?; + + // RFC5246: + // "The presence of extensions can be detected by determining whether + // there are bytes following the compression_method field at the end of + // the ServerHello." + let extensions = if r.any_left() { + codec::read_vec_u16::(r)? + } else { + vec![] + }; + + let ret = Self { + legacy_version: ProtocolVersion::Unknown(0), + random: ZERO_RANDOM, + session_id, + cipher_suite: suite, + compression_method: compression, + extensions, + }; + + if r.any_left() { + None + } else { + Some(ret) + } + } +} + +impl HasServerExtensions for ServerHelloPayload { + fn get_extensions(&self) -> &[ServerExtension] { + &self.extensions + } +} + +impl ServerHelloPayload { + pub fn get_key_share(&self) -> Option<&KeyShareEntry> { + let ext = self.find_extension(ExtensionType::KeyShare)?; + match *ext { + ServerExtension::KeyShare(ref share) => Some(share), + _ => None, + } + } + + pub fn get_psk_index(&self) -> Option { + let ext = self.find_extension(ExtensionType::PreSharedKey)?; + match *ext { + ServerExtension::PresharedKey(ref index) => Some(*index), + _ => None, + } + } + + pub fn get_ecpoints_extension(&self) -> Option<&ECPointFormatList> { + let ext = self.find_extension(ExtensionType::ECPointFormats)?; + match *ext { + ServerExtension::ECPointFormats(ref fmts) => Some(fmts), + _ => None, + } + } + + pub fn ems_support_acked(&self) -> bool { + self.find_extension(ExtensionType::ExtendedMasterSecret) + .is_some() + } + + pub fn get_sct_list(&self) -> Option<&SCTList> { + let ext = self.find_extension(ExtensionType::SCT)?; + match *ext { + ServerExtension::SignedCertificateTimestamp(ref sctl) => Some(sctl), + _ => None, + } + } + + pub fn get_supported_versions(&self) -> Option { + let ext = self.find_extension(ExtensionType::SupportedVersions)?; + match *ext { + ServerExtension::SupportedVersions(vers) => 
Some(vers), + _ => None, + } + } +} + +pub type CertificatePayload = Vec; + +impl Codec for CertificatePayload { + fn encode(&self, bytes: &mut Vec) { + codec::encode_vec_u24(bytes, self); + } + + fn read(r: &mut Reader) -> Option { + // 64KB of certificates is plenty, 16MB is obviously silly + codec::read_vec_u24_limited(r, 0x10000) + } +} + +// TLS1.3 changes the Certificate payload encoding. +// That's annoying. It means the parsing is not +// context-free any more. + +#[derive(Debug)] +pub enum CertificateExtension { + CertificateStatus(CertificateStatus), + SignedCertificateTimestamp(SCTList), + Unknown(UnknownExtension), +} + +impl CertificateExtension { + pub fn get_type(&self) -> ExtensionType { + match *self { + Self::CertificateStatus(_) => ExtensionType::StatusRequest, + Self::SignedCertificateTimestamp(_) => ExtensionType::SCT, + Self::Unknown(ref r) => r.typ, + } + } + + pub fn make_sct(sct_list: Vec) -> Self { + let sctl = SCTList::read_bytes(&sct_list).expect("invalid SCT list"); + Self::SignedCertificateTimestamp(sctl) + } + + pub fn get_cert_status(&self) -> Option<&Vec> { + match *self { + Self::CertificateStatus(ref cs) => Some(&cs.ocsp_response.0), + _ => None, + } + } + + pub fn get_sct_list(&self) -> Option<&SCTList> { + match *self { + Self::SignedCertificateTimestamp(ref sctl) => Some(sctl), + _ => None, + } + } +} + +impl Codec for CertificateExtension { + fn encode(&self, bytes: &mut Vec) { + self.get_type().encode(bytes); + + let mut sub: Vec = Vec::new(); + match *self { + Self::CertificateStatus(ref r) => r.encode(&mut sub), + Self::SignedCertificateTimestamp(ref r) => r.encode(&mut sub), + Self::Unknown(ref r) => r.encode(&mut sub), + } + + (sub.len() as u16).encode(bytes); + bytes.append(&mut sub); + } + + fn read(r: &mut Reader) -> Option { + let typ = ExtensionType::read(r)?; + let len = u16::read(r)? 
as usize; + let mut sub = r.sub(len)?; + + let ext = match typ { + ExtensionType::StatusRequest => { + let st = CertificateStatus::read(&mut sub)?; + Self::CertificateStatus(st) + } + ExtensionType::SCT => { + let scts = SCTList::read(&mut sub)?; + Self::SignedCertificateTimestamp(scts) + } + _ => Self::Unknown(UnknownExtension::read(typ, &mut sub)), + }; + + if sub.any_left() { + None + } else { + Some(ext) + } + } +} + +declare_u16_vec!(CertificateExtensions, CertificateExtension); + +#[derive(Debug)] +pub struct CertificateEntry { + pub cert: key::Certificate, + pub exts: CertificateExtensions, +} + +impl Codec for CertificateEntry { + fn encode(&self, bytes: &mut Vec) { + self.cert.encode(bytes); + self.exts.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + Some(Self { + cert: key::Certificate::read(r)?, + exts: CertificateExtensions::read(r)?, + }) + } +} + +impl CertificateEntry { + pub fn new(cert: key::Certificate) -> Self { + Self { + cert, + exts: Vec::new(), + } + } + + pub fn has_duplicate_extension(&self) -> bool { + let mut seen = collections::HashSet::new(); + + for ext in &self.exts { + let typ = ext.get_type().get_u16(); + + if seen.contains(&typ) { + return true; + } + seen.insert(typ); + } + + false + } + + pub fn has_unknown_extension(&self) -> bool { + self.exts.iter().any(|ext| { + ext.get_type() != ExtensionType::StatusRequest && ext.get_type() != ExtensionType::SCT + }) + } + + pub fn get_ocsp_response(&self) -> Option<&Vec> { + self.exts + .iter() + .find(|ext| ext.get_type() == ExtensionType::StatusRequest) + .and_then(CertificateExtension::get_cert_status) + } + + pub fn get_scts(&self) -> Option<&SCTList> { + self.exts + .iter() + .find(|ext| ext.get_type() == ExtensionType::SCT) + .and_then(CertificateExtension::get_sct_list) + } +} + +#[derive(Debug)] +pub struct CertificatePayloadTLS13 { + pub context: PayloadU8, + pub entries: Vec, +} + +impl Codec for CertificatePayloadTLS13 { + fn encode(&self, bytes: &mut Vec) { + 
self.context.encode(bytes); + codec::encode_vec_u24(bytes, &self.entries); + } + + fn read(r: &mut Reader) -> Option { + Some(Self { + context: PayloadU8::read(r)?, + entries: codec::read_vec_u24_limited::(r, 0x10000)?, + }) + } +} + +impl CertificatePayloadTLS13 { + pub fn new(entries: Vec) -> Self { + Self { + context: PayloadU8::empty(), + entries, + } + } + + pub fn any_entry_has_duplicate_extension(&self) -> bool { + for entry in &self.entries { + if entry.has_duplicate_extension() { + return true; + } + } + + false + } + + pub fn any_entry_has_unknown_extension(&self) -> bool { + for entry in &self.entries { + if entry.has_unknown_extension() { + return true; + } + } + + false + } + + pub fn any_entry_has_extension(&self) -> bool { + for entry in &self.entries { + if !entry.exts.is_empty() { + return true; + } + } + + false + } + + pub fn get_end_entity_ocsp(&self) -> Vec { + self.entries + .first() + .and_then(CertificateEntry::get_ocsp_response) + .cloned() + .unwrap_or_default() + } + + pub fn get_end_entity_scts(&self) -> Option { + self.entries + .first() + .and_then(CertificateEntry::get_scts) + .cloned() + } + + pub fn convert(&self) -> CertificatePayload { + let mut ret = Vec::new(); + for entry in &self.entries { + ret.push(entry.cert.clone()); + } + ret + } +} + +#[derive(Debug)] +pub enum KeyExchangeAlgorithm { + BulkOnly, + DH, + DHE, + RSA, + ECDH, + ECDHE, +} + +// We don't support arbitrary curves. It's a terrible +// idea and unnecessary attack surface. Please, +// get a grip. 
+#[derive(Debug)] +pub struct ECParameters { + pub curve_type: ECCurveType, + pub named_group: NamedGroup, +} + +impl Codec for ECParameters { + fn encode(&self, bytes: &mut Vec) { + self.curve_type.encode(bytes); + self.named_group.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let ct = ECCurveType::read(r)?; + + if ct != ECCurveType::NamedCurve { + return None; + } + + let grp = NamedGroup::read(r)?; + + Some(Self { + curve_type: ct, + named_group: grp, + }) + } +} + +#[derive(Debug, Clone)] +pub struct DigitallySignedStruct { + pub scheme: SignatureScheme, + #[deprecated(since = "0.20.7", note = "Use signature() accessor")] + pub sig: PayloadU16, +} + +impl DigitallySignedStruct { + #![allow(deprecated)] + pub fn new(scheme: SignatureScheme, sig: Vec) -> Self { + Self { + scheme, + sig: PayloadU16::new(sig), + } + } + + pub fn signature(&self) -> &[u8] { + &self.sig.0 + } +} + +impl Codec for DigitallySignedStruct { + #![allow(deprecated)] + fn encode(&self, bytes: &mut Vec) { + self.scheme.encode(bytes); + self.sig.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let scheme = SignatureScheme::read(r)?; + let sig = PayloadU16::read(r)?; + + Some(Self { scheme, sig }) + } +} + +#[derive(Debug)] +pub struct ClientECDHParams { + pub public: PayloadU8, +} + +impl Codec for ClientECDHParams { + fn encode(&self, bytes: &mut Vec) { + self.public.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let pb = PayloadU8::read(r)?; + Some(Self { public: pb }) + } +} + +#[derive(Debug)] +pub struct ServerECDHParams { + pub curve_params: ECParameters, + pub public: PayloadU8, +} + +impl ServerECDHParams { + pub fn new(named_group: NamedGroup, pubkey: &[u8]) -> Self { + Self { + curve_params: ECParameters { + curve_type: ECCurveType::NamedCurve, + named_group, + }, + public: PayloadU8::new(pubkey.to_vec()), + } + } +} + +impl Codec for ServerECDHParams { + fn encode(&self, bytes: &mut Vec) { + self.curve_params.encode(bytes); + 
self.public.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let cp = ECParameters::read(r)?; + let pb = PayloadU8::read(r)?; + + Some(Self { + curve_params: cp, + public: pb, + }) + } +} + +#[derive(Debug)] +pub struct ECDHEServerKeyExchange { + pub params: ServerECDHParams, + pub dss: DigitallySignedStruct, +} + +impl Codec for ECDHEServerKeyExchange { + fn encode(&self, bytes: &mut Vec) { + self.params.encode(bytes); + self.dss.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let params = ServerECDHParams::read(r)?; + let dss = DigitallySignedStruct::read(r)?; + + Some(Self { params, dss }) + } +} + +#[derive(Debug)] +pub enum ServerKeyExchangePayload { + ECDHE(ECDHEServerKeyExchange), + Unknown(Payload), +} + +impl Codec for ServerKeyExchangePayload { + fn encode(&self, bytes: &mut Vec) { + match *self { + Self::ECDHE(ref x) => x.encode(bytes), + Self::Unknown(ref x) => x.encode(bytes), + } + } + + fn read(r: &mut Reader) -> Option { + // read as Unknown, fully parse when we know the + // KeyExchangeAlgorithm + Some(Self::Unknown(Payload::read(r))) + } +} + +impl ServerKeyExchangePayload { + pub fn unwrap_given_kxa(&self, kxa: &KeyExchangeAlgorithm) -> Option { + if let Self::Unknown(ref unk) = *self { + let mut rd = Reader::init(&unk.0); + + let result = match *kxa { + KeyExchangeAlgorithm::ECDHE => ECDHEServerKeyExchange::read(&mut rd), + _ => None, + }; + + if !rd.any_left() { + return result; + }; + } + + None + } +} + +// -- EncryptedExtensions (TLS1.3 only) -- +declare_u16_vec!(EncryptedExtensions, ServerExtension); + +pub trait HasServerExtensions { + fn get_extensions(&self) -> &[ServerExtension]; + + /// Returns true if there is more than one extension of a given + /// type. 
+ fn has_duplicate_extension(&self) -> bool { + let mut seen = collections::HashSet::new(); + + for ext in self.get_extensions() { + let typ = ext.get_type().get_u16(); + + if seen.contains(&typ) { + return true; + } + seen.insert(typ); + } + + false + } + + fn find_extension(&self, ext: ExtensionType) -> Option<&ServerExtension> { + self.get_extensions() + .iter() + .find(|x| x.get_type() == ext) + } + + fn get_alpn_protocol(&self) -> Option<&[u8]> { + let ext = self.find_extension(ExtensionType::ALProtocolNegotiation)?; + match *ext { + ServerExtension::Protocols(ref protos) => protos.as_single_slice(), + _ => None, + } + } + + fn get_quic_params_extension(&self) -> Option> { + let ext = self + .find_extension(ExtensionType::TransportParameters) + .or_else(|| self.find_extension(ExtensionType::TransportParametersDraft))?; + match *ext { + ServerExtension::TransportParameters(ref bytes) + | ServerExtension::TransportParametersDraft(ref bytes) => Some(bytes.to_vec()), + _ => None, + } + } + + fn early_data_extension_offered(&self) -> bool { + self.find_extension(ExtensionType::EarlyData) + .is_some() + } +} + +impl HasServerExtensions for EncryptedExtensions { + fn get_extensions(&self) -> &[ServerExtension] { + self + } +} + +// -- CertificateRequest and sundries -- +declare_u8_vec!(ClientCertificateTypes, ClientCertificateType); +pub type DistinguishedName = PayloadU16; +/// DistinguishedNames is a `Vec>` wrapped in internal types. Each element contains the +/// DER or BER encoded [`Subject` field from RFC 5280](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) +/// for a single certificate. The Subject field is +/// [encoded as an RFC 5280 `Name`](https://datatracker.ietf.org/doc/html/rfc5280#page-116). +/// It can be decoded using [x509-parser's FromDer trait](https://docs.rs/x509-parser/latest/x509_parser/traits/trait.FromDer.html). 
+/// +/// ```ignore +/// for name in distinguished_names { +/// use x509_parser::traits::FromDer; +/// println!("{}", x509_parser::x509::X509Name::from_der(&name.0)?.1); +/// } +/// ``` +pub type DistinguishedNames = VecU16OfPayloadU16; + +#[derive(Debug)] +pub struct CertificateRequestPayload { + pub certtypes: ClientCertificateTypes, + pub sigschemes: SupportedSignatureSchemes, + pub canames: DistinguishedNames, +} + +impl Codec for CertificateRequestPayload { + fn encode(&self, bytes: &mut Vec) { + self.certtypes.encode(bytes); + self.sigschemes.encode(bytes); + self.canames.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let certtypes = ClientCertificateTypes::read(r)?; + let sigschemes = SupportedSignatureSchemes::read(r)?; + let canames = DistinguishedNames::read(r)?; + + if sigschemes.is_empty() { + warn!("meaningless CertificateRequest message"); + None + } else { + Some(Self { + certtypes, + sigschemes, + canames, + }) + } + } +} + +#[derive(Debug)] +pub enum CertReqExtension { + SignatureAlgorithms(SupportedSignatureSchemes), + AuthorityNames(DistinguishedNames), + Unknown(UnknownExtension), +} + +impl CertReqExtension { + pub fn get_type(&self) -> ExtensionType { + match *self { + Self::SignatureAlgorithms(_) => ExtensionType::SignatureAlgorithms, + Self::AuthorityNames(_) => ExtensionType::CertificateAuthorities, + Self::Unknown(ref r) => r.typ, + } + } +} + +impl Codec for CertReqExtension { + fn encode(&self, bytes: &mut Vec) { + self.get_type().encode(bytes); + + let mut sub: Vec = Vec::new(); + match *self { + Self::SignatureAlgorithms(ref r) => r.encode(&mut sub), + Self::AuthorityNames(ref r) => r.encode(&mut sub), + Self::Unknown(ref r) => r.encode(&mut sub), + } + + (sub.len() as u16).encode(bytes); + bytes.append(&mut sub); + } + + fn read(r: &mut Reader) -> Option { + let typ = ExtensionType::read(r)?; + let len = u16::read(r)? 
as usize; + let mut sub = r.sub(len)?; + + let ext = match typ { + ExtensionType::SignatureAlgorithms => { + let schemes = SupportedSignatureSchemes::read(&mut sub)?; + if schemes.is_empty() { + return None; + } + Self::SignatureAlgorithms(schemes) + } + ExtensionType::CertificateAuthorities => { + let cas = DistinguishedNames::read(&mut sub)?; + Self::AuthorityNames(cas) + } + _ => Self::Unknown(UnknownExtension::read(typ, &mut sub)), + }; + + if sub.any_left() { + None + } else { + Some(ext) + } + } +} + +declare_u16_vec!(CertReqExtensions, CertReqExtension); + +#[derive(Debug)] +pub struct CertificateRequestPayloadTLS13 { + pub context: PayloadU8, + pub extensions: CertReqExtensions, +} + +impl Codec for CertificateRequestPayloadTLS13 { + fn encode(&self, bytes: &mut Vec) { + self.context.encode(bytes); + self.extensions.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let context = PayloadU8::read(r)?; + let extensions = CertReqExtensions::read(r)?; + + Some(Self { + context, + extensions, + }) + } +} + +impl CertificateRequestPayloadTLS13 { + pub fn find_extension(&self, ext: ExtensionType) -> Option<&CertReqExtension> { + self.extensions + .iter() + .find(|x| x.get_type() == ext) + } + + pub fn get_sigalgs_extension(&self) -> Option<&SupportedSignatureSchemes> { + let ext = self.find_extension(ExtensionType::SignatureAlgorithms)?; + match *ext { + CertReqExtension::SignatureAlgorithms(ref sa) => Some(sa), + _ => None, + } + } + + pub fn get_authorities_extension(&self) -> Option<&DistinguishedNames> { + let ext = self.find_extension(ExtensionType::CertificateAuthorities)?; + match *ext { + CertReqExtension::AuthorityNames(ref an) => Some(an), + _ => None, + } + } +} + +// -- NewSessionTicket -- +#[derive(Debug)] +pub struct NewSessionTicketPayload { + pub lifetime_hint: u32, + pub ticket: PayloadU16, +} + +impl NewSessionTicketPayload { + pub fn new(lifetime_hint: u32, ticket: Vec) -> Self { + Self { + lifetime_hint, + ticket: 
PayloadU16::new(ticket), + } + } +} + +impl Codec for NewSessionTicketPayload { + fn encode(&self, bytes: &mut Vec) { + self.lifetime_hint.encode(bytes); + self.ticket.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let lifetime = u32::read(r)?; + let ticket = PayloadU16::read(r)?; + + Some(Self { + lifetime_hint: lifetime, + ticket, + }) + } +} + +// -- NewSessionTicket electric boogaloo -- +#[derive(Debug)] +pub enum NewSessionTicketExtension { + EarlyData(u32), + Unknown(UnknownExtension), +} + +impl NewSessionTicketExtension { + pub fn get_type(&self) -> ExtensionType { + match *self { + Self::EarlyData(_) => ExtensionType::EarlyData, + Self::Unknown(ref r) => r.typ, + } + } +} + +impl Codec for NewSessionTicketExtension { + fn encode(&self, bytes: &mut Vec) { + self.get_type().encode(bytes); + + let mut sub: Vec = Vec::new(); + match *self { + Self::EarlyData(r) => r.encode(&mut sub), + Self::Unknown(ref r) => r.encode(&mut sub), + } + + (sub.len() as u16).encode(bytes); + bytes.append(&mut sub); + } + + fn read(r: &mut Reader) -> Option { + let typ = ExtensionType::read(r)?; + let len = u16::read(r)? 
as usize; + let mut sub = r.sub(len)?; + + let ext = match typ { + ExtensionType::EarlyData => Self::EarlyData(u32::read(&mut sub)?), + _ => Self::Unknown(UnknownExtension::read(typ, &mut sub)), + }; + + if sub.any_left() { + None + } else { + Some(ext) + } + } +} + +declare_u16_vec!(NewSessionTicketExtensions, NewSessionTicketExtension); + +#[derive(Debug)] +pub struct NewSessionTicketPayloadTLS13 { + pub lifetime: u32, + pub age_add: u32, + pub nonce: PayloadU8, + pub ticket: PayloadU16, + pub exts: NewSessionTicketExtensions, +} + +impl NewSessionTicketPayloadTLS13 { + pub fn new(lifetime: u32, age_add: u32, nonce: Vec, ticket: Vec) -> Self { + Self { + lifetime, + age_add, + nonce: PayloadU8::new(nonce), + ticket: PayloadU16::new(ticket), + exts: vec![], + } + } + + pub fn has_duplicate_extension(&self) -> bool { + let mut seen = collections::HashSet::new(); + + for ext in &self.exts { + let typ = ext.get_type().get_u16(); + + if seen.contains(&typ) { + return true; + } + seen.insert(typ); + } + + false + } + + pub fn find_extension(&self, ext: ExtensionType) -> Option<&NewSessionTicketExtension> { + self.exts + .iter() + .find(|x| x.get_type() == ext) + } + + pub fn get_max_early_data_size(&self) -> Option { + let ext = self.find_extension(ExtensionType::EarlyData)?; + match *ext { + NewSessionTicketExtension::EarlyData(ref sz) => Some(*sz), + _ => None, + } + } +} + +impl Codec for NewSessionTicketPayloadTLS13 { + fn encode(&self, bytes: &mut Vec) { + self.lifetime.encode(bytes); + self.age_add.encode(bytes); + self.nonce.encode(bytes); + self.ticket.encode(bytes); + self.exts.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let lifetime = u32::read(r)?; + let age_add = u32::read(r)?; + let nonce = PayloadU8::read(r)?; + let ticket = PayloadU16::read(r)?; + let exts = NewSessionTicketExtensions::read(r)?; + + Some(Self { + lifetime, + age_add, + nonce, + ticket, + exts, + }) + } +} + +// -- RFC6066 certificate status types + +/// Only supports 
OCSP +#[derive(Debug)] +pub struct CertificateStatus { + pub ocsp_response: PayloadU24, +} + +impl Codec for CertificateStatus { + fn encode(&self, bytes: &mut Vec) { + CertificateStatusType::OCSP.encode(bytes); + self.ocsp_response.encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let typ = CertificateStatusType::read(r)?; + + match typ { + CertificateStatusType::OCSP => Some(Self { + ocsp_response: PayloadU24::read(r)?, + }), + _ => None, + } + } +} + +impl CertificateStatus { + pub fn new(ocsp: Vec) -> Self { + Self { + ocsp_response: PayloadU24::new(ocsp), + } + } + + pub fn into_inner(self) -> Vec { + self.ocsp_response.0 + } +} + +#[derive(Debug)] +pub enum HandshakePayload { + HelloRequest, + ClientHello(ClientHelloPayload), + ServerHello(ServerHelloPayload), + HelloRetryRequest(HelloRetryRequest), + Certificate(CertificatePayload), + CertificateTLS13(CertificatePayloadTLS13), + ServerKeyExchange(ServerKeyExchangePayload), + CertificateRequest(CertificateRequestPayload), + CertificateRequestTLS13(CertificateRequestPayloadTLS13), + CertificateVerify(DigitallySignedStruct), + ServerHelloDone, + EndOfEarlyData, + ClientKeyExchange(Payload), + NewSessionTicket(NewSessionTicketPayload), + NewSessionTicketTLS13(NewSessionTicketPayloadTLS13), + EncryptedExtensions(EncryptedExtensions), + KeyUpdate(KeyUpdateRequest), + Finished(Payload), + CertificateStatus(CertificateStatus), + MessageHash(Payload), + Unknown(Payload), +} + +impl HandshakePayload { + fn encode(&self, bytes: &mut Vec) { + use self::HandshakePayload::*; + match *self { + HelloRequest | ServerHelloDone | EndOfEarlyData => {} + ClientHello(ref x) => x.encode(bytes), + ServerHello(ref x) => x.encode(bytes), + HelloRetryRequest(ref x) => x.encode(bytes), + Certificate(ref x) => x.encode(bytes), + CertificateTLS13(ref x) => x.encode(bytes), + ServerKeyExchange(ref x) => x.encode(bytes), + ClientKeyExchange(ref x) => x.encode(bytes), + CertificateRequest(ref x) => x.encode(bytes), + 
CertificateRequestTLS13(ref x) => x.encode(bytes), + CertificateVerify(ref x) => x.encode(bytes), + NewSessionTicket(ref x) => x.encode(bytes), + NewSessionTicketTLS13(ref x) => x.encode(bytes), + EncryptedExtensions(ref x) => x.encode(bytes), + KeyUpdate(ref x) => x.encode(bytes), + Finished(ref x) => x.encode(bytes), + CertificateStatus(ref x) => x.encode(bytes), + MessageHash(ref x) => x.encode(bytes), + Unknown(ref x) => x.encode(bytes), + } + } +} + +#[derive(Debug)] +pub struct HandshakeMessagePayload { + pub typ: HandshakeType, + pub payload: HandshakePayload, +} + +impl Codec for HandshakeMessagePayload { + fn encode(&self, bytes: &mut Vec) { + // encode payload to learn length + let mut sub: Vec = Vec::new(); + self.payload.encode(&mut sub); + + // output type, length, and encoded payload + match self.typ { + HandshakeType::HelloRetryRequest => HandshakeType::ServerHello, + _ => self.typ, + } + .encode(bytes); + codec::u24(sub.len() as u32).encode(bytes); + bytes.append(&mut sub); + } + + fn read(r: &mut Reader) -> Option { + Self::read_version(r, ProtocolVersion::TLSv1_2) + } +} + +impl HandshakeMessagePayload { + pub fn read_version(r: &mut Reader, vers: ProtocolVersion) -> Option { + let mut typ = HandshakeType::read(r)?; + let len = codec::u24::read(r)?.0 as usize; + let mut sub = r.sub(len)?; + + let payload = match typ { + HandshakeType::HelloRequest if sub.left() == 0 => HandshakePayload::HelloRequest, + HandshakeType::ClientHello => { + HandshakePayload::ClientHello(ClientHelloPayload::read(&mut sub)?) 
+ } + HandshakeType::ServerHello => { + let version = ProtocolVersion::read(&mut sub)?; + let random = Random::read(&mut sub)?; + + if random == HELLO_RETRY_REQUEST_RANDOM { + let mut hrr = HelloRetryRequest::read(&mut sub)?; + hrr.legacy_version = version; + typ = HandshakeType::HelloRetryRequest; + HandshakePayload::HelloRetryRequest(hrr) + } else { + let mut shp = ServerHelloPayload::read(&mut sub)?; + shp.legacy_version = version; + shp.random = random; + HandshakePayload::ServerHello(shp) + } + } + HandshakeType::Certificate if vers == ProtocolVersion::TLSv1_3 => { + let p = CertificatePayloadTLS13::read(&mut sub)?; + HandshakePayload::CertificateTLS13(p) + } + HandshakeType::Certificate => { + HandshakePayload::Certificate(CertificatePayload::read(&mut sub)?) + } + HandshakeType::ServerKeyExchange => { + let p = ServerKeyExchangePayload::read(&mut sub)?; + HandshakePayload::ServerKeyExchange(p) + } + HandshakeType::ServerHelloDone => { + if sub.any_left() { + return None; + } + HandshakePayload::ServerHelloDone + } + HandshakeType::ClientKeyExchange => { + HandshakePayload::ClientKeyExchange(Payload::read(&mut sub)) + } + HandshakeType::CertificateRequest if vers == ProtocolVersion::TLSv1_3 => { + let p = CertificateRequestPayloadTLS13::read(&mut sub)?; + HandshakePayload::CertificateRequestTLS13(p) + } + HandshakeType::CertificateRequest => { + let p = CertificateRequestPayload::read(&mut sub)?; + HandshakePayload::CertificateRequest(p) + } + HandshakeType::CertificateVerify => { + HandshakePayload::CertificateVerify(DigitallySignedStruct::read(&mut sub)?) 
+ } + HandshakeType::NewSessionTicket if vers == ProtocolVersion::TLSv1_3 => { + let p = NewSessionTicketPayloadTLS13::read(&mut sub)?; + HandshakePayload::NewSessionTicketTLS13(p) + } + HandshakeType::NewSessionTicket => { + let p = NewSessionTicketPayload::read(&mut sub)?; + HandshakePayload::NewSessionTicket(p) + } + HandshakeType::EncryptedExtensions => { + HandshakePayload::EncryptedExtensions(EncryptedExtensions::read(&mut sub)?) + } + HandshakeType::KeyUpdate => { + HandshakePayload::KeyUpdate(KeyUpdateRequest::read(&mut sub)?) + } + HandshakeType::EndOfEarlyData => { + if sub.any_left() { + return None; + } + HandshakePayload::EndOfEarlyData + } + HandshakeType::Finished => HandshakePayload::Finished(Payload::read(&mut sub)), + HandshakeType::CertificateStatus => { + HandshakePayload::CertificateStatus(CertificateStatus::read(&mut sub)?) + } + HandshakeType::MessageHash => { + // does not appear on the wire + return None; + } + HandshakeType::HelloRetryRequest => { + // not legal on wire + return None; + } + _ => HandshakePayload::Unknown(Payload::read(&mut sub)), + }; + + if sub.any_left() { + None + } else { + Some(Self { typ, payload }) + } + } + + pub fn build_key_update_notify() -> Self { + Self { + typ: HandshakeType::KeyUpdate, + payload: HandshakePayload::KeyUpdate(KeyUpdateRequest::UpdateNotRequested), + } + } + + pub fn get_encoding_for_binder_signing(&self) -> Vec { + let mut ret = self.get_encoding(); + + let binder_len = match self.payload { + HandshakePayload::ClientHello(ref ch) => match ch.extensions.last() { + Some(ClientExtension::PresharedKey(ref offer)) => { + let mut binders_encoding = Vec::new(); + offer + .binders + .encode(&mut binders_encoding); + binders_encoding.len() + } + _ => 0, + }, + _ => 0, + }; + + let ret_len = ret.len() - binder_len; + ret.truncate(ret_len); + ret + } + + pub fn build_handshake_hash(hash: &[u8]) -> Self { + Self { + typ: HandshakeType::MessageHash, + payload: 
HandshakePayload::MessageHash(Payload::new(hash.to_vec())), + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/handshake_test.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/handshake_test.rs new file mode 100644 index 0000000000000000000000000000000000000000..3e19cc173ea3c21f30ad57711eaffc01bb64a1b6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/handshake_test.rs @@ -0,0 +1,1247 @@ +use crate::enums::{CipherSuite, ProtocolVersion, SignatureScheme}; +use crate::key::Certificate; +use crate::msgs::base::{Payload, PayloadU16, PayloadU24, PayloadU8}; +use crate::msgs::codec::{put_u16, Codec, Reader}; +use crate::msgs::enums::{ + ClientCertificateType, Compression, ECCurveType, ExtensionType, HandshakeType, HashAlgorithm, + KeyUpdateRequest, NamedGroup, PSKKeyExchangeMode, ServerNameType, SignatureAlgorithm, +}; +use crate::msgs::handshake::{ + CertReqExtension, CertificateEntry, CertificateExtension, CertificatePayloadTLS13, + CertificateRequestPayload, CertificateRequestPayloadTLS13, CertificateStatus, + CertificateStatusRequest, ClientExtension, ClientHelloPayload, ClientSessionTicket, + ConvertProtocolNameList, ConvertServerNameList, DecomposedSignatureScheme, + DigitallySignedStruct, ECDHEServerKeyExchange, ECParameters, ECPointFormatList, + EncryptedExtensions, HandshakeMessagePayload, HandshakePayload, HasServerExtensions, + HelloRetryExtension, HelloRetryRequest, KeyShareEntry, NewSessionTicketExtension, + NewSessionTicketPayload, NewSessionTicketPayloadTLS13, PresharedKeyBinder, + PresharedKeyIdentity, PresharedKeyOffer, Random, ServerECDHParams, ServerExtension, + ServerHelloPayload, ServerKeyExchangePayload, SessionID, SupportedPointFormats, + UnknownExtension, +}; +use webpki::DnsNameRef; + +#[test] +fn rejects_short_random() { + let bytes = [0x01; 31]; + let mut rd = Reader::init(&bytes); + assert_eq!(Random::read(&mut rd), 
None); +} + +#[test] +fn reads_random() { + let bytes = [0x01; 32]; + let mut rd = Reader::init(&bytes); + let rnd = Random::read(&mut rd).unwrap(); + println!("{:?}", rnd); + + assert!(!rd.any_left()); +} + +#[test] +fn debug_random() { + assert_eq!( + "0101010101010101010101010101010101010101010101010101010101010101", + format!("{:?}", Random::from([1; 32])) + ); +} + +#[test] +fn rejects_truncated_sessionid() { + let bytes = [32; 32]; + let mut rd = Reader::init(&bytes); + assert_eq!(SessionID::read(&mut rd), None); +} + +#[test] +fn rejects_sessionid_with_bad_length() { + let bytes = [33; 33]; + let mut rd = Reader::init(&bytes); + assert_eq!(SessionID::read(&mut rd), None); +} + +#[test] +fn sessionid_with_different_lengths_are_unequal() { + let a = SessionID::read(&mut Reader::init(&[1u8, 1])).unwrap(); + let b = SessionID::read(&mut Reader::init(&[2u8, 1, 2])).unwrap(); + assert_ne!(a, b); +} + +#[test] +fn accepts_short_sessionid() { + let bytes = [1; 2]; + let mut rd = Reader::init(&bytes); + let sess = SessionID::read(&mut rd).unwrap(); + println!("{:?}", sess); + + assert!(!sess.is_empty()); + assert_eq!(sess.len(), 1); + assert!(!rd.any_left()); +} + +#[test] +fn accepts_empty_sessionid() { + let bytes = [0; 1]; + let mut rd = Reader::init(&bytes); + let sess = SessionID::read(&mut rd).unwrap(); + println!("{:?}", sess); + + assert!(sess.is_empty()); + assert_eq!(sess.len(), 0); + assert!(!rd.any_left()); +} + +#[test] +fn debug_sessionid() { + let bytes = [ + 32, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, + ]; + let mut rd = Reader::init(&bytes); + let sess = SessionID::read(&mut rd).unwrap(); + assert_eq!( + "0101010101010101010101010101010101010101010101010101010101010101", + format!("{:?}", sess) + ); +} + +#[test] +fn can_roundtrip_unknown_client_ext() { + let bytes = [0x12u8, 0x34u8, 0, 3, 1, 2, 3]; + let mut rd = Reader::init(&bytes); + let ext = ClientExtension::read(&mut rd).unwrap(); + + 
println!("{:?}", ext); + assert_eq!(ext.get_type(), ExtensionType::Unknown(0x1234)); + assert_eq!(bytes.to_vec(), ext.get_encoding()); +} + +#[test] +fn refuses_client_ext_with_unparsed_bytes() { + let bytes = [0x00u8, 0x0b, 0x00, 0x04, 0x02, 0xf8, 0x01, 0x02]; + let mut rd = Reader::init(&bytes); + assert!(ClientExtension::read(&mut rd).is_none()); +} + +#[test] +fn refuses_server_ext_with_unparsed_bytes() { + let bytes = [0x00u8, 0x0b, 0x00, 0x04, 0x02, 0xf8, 0x01, 0x02]; + let mut rd = Reader::init(&bytes); + assert!(ServerExtension::read(&mut rd).is_none()); +} + +#[test] +fn refuses_certificate_ext_with_unparsed_bytes() { + let bytes = [0x00u8, 0x12, 0x00, 0x03, 0x00, 0x00, 0x01]; + let mut rd = Reader::init(&bytes); + assert!(CertificateExtension::read(&mut rd).is_none()); +} + +#[test] +fn refuses_certificate_req_ext_with_unparsed_bytes() { + let bytes = [0x00u8, 0x0d, 0x00, 0x05, 0x00, 0x02, 0x01, 0x02, 0xff]; + let mut rd = Reader::init(&bytes); + assert!(CertReqExtension::read(&mut rd).is_none()); +} + +#[test] +fn refuses_helloreq_ext_with_unparsed_bytes() { + let bytes = [0x00u8, 0x2b, 0x00, 0x03, 0x00, 0x00, 0x01]; + let mut rd = Reader::init(&bytes); + assert!(HelloRetryExtension::read(&mut rd).is_none()); +} + +#[test] +fn refuses_newsessionticket_ext_with_unparsed_bytes() { + let bytes = [0x00u8, 0x2a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x01]; + let mut rd = Reader::init(&bytes); + assert!(NewSessionTicketExtension::read(&mut rd).is_none()); +} + +#[test] +fn can_roundtrip_single_sni() { + let bytes = [0, 0, 0, 7, 0, 5, 0, 0, 2, 0x6c, 0x6f]; + let mut rd = Reader::init(&bytes); + let ext = ClientExtension::read(&mut rd).unwrap(); + println!("{:?}", ext); + + assert_eq!(ext.get_type(), ExtensionType::ServerName); + assert_eq!(bytes.to_vec(), ext.get_encoding()); +} + +#[test] +fn can_round_trip_mixed_case_sni() { + let bytes = [0, 0, 0, 7, 0, 5, 0, 0, 2, 0x4c, 0x6f]; + let mut rd = Reader::init(&bytes); + let ext = ClientExtension::read(&mut 
rd).unwrap(); + println!("{:?}", ext); + + assert_eq!(ext.get_type(), ExtensionType::ServerName); + assert_eq!(bytes.to_vec(), ext.get_encoding()); +} + +#[test] +fn can_roundtrip_other_sni_name_types() { + let bytes = [0, 0, 0, 7, 0, 5, 1, 0, 2, 0x6c, 0x6f]; + let mut rd = Reader::init(&bytes); + let ext = ClientExtension::read(&mut rd).unwrap(); + println!("{:?}", ext); + + assert_eq!(ext.get_type(), ExtensionType::ServerName); + assert_eq!(bytes.to_vec(), ext.get_encoding()); +} + +#[test] +fn get_single_hostname_returns_none_for_other_sni_name_types() { + let bytes = [0, 0, 0, 7, 0, 5, 1, 0, 2, 0x6c, 0x6f]; + let mut rd = Reader::init(&bytes); + let ext = ClientExtension::read(&mut rd).unwrap(); + println!("{:?}", ext); + + assert_eq!(ext.get_type(), ExtensionType::ServerName); + if let ClientExtension::ServerName(snr) = ext { + assert!(!snr.has_duplicate_names_for_type()); + assert!(snr.get_single_hostname().is_none()); + } else { + unreachable!(); + } +} + +#[test] +fn can_roundtrip_multiname_sni() { + let bytes = [0, 0, 0, 12, 0, 10, 0, 0, 2, 0x68, 0x69, 0, 0, 2, 0x6c, 0x6f]; + let mut rd = Reader::init(&bytes); + let ext = ClientExtension::read(&mut rd).unwrap(); + println!("{:?}", ext); + + assert_eq!(ext.get_type(), ExtensionType::ServerName); + assert_eq!(bytes.to_vec(), ext.get_encoding()); + match ext { + ClientExtension::ServerName(req) => { + assert_eq!(2, req.len()); + + assert!(req.has_duplicate_names_for_type()); + + let dns_name_str: &str = req + .get_single_hostname() + .unwrap() + .into(); + assert_eq!(dns_name_str, "hi"); + + assert_eq!(req[0].typ, ServerNameType::HostName); + assert_eq!(req[1].typ, ServerNameType::HostName); + } + _ => unreachable!(), + } +} + +#[test] +fn rejects_truncated_sni() { + let bytes = [0, 0, 0, 1, 0]; + assert!(ClientExtension::read(&mut Reader::init(&bytes)).is_none()); + + let bytes = [0, 0, 0, 2, 0, 1]; + assert!(ClientExtension::read(&mut Reader::init(&bytes)).is_none()); + + let bytes = [0, 0, 0, 3, 0, 1, 0]; 
+ assert!(ClientExtension::read(&mut Reader::init(&bytes)).is_none()); + + let bytes = [0, 0, 0, 4, 0, 2, 0, 0]; + assert!(ClientExtension::read(&mut Reader::init(&bytes)).is_none()); + + let bytes = [0, 0, 0, 5, 0, 3, 0, 0, 0]; + assert!(ClientExtension::read(&mut Reader::init(&bytes)).is_none()); + + let bytes = [0, 0, 0, 5, 0, 3, 0, 0, 1]; + assert!(ClientExtension::read(&mut Reader::init(&bytes)).is_none()); + + let bytes = [0, 0, 0, 6, 0, 4, 0, 0, 2, 0x68]; + assert!(ClientExtension::read(&mut Reader::init(&bytes)).is_none()); +} + +#[test] +fn can_roundtrip_psk_identity() { + let bytes = [0, 0, 0x11, 0x22, 0x33, 0x44]; + let psk_id = PresharedKeyIdentity::read(&mut Reader::init(&bytes)).unwrap(); + println!("{:?}", psk_id); + assert_eq!(psk_id.obfuscated_ticket_age, 0x11223344); + assert_eq!(psk_id.get_encoding(), bytes.to_vec()); + + let bytes = [0, 5, 0x1, 0x2, 0x3, 0x4, 0x5, 0x11, 0x22, 0x33, 0x44]; + let psk_id = PresharedKeyIdentity::read(&mut Reader::init(&bytes)).unwrap(); + println!("{:?}", psk_id); + assert_eq!(psk_id.identity.0, vec![0x1, 0x2, 0x3, 0x4, 0x5]); + assert_eq!(psk_id.obfuscated_ticket_age, 0x11223344); + assert_eq!(psk_id.get_encoding(), bytes.to_vec()); +} + +#[test] +fn can_roundtrip_psk_offer() { + let bytes = [ + 0, 7, 0, 1, 0x99, 0x11, 0x22, 0x33, 0x44, 0, 4, 3, 0x01, 0x02, 0x3, + ]; + let psko = PresharedKeyOffer::read(&mut Reader::init(&bytes)).unwrap(); + println!("{:?}", psko); + + assert_eq!(psko.identities.len(), 1); + assert_eq!(psko.identities[0].identity.0, vec![0x99]); + assert_eq!(psko.identities[0].obfuscated_ticket_age, 0x11223344); + assert_eq!(psko.binders.len(), 1); + assert_eq!(psko.binders[0].0, vec![1, 2, 3]); + assert_eq!(psko.get_encoding(), bytes.to_vec()); +} + +#[test] +fn can_roundtrip_certstatusreq_for_ocsp() { + let ext = ClientExtension::CertificateStatusRequest(CertificateStatusRequest::build_ocsp()); + println!("{:?}", ext); + + let bytes = [ + 0, 5, // CertificateStatusRequest + 0, 11, 1, // OCSP + 0, 
5, 0, 3, 0, 1, 1, 0, 1, 2, + ]; + + let csr = ClientExtension::read(&mut Reader::init(&bytes)).unwrap(); + println!("{:?}", csr); + assert_eq!(csr.get_encoding(), bytes.to_vec()); +} + +#[test] +fn can_roundtrip_certstatusreq_for_other() { + let bytes = [ + 0, 5, // CertificateStatusRequest + 0, 5, 2, // !OCSP + 1, 2, 3, 4, + ]; + + let csr = ClientExtension::read(&mut Reader::init(&bytes)).unwrap(); + println!("{:?}", csr); + assert_eq!(csr.get_encoding(), bytes.to_vec()); +} + +#[test] +fn can_roundtrip_multi_proto() { + let bytes = [0, 16, 0, 8, 0, 6, 2, 0x68, 0x69, 2, 0x6c, 0x6f]; + let mut rd = Reader::init(&bytes); + let ext = ClientExtension::read(&mut rd).unwrap(); + println!("{:?}", ext); + + assert_eq!(ext.get_type(), ExtensionType::ALProtocolNegotiation); + assert_eq!(ext.get_encoding(), bytes.to_vec()); + match ext { + ClientExtension::Protocols(prot) => { + assert_eq!(2, prot.len()); + assert_eq!(vec![b"hi", b"lo"], prot.to_slices()); + assert_eq!(prot.as_single_slice(), None); + } + _ => unreachable!(), + } +} + +#[test] +fn can_roundtrip_single_proto() { + let bytes = [0, 16, 0, 5, 0, 3, 2, 0x68, 0x69]; + let mut rd = Reader::init(&bytes); + let ext = ClientExtension::read(&mut rd).unwrap(); + println!("{:?}", ext); + + assert_eq!(ext.get_type(), ExtensionType::ALProtocolNegotiation); + assert_eq!(bytes.to_vec(), ext.get_encoding()); + match ext { + ClientExtension::Protocols(prot) => { + assert_eq!(1, prot.len()); + assert_eq!(vec![b"hi"], prot.to_slices()); + assert_eq!(prot.as_single_slice(), Some(&b"hi"[..])); + } + _ => unreachable!(), + } +} + +#[test] +fn decomposed_signature_scheme_has_correct_mappings() { + assert_eq!( + SignatureScheme::make(SignatureAlgorithm::RSA, HashAlgorithm::SHA1), + SignatureScheme::RSA_PKCS1_SHA1 + ); + assert_eq!( + SignatureScheme::make(SignatureAlgorithm::RSA, HashAlgorithm::SHA256), + SignatureScheme::RSA_PKCS1_SHA256 + ); + assert_eq!( + SignatureScheme::make(SignatureAlgorithm::RSA, HashAlgorithm::SHA384), + 
SignatureScheme::RSA_PKCS1_SHA384 + ); + assert_eq!( + SignatureScheme::make(SignatureAlgorithm::RSA, HashAlgorithm::SHA512), + SignatureScheme::RSA_PKCS1_SHA512 + ); + + assert_eq!( + SignatureScheme::make(SignatureAlgorithm::ECDSA, HashAlgorithm::SHA256), + SignatureScheme::ECDSA_NISTP256_SHA256 + ); + assert_eq!( + SignatureScheme::make(SignatureAlgorithm::ECDSA, HashAlgorithm::SHA384), + SignatureScheme::ECDSA_NISTP384_SHA384 + ); + assert_eq!( + SignatureScheme::make(SignatureAlgorithm::ECDSA, HashAlgorithm::SHA512), + SignatureScheme::ECDSA_NISTP521_SHA512 + ); +} + +fn get_sample_clienthellopayload() -> ClientHelloPayload { + ClientHelloPayload { + client_version: ProtocolVersion::TLSv1_2, + random: Random::from([0; 32]), + session_id: SessionID::empty(), + cipher_suites: vec![CipherSuite::TLS_NULL_WITH_NULL_NULL], + compression_methods: vec![Compression::Null], + extensions: vec![ + ClientExtension::ECPointFormats(ECPointFormatList::supported()), + ClientExtension::NamedGroups(vec![NamedGroup::X25519]), + ClientExtension::SignatureAlgorithms(vec![SignatureScheme::ECDSA_NISTP256_SHA256]), + ClientExtension::make_sni(DnsNameRef::try_from_ascii_str("hello").unwrap()), + ClientExtension::SessionTicket(ClientSessionTicket::Request), + ClientExtension::SessionTicket(ClientSessionTicket::Offer(Payload(vec![]))), + ClientExtension::Protocols(vec![PayloadU8(vec![0])]), + ClientExtension::SupportedVersions(vec![ProtocolVersion::TLSv1_3]), + ClientExtension::KeyShare(vec![KeyShareEntry::new(NamedGroup::X25519, &[1, 2, 3])]), + ClientExtension::PresharedKeyModes(vec![PSKKeyExchangeMode::PSK_DHE_KE]), + ClientExtension::PresharedKey(PresharedKeyOffer { + identities: vec![ + PresharedKeyIdentity::new(vec![3, 4, 5], 123456), + PresharedKeyIdentity::new(vec![6, 7, 8], 7891011), + ], + binders: vec![ + PresharedKeyBinder::new(vec![1, 2, 3]), + PresharedKeyBinder::new(vec![3, 4, 5]), + ], + }), + ClientExtension::Cookie(PayloadU16(vec![1, 2, 3])), + 
ClientExtension::ExtendedMasterSecretRequest, + ClientExtension::CertificateStatusRequest(CertificateStatusRequest::build_ocsp()), + ClientExtension::SignedCertificateTimestampRequest, + ClientExtension::TransportParameters(vec![1, 2, 3]), + ClientExtension::Unknown(UnknownExtension { + typ: ExtensionType::Unknown(12345), + payload: Payload(vec![1, 2, 3]), + }), + ], + } +} + +#[test] +fn can_print_all_clientextensions() { + println!("client hello {:?}", get_sample_clienthellopayload()); +} + +#[test] +fn can_clone_all_clientextensions() { + let _ = get_sample_serverhellopayload().extensions; +} + +#[test] +fn client_has_duplicate_extensions_works() { + let mut chp = get_sample_clienthellopayload(); + assert!(chp.has_duplicate_extension()); // due to SessionTicketRequest/SessionTicketOffer + + chp.extensions.drain(1..); + assert!(!chp.has_duplicate_extension()); + + chp.extensions = vec![]; + assert!(!chp.has_duplicate_extension()); +} + +#[test] +fn test_truncated_psk_offer() { + let ext = ClientExtension::PresharedKey(PresharedKeyOffer { + identities: vec![PresharedKeyIdentity::new(vec![3, 4, 5], 123456)], + binders: vec![PresharedKeyBinder::new(vec![1, 2, 3])], + }); + + let mut enc = ext.get_encoding(); + println!("testing {:?} enc {:?}", ext, enc); + for l in 0..enc.len() { + if l == 9 { + continue; + } + put_u16(l as u16, &mut enc[4..]); + let rc = ClientExtension::read_bytes(&enc); + assert!(rc.is_none()); + } +} + +#[test] +fn test_truncated_client_hello_is_detected() { + let ch = get_sample_clienthellopayload(); + let enc = ch.get_encoding(); + println!("testing {:?} enc {:?}", ch, enc); + + for l in 0..enc.len() { + println!("len {:?} enc {:?}", l, &enc[..l]); + if l == 41 { + continue; // where extensions are empty + } + assert!(ClientHelloPayload::read_bytes(&enc[..l]).is_none()); + } +} + +#[test] +fn test_truncated_client_extension_is_detected() { + let chp = get_sample_clienthellopayload(); + + for ext in &chp.extensions { + let mut enc = 
ext.get_encoding(); + println!("testing {:?} enc {:?}", ext, enc); + + // "outer" truncation, i.e., where the extension-level length is longer than + // the input + for l in 0..enc.len() { + assert!(ClientExtension::read_bytes(&enc[..l]).is_none()); + } + + // these extension types don't have any internal encoding that rustls validates: + match ext.get_type() { + ExtensionType::TransportParameters | ExtensionType::Unknown(_) => { + continue; + } + _ => {} + }; + + // "inner" truncation, where the extension-level length agrees with the input + // length, but isn't long enough for the type of extension + for l in 0..(enc.len() - 4) { + put_u16(l as u16, &mut enc[2..]); + println!(" encoding {:?} len {:?}", enc, l); + assert!(ClientExtension::read_bytes(&enc).is_none()); + } + } +} + +fn test_client_extension_getter(typ: ExtensionType, getter: fn(&ClientHelloPayload) -> bool) { + let mut chp = get_sample_clienthellopayload(); + let ext = chp.find_extension(typ).unwrap().clone(); + + chp.extensions = vec![]; + assert!(!getter(&chp)); + + chp.extensions = vec![ext]; + assert!(getter(&chp)); + + chp.extensions = vec![ClientExtension::Unknown(UnknownExtension { + typ, + payload: Payload(vec![]), + })]; + assert!(!getter(&chp)); +} + +#[test] +fn client_get_sni_extension() { + test_client_extension_getter(ExtensionType::ServerName, |chp| { + chp.get_sni_extension().is_some() + }); +} + +#[test] +fn client_get_sigalgs_extension() { + test_client_extension_getter(ExtensionType::SignatureAlgorithms, |chp| { + chp.get_sigalgs_extension().is_some() + }); +} + +#[test] +fn client_get_namedgroups_extension() { + test_client_extension_getter(ExtensionType::EllipticCurves, |chp| { + chp.get_namedgroups_extension() + .is_some() + }); +} + +#[test] +fn client_get_ecpoints_extension() { + test_client_extension_getter(ExtensionType::ECPointFormats, |chp| { + chp.get_ecpoints_extension().is_some() + }); +} + +#[test] +fn client_get_alpn_extension() { + 
test_client_extension_getter(ExtensionType::ALProtocolNegotiation, |chp| { + chp.get_alpn_extension().is_some() + }); +} + +#[test] +fn client_get_quic_params_extension() { + test_client_extension_getter(ExtensionType::TransportParameters, |chp| { + chp.get_quic_params_extension() + .is_some() + }); +} + +#[test] +fn client_get_versions_extension() { + test_client_extension_getter(ExtensionType::SupportedVersions, |chp| { + chp.get_versions_extension().is_some() + }); +} + +#[test] +fn client_get_keyshare_extension() { + test_client_extension_getter(ExtensionType::KeyShare, |chp| { + chp.get_keyshare_extension().is_some() + }); +} + +#[test] +fn client_get_psk() { + test_client_extension_getter(ExtensionType::PreSharedKey, |chp| chp.get_psk().is_some()); +} + +#[test] +fn client_get_psk_modes() { + test_client_extension_getter(ExtensionType::PSKKeyExchangeModes, |chp| { + chp.get_psk_modes().is_some() + }); +} + +#[test] +fn test_truncated_helloretry_extension_is_detected() { + let hrr = get_sample_helloretryrequest(); + + for ext in &hrr.extensions { + let mut enc = ext.get_encoding(); + println!("testing {:?} enc {:?}", ext, enc); + + // "outer" truncation, i.e., where the extension-level length is longer than + // the input + for l in 0..enc.len() { + assert!(HelloRetryExtension::read_bytes(&enc[..l]).is_none()); + } + + // these extension types don't have any internal encoding that rustls validates: + if let ExtensionType::Unknown(_) = ext.get_type() { + continue; + } + + // "inner" truncation, where the extension-level length agrees with the input + // length, but isn't long enough for the type of extension + for l in 0..(enc.len() - 4) { + put_u16(l as u16, &mut enc[2..]); + println!(" encoding {:?} len {:?}", enc, l); + assert!(HelloRetryExtension::read_bytes(&enc).is_none()); + } + } +} + +fn test_helloretry_extension_getter(typ: ExtensionType, getter: fn(&HelloRetryRequest) -> bool) { + let mut hrr = get_sample_helloretryrequest(); + let mut exts = 
std::mem::take(&mut hrr.extensions); + exts.retain(|ext| ext.get_type() == typ); + + assert!(!getter(&hrr)); + + hrr.extensions = exts; + assert!(getter(&hrr)); + + hrr.extensions = vec![HelloRetryExtension::Unknown(UnknownExtension { + typ, + payload: Payload(vec![]), + })]; + assert!(!getter(&hrr)); +} + +#[test] +fn helloretry_get_requested_key_share_group() { + test_helloretry_extension_getter(ExtensionType::KeyShare, |hrr| { + hrr.get_requested_key_share_group() + .is_some() + }); +} + +#[test] +fn helloretry_get_cookie() { + test_helloretry_extension_getter(ExtensionType::Cookie, |hrr| hrr.get_cookie().is_some()); +} + +#[test] +fn helloretry_get_supported_versions() { + test_helloretry_extension_getter(ExtensionType::SupportedVersions, |hrr| { + hrr.get_supported_versions().is_some() + }); +} + +#[test] +fn test_truncated_server_extension_is_detected() { + let shp = get_sample_serverhellopayload(); + + for ext in &shp.extensions { + let mut enc = ext.get_encoding(); + println!("testing {:?} enc {:?}", ext, enc); + + // "outer" truncation, i.e., where the extension-level length is longer than + // the input + for l in 0..enc.len() { + assert!(ServerExtension::read_bytes(&enc[..l]).is_none()); + } + + // these extension types don't have any internal encoding that rustls validates: + match ext.get_type() { + ExtensionType::TransportParameters | ExtensionType::Unknown(_) => { + continue; + } + _ => {} + }; + + // "inner" truncation, where the extension-level length agrees with the input + // length, but isn't long enough for the type of extension + for l in 0..(enc.len() - 4) { + put_u16(l as u16, &mut enc[2..]); + println!(" encoding {:?} len {:?}", enc, l); + assert!(ServerExtension::read_bytes(&enc).is_none()); + } + } +} + +fn test_server_extension_getter(typ: ExtensionType, getter: fn(&ServerHelloPayload) -> bool) { + let mut shp = get_sample_serverhellopayload(); + let ext = shp.find_extension(typ).unwrap().clone(); + + shp.extensions = vec![]; + 
assert!(!getter(&shp)); + + shp.extensions = vec![ext]; + assert!(getter(&shp)); + + shp.extensions = vec![ServerExtension::Unknown(UnknownExtension { + typ, + payload: Payload(vec![]), + })]; + assert!(!getter(&shp)); +} + +#[test] +fn server_get_key_share() { + test_server_extension_getter(ExtensionType::KeyShare, |shp| shp.get_key_share().is_some()); +} + +#[test] +fn server_get_psk_index() { + test_server_extension_getter(ExtensionType::PreSharedKey, |shp| { + shp.get_psk_index().is_some() + }); +} + +#[test] +fn server_get_ecpoints_extension() { + test_server_extension_getter(ExtensionType::ECPointFormats, |shp| { + shp.get_ecpoints_extension().is_some() + }); +} + +#[test] +fn server_get_sct_list() { + test_server_extension_getter(ExtensionType::SCT, |shp| shp.get_sct_list().is_some()); +} + +#[test] +fn server_get_supported_versions() { + test_server_extension_getter(ExtensionType::SupportedVersions, |shp| { + shp.get_supported_versions().is_some() + }); +} + +fn test_cert_extension_getter(typ: ExtensionType, getter: fn(&CertificateEntry) -> bool) { + let mut ce = get_sample_certificatepayloadtls13() + .entries + .remove(0); + let mut exts = std::mem::take(&mut ce.exts); + exts.retain(|ext| ext.get_type() == typ); + + assert!(!getter(&ce)); + + ce.exts = exts; + assert!(getter(&ce)); + + ce.exts = vec![CertificateExtension::Unknown(UnknownExtension { + typ, + payload: Payload(vec![]), + })]; + assert!(!getter(&ce)); +} + +#[test] +fn certentry_get_ocsp_response() { + test_cert_extension_getter(ExtensionType::StatusRequest, |ce| { + ce.get_ocsp_response().is_some() + }); +} + +#[test] +fn certentry_get_scts() { + test_cert_extension_getter(ExtensionType::SCT, |ce| ce.get_scts().is_some()); +} + +fn get_sample_serverhellopayload() -> ServerHelloPayload { + ServerHelloPayload { + legacy_version: ProtocolVersion::TLSv1_2, + random: Random::from([0; 32]), + session_id: SessionID::empty(), + cipher_suite: CipherSuite::TLS_NULL_WITH_NULL_NULL, + compression_method: 
Compression::Null, + extensions: vec![ + ServerExtension::ECPointFormats(ECPointFormatList::supported()), + ServerExtension::ServerNameAck, + ServerExtension::SessionTicketAck, + ServerExtension::RenegotiationInfo(PayloadU8(vec![0])), + ServerExtension::Protocols(vec![PayloadU8(vec![0])]), + ServerExtension::KeyShare(KeyShareEntry::new(NamedGroup::X25519, &[1, 2, 3])), + ServerExtension::PresharedKey(3), + ServerExtension::ExtendedMasterSecretAck, + ServerExtension::CertificateStatusAck, + ServerExtension::SignedCertificateTimestamp(vec![PayloadU16(vec![0])]), + ServerExtension::SupportedVersions(ProtocolVersion::TLSv1_2), + ServerExtension::TransportParameters(vec![1, 2, 3]), + ServerExtension::Unknown(UnknownExtension { + typ: ExtensionType::Unknown(12345), + payload: Payload(vec![1, 2, 3]), + }), + ], + } +} + +#[test] +fn can_print_all_serverextensions() { + println!("server hello {:?}", get_sample_serverhellopayload()); +} + +#[test] +fn can_clone_all_serverextensions() { + let _ = get_sample_serverhellopayload().extensions; +} + +fn get_sample_helloretryrequest() -> HelloRetryRequest { + HelloRetryRequest { + legacy_version: ProtocolVersion::TLSv1_2, + session_id: SessionID::empty(), + cipher_suite: CipherSuite::TLS_NULL_WITH_NULL_NULL, + extensions: vec![ + HelloRetryExtension::KeyShare(NamedGroup::X25519), + HelloRetryExtension::Cookie(PayloadU16(vec![0])), + HelloRetryExtension::SupportedVersions(ProtocolVersion::TLSv1_2), + HelloRetryExtension::Unknown(UnknownExtension { + typ: ExtensionType::Unknown(12345), + payload: Payload(vec![1, 2, 3]), + }), + ], + } +} + +fn get_sample_certificatepayloadtls13() -> CertificatePayloadTLS13 { + CertificatePayloadTLS13 { + context: PayloadU8(vec![1, 2, 3]), + entries: vec![CertificateEntry { + cert: Certificate(vec![3, 4, 5]), + exts: vec![ + CertificateExtension::CertificateStatus(CertificateStatus { + ocsp_response: PayloadU24(vec![1, 2, 3]), + }), + 
CertificateExtension::SignedCertificateTimestamp(vec![PayloadU16(vec![0])]), + CertificateExtension::Unknown(UnknownExtension { + typ: ExtensionType::Unknown(12345), + payload: Payload(vec![1, 2, 3]), + }), + ], + }], + } +} + +fn get_sample_serverkeyexchangepayload_ecdhe() -> ServerKeyExchangePayload { + ServerKeyExchangePayload::ECDHE(ECDHEServerKeyExchange { + params: ServerECDHParams { + curve_params: ECParameters { + curve_type: ECCurveType::NamedCurve, + named_group: NamedGroup::X25519, + }, + public: PayloadU8(vec![1, 2, 3]), + }, + dss: DigitallySignedStruct::new(SignatureScheme::RSA_PSS_SHA256, vec![1, 2, 3]), + }) +} + +fn get_sample_serverkeyexchangepayload_unknown() -> ServerKeyExchangePayload { + ServerKeyExchangePayload::Unknown(Payload(vec![1, 2, 3])) +} + +fn get_sample_certificaterequestpayload() -> CertificateRequestPayload { + CertificateRequestPayload { + certtypes: vec![ClientCertificateType::RSASign], + sigschemes: vec![SignatureScheme::ECDSA_NISTP256_SHA256], + canames: vec![PayloadU16(vec![1, 2, 3])], + } +} + +fn get_sample_certificaterequestpayloadtls13() -> CertificateRequestPayloadTLS13 { + CertificateRequestPayloadTLS13 { + context: PayloadU8(vec![1, 2, 3]), + extensions: vec![ + CertReqExtension::SignatureAlgorithms(vec![SignatureScheme::ECDSA_NISTP256_SHA256]), + CertReqExtension::AuthorityNames(vec![PayloadU16(vec![1, 2, 3])]), + CertReqExtension::Unknown(UnknownExtension { + typ: ExtensionType::Unknown(12345), + payload: Payload(vec![1, 2, 3]), + }), + ], + } +} + +fn get_sample_newsessionticketpayload() -> NewSessionTicketPayload { + NewSessionTicketPayload { + lifetime_hint: 1234, + ticket: PayloadU16(vec![1, 2, 3]), + } +} + +fn get_sample_newsessionticketpayloadtls13() -> NewSessionTicketPayloadTLS13 { + NewSessionTicketPayloadTLS13 { + lifetime: 123, + age_add: 1234, + nonce: PayloadU8(vec![1, 2, 3]), + ticket: PayloadU16(vec![4, 5, 6]), + exts: vec![NewSessionTicketExtension::Unknown(UnknownExtension { + typ: 
ExtensionType::Unknown(12345), + payload: Payload(vec![1, 2, 3]), + })], + } +} + +fn get_sample_encryptedextensions() -> EncryptedExtensions { + get_sample_serverhellopayload().extensions +} + +fn get_sample_certificatestatus() -> CertificateStatus { + CertificateStatus { + ocsp_response: PayloadU24(vec![1, 2, 3]), + } +} + +fn get_all_tls12_handshake_payloads() -> Vec { + vec![ + HandshakeMessagePayload { + typ: HandshakeType::HelloRequest, + payload: HandshakePayload::HelloRequest, + }, + HandshakeMessagePayload { + typ: HandshakeType::ClientHello, + payload: HandshakePayload::ClientHello(get_sample_clienthellopayload()), + }, + HandshakeMessagePayload { + typ: HandshakeType::ServerHello, + payload: HandshakePayload::ServerHello(get_sample_serverhellopayload()), + }, + HandshakeMessagePayload { + typ: HandshakeType::HelloRetryRequest, + payload: HandshakePayload::HelloRetryRequest(get_sample_helloretryrequest()), + }, + HandshakeMessagePayload { + typ: HandshakeType::Certificate, + payload: HandshakePayload::Certificate(vec![Certificate(vec![1, 2, 3])]), + }, + HandshakeMessagePayload { + typ: HandshakeType::ServerKeyExchange, + payload: HandshakePayload::ServerKeyExchange( + get_sample_serverkeyexchangepayload_ecdhe(), + ), + }, + HandshakeMessagePayload { + typ: HandshakeType::ServerKeyExchange, + payload: HandshakePayload::ServerKeyExchange( + get_sample_serverkeyexchangepayload_unknown(), + ), + }, + HandshakeMessagePayload { + typ: HandshakeType::CertificateRequest, + payload: HandshakePayload::CertificateRequest(get_sample_certificaterequestpayload()), + }, + HandshakeMessagePayload { + typ: HandshakeType::ServerHelloDone, + payload: HandshakePayload::ServerHelloDone, + }, + HandshakeMessagePayload { + typ: HandshakeType::ClientKeyExchange, + payload: HandshakePayload::ClientKeyExchange(Payload(vec![1, 2, 3])), + }, + HandshakeMessagePayload { + typ: HandshakeType::NewSessionTicket, + payload: 
HandshakePayload::NewSessionTicket(get_sample_newsessionticketpayload()), + }, + HandshakeMessagePayload { + typ: HandshakeType::EncryptedExtensions, + payload: HandshakePayload::EncryptedExtensions(get_sample_encryptedextensions()), + }, + HandshakeMessagePayload { + typ: HandshakeType::KeyUpdate, + payload: HandshakePayload::KeyUpdate(KeyUpdateRequest::UpdateRequested), + }, + HandshakeMessagePayload { + typ: HandshakeType::KeyUpdate, + payload: HandshakePayload::KeyUpdate(KeyUpdateRequest::UpdateNotRequested), + }, + HandshakeMessagePayload { + typ: HandshakeType::Finished, + payload: HandshakePayload::Finished(Payload(vec![1, 2, 3])), + }, + HandshakeMessagePayload { + typ: HandshakeType::CertificateStatus, + payload: HandshakePayload::CertificateStatus(get_sample_certificatestatus()), + }, + HandshakeMessagePayload { + typ: HandshakeType::Unknown(99), + payload: HandshakePayload::Unknown(Payload(vec![1, 2, 3])), + }, + ] +} + +#[test] +fn can_roundtrip_all_tls12_handshake_payloads() { + for ref hm in get_all_tls12_handshake_payloads().iter() { + println!("{:?}", hm.typ); + let bytes = hm.get_encoding(); + let mut rd = Reader::init(&bytes); + let other = HandshakeMessagePayload::read(&mut rd).unwrap(); + assert!(!rd.any_left()); + assert_eq!(hm.get_encoding(), other.get_encoding()); + + println!("{:?}", hm); + println!("{:?}", other); + } +} + +#[test] +fn can_detect_truncation_of_all_tls12_handshake_payloads() { + for hm in get_all_tls12_handshake_payloads().iter() { + let mut enc = hm.get_encoding(); + println!("test {:?} enc {:?}", hm, enc); + + // outer truncation + for l in 0..enc.len() { + assert!(HandshakeMessagePayload::read_bytes(&enc[..l]).is_none()) + } + + // inner truncation + for l in 0..enc.len() - 4 { + put_u24(l as u32, &mut enc[1..]); + println!(" check len {:?} enc {:?}", l, enc); + + match (hm.typ, l) { + (HandshakeType::ClientHello, 41) + | (HandshakeType::ServerHello, 38) + | (HandshakeType::ServerKeyExchange, _) + | 
(HandshakeType::ClientKeyExchange, _) + | (HandshakeType::Finished, _) + | (HandshakeType::Unknown(_), _) => continue, + _ => {} + }; + + assert!(HandshakeMessagePayload::read_version( + &mut Reader::init(&enc), + ProtocolVersion::TLSv1_2 + ) + .is_none()); + assert!(HandshakeMessagePayload::read_bytes(&enc).is_none()); + } + } +} + +fn get_all_tls13_handshake_payloads() -> Vec { + vec![ + HandshakeMessagePayload { + typ: HandshakeType::HelloRequest, + payload: HandshakePayload::HelloRequest, + }, + HandshakeMessagePayload { + typ: HandshakeType::ClientHello, + payload: HandshakePayload::ClientHello(get_sample_clienthellopayload()), + }, + HandshakeMessagePayload { + typ: HandshakeType::ServerHello, + payload: HandshakePayload::ServerHello(get_sample_serverhellopayload()), + }, + HandshakeMessagePayload { + typ: HandshakeType::HelloRetryRequest, + payload: HandshakePayload::HelloRetryRequest(get_sample_helloretryrequest()), + }, + HandshakeMessagePayload { + typ: HandshakeType::Certificate, + payload: HandshakePayload::CertificateTLS13(get_sample_certificatepayloadtls13()), + }, + HandshakeMessagePayload { + typ: HandshakeType::ServerKeyExchange, + payload: HandshakePayload::ServerKeyExchange( + get_sample_serverkeyexchangepayload_ecdhe(), + ), + }, + HandshakeMessagePayload { + typ: HandshakeType::ServerKeyExchange, + payload: HandshakePayload::ServerKeyExchange( + get_sample_serverkeyexchangepayload_unknown(), + ), + }, + HandshakeMessagePayload { + typ: HandshakeType::CertificateRequest, + payload: HandshakePayload::CertificateRequestTLS13( + get_sample_certificaterequestpayloadtls13(), + ), + }, + HandshakeMessagePayload { + typ: HandshakeType::CertificateVerify, + payload: HandshakePayload::CertificateVerify(DigitallySignedStruct::new( + SignatureScheme::ECDSA_NISTP256_SHA256, + vec![1, 2, 3], + )), + }, + HandshakeMessagePayload { + typ: HandshakeType::ServerHelloDone, + payload: HandshakePayload::ServerHelloDone, + }, + HandshakeMessagePayload { + typ: 
HandshakeType::ClientKeyExchange, + payload: HandshakePayload::ClientKeyExchange(Payload(vec![1, 2, 3])), + }, + HandshakeMessagePayload { + typ: HandshakeType::NewSessionTicket, + payload: HandshakePayload::NewSessionTicketTLS13( + get_sample_newsessionticketpayloadtls13(), + ), + }, + HandshakeMessagePayload { + typ: HandshakeType::EncryptedExtensions, + payload: HandshakePayload::EncryptedExtensions(get_sample_encryptedextensions()), + }, + HandshakeMessagePayload { + typ: HandshakeType::KeyUpdate, + payload: HandshakePayload::KeyUpdate(KeyUpdateRequest::UpdateRequested), + }, + HandshakeMessagePayload { + typ: HandshakeType::KeyUpdate, + payload: HandshakePayload::KeyUpdate(KeyUpdateRequest::UpdateNotRequested), + }, + HandshakeMessagePayload { + typ: HandshakeType::Finished, + payload: HandshakePayload::Finished(Payload(vec![1, 2, 3])), + }, + HandshakeMessagePayload { + typ: HandshakeType::CertificateStatus, + payload: HandshakePayload::CertificateStatus(get_sample_certificatestatus()), + }, + HandshakeMessagePayload { + typ: HandshakeType::Unknown(99), + payload: HandshakePayload::Unknown(Payload(vec![1, 2, 3])), + }, + ] +} + +#[test] +fn can_roundtrip_all_tls13_handshake_payloads() { + for ref hm in get_all_tls13_handshake_payloads().iter() { + println!("{:?}", hm.typ); + let bytes = hm.get_encoding(); + let mut rd = Reader::init(&bytes); + + let other = + HandshakeMessagePayload::read_version(&mut rd, ProtocolVersion::TLSv1_3).unwrap(); + assert!(!rd.any_left()); + assert_eq!(hm.get_encoding(), other.get_encoding()); + + println!("{:?}", hm); + println!("{:?}", other); + } +} + +fn put_u24(u: u32, b: &mut [u8]) { + b[0] = (u >> 16) as u8; + b[1] = (u >> 8) as u8; + b[2] = u as u8; +} + +#[test] +fn can_detect_truncation_of_all_tls13_handshake_payloads() { + for hm in get_all_tls13_handshake_payloads().iter() { + let mut enc = hm.get_encoding(); + println!("test {:?} enc {:?}", hm, enc); + + // outer truncation + for l in 0..enc.len() { + 
assert!(HandshakeMessagePayload::read_bytes(&enc[..l]).is_none()) + } + + // inner truncation + for l in 0..enc.len() - 4 { + put_u24(l as u32, &mut enc[1..]); + println!(" check len {:?} enc {:?}", l, enc); + + match (hm.typ, l) { + (HandshakeType::ClientHello, 41) + | (HandshakeType::ServerHello, 38) + | (HandshakeType::ServerKeyExchange, _) + | (HandshakeType::ClientKeyExchange, _) + | (HandshakeType::Finished, _) + | (HandshakeType::Unknown(_), _) => continue, + _ => {} + }; + + assert!(HandshakeMessagePayload::read_version( + &mut Reader::init(&enc), + ProtocolVersion::TLSv1_3 + ) + .is_none()); + } + } +} + +#[test] +fn cannot_read_messagehash_from_network() { + let mh = HandshakeMessagePayload { + typ: HandshakeType::MessageHash, + payload: HandshakePayload::MessageHash(Payload::new(vec![1, 2, 3])), + }; + println!("mh {:?}", mh); + let enc = mh.get_encoding(); + assert!(HandshakeMessagePayload::read_bytes(&enc).is_none()); +} + +#[test] +fn cannot_decode_huge_certificate() { + let mut buf = [0u8; 65 * 1024]; + // exactly 64KB decodes fine + buf[0] = 0x0b; + buf[1] = 0x01; + buf[2] = 0x00; + buf[3] = 0x03; + buf[4] = 0x01; + buf[5] = 0x00; + buf[6] = 0x00; + buf[7] = 0x00; + buf[8] = 0xff; + buf[9] = 0xfd; + HandshakeMessagePayload::read_bytes(&buf).unwrap(); + + // however 64KB + 1 byte does not + buf[1] = 0x01; + buf[2] = 0x00; + buf[3] = 0x04; + buf[4] = 0x01; + buf[5] = 0x00; + buf[6] = 0x01; + assert!(HandshakeMessagePayload::read_bytes(&buf).is_none()); +} + +#[test] +fn can_decode_server_hello_from_api_devicecheck_apple_com() { + let data = include_bytes!("hello-api.devicecheck.apple.com.bin"); + let mut r = Reader::init(data); + let hm = HandshakeMessagePayload::read(&mut r).unwrap(); + println!("msg: {:?}", hm); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/hsjoiner.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/hsjoiner.rs new file mode 100644 index 
0000000000000000000000000000000000000000..909f6a13ad5327e5209295872320d7e778d9faa9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/hsjoiner.rs @@ -0,0 +1,281 @@ +use std::collections::VecDeque; + +use crate::enums::ProtocolVersion; +use crate::msgs::base::Payload; +use crate::msgs::codec; +use crate::msgs::enums::ContentType; +use crate::msgs::handshake::HandshakeMessagePayload; +use crate::msgs::message::{Message, MessagePayload, PlainMessage}; + +const HEADER_SIZE: usize = 1 + 3; + +/// TLS allows for handshake messages of up to 16MB. We +/// restrict that to 64KB to limit potential for denial-of- +/// service. +const MAX_HANDSHAKE_SIZE: u32 = 0xffff; + +/// This works to reconstruct TLS handshake messages +/// from individual TLS messages. It's guaranteed that +/// TLS messages output from this layer contain precisely +/// one handshake payload. +pub struct HandshakeJoiner { + /// The message payload(s) we're currently accumulating. + buf: Vec, + + /// Sizes of messages currently in the buffer. + /// + /// The buffer can be larger than the sum of the sizes in this queue, because it might contain + /// the start of a message that hasn't fully been received yet as its suffix. + sizes: VecDeque, + + /// Version of the protocol we're currently parsing. + version: ProtocolVersion, +} + +impl HandshakeJoiner { + /// Make a new HandshakeJoiner. + pub fn new() -> Self { + Self { + buf: Vec::new(), + sizes: VecDeque::new(), + version: ProtocolVersion::TLSv1_2, + } + } + + /// Take the message, and join/split it as needed. + /// + /// Returns `Err(JoinerError::Unwanted(msg))` if `msg`'s type is not `ContentType::Handshake` or + /// `JoinerError::Decode` if a received payload has an advertised size larger than we accept. + /// + /// Otherwise, yields a `bool` to indicate whether the handshake is "aligned": if the buffer currently + /// only contains complete payloads (that is, no incomplete message in the suffix). 
+ pub fn push(&mut self, msg: PlainMessage) -> Result { + if msg.typ != ContentType::Handshake { + return Err(JoinerError::Unwanted(msg)); + } + + // The vast majority of the time `self.buf` will be empty since most + // handshake messages arrive in a single fragment. Avoid allocating and + // copying in that common case. + if self.buf.is_empty() { + self.buf = msg.payload.0; + } else { + self.buf + .extend_from_slice(&msg.payload.0[..]); + } + + if msg.version == ProtocolVersion::TLSv1_3 { + self.version = msg.version; + } + + // Check the suffix of the buffer that hasn't been covered by `sizes` so far + // for complete messages. If we find any, update `self.sizes` and `complete`. + let mut complete = self.sizes.iter().copied().sum(); + while let Some(size) = payload_size(&self.buf[complete..])? { + self.sizes.push_back(size); + complete += size; + } + + // Use the value of `complete` to determine if the buffer currently contains any + // incomplete messages. If not, an incoming message is said to be "aligned". + Ok(complete == self.buf.len()) + } + + /// Parse the first received message out of the buffer. + /// + /// Returns `Ok(None)` if we don't have a complete message in the buffer, or `Err` if we + /// fail to parse the first message in the buffer. + pub fn pop(&mut self) -> Result, JoinerError> { + let len = match self.sizes.pop_front() { + Some(len) => len, + None => return Ok(None), + }; + + // Parse the first part of the buffer as a handshake buffer. + // If we get `None` back, we've failed to parse the message. + // If we succeed, drain the relevant bytes from the buffer. 
+ + let buf = &self.buf[..len]; + let mut rd = codec::Reader::init(buf); + let parsed = match HandshakeMessagePayload::read_version(&mut rd, self.version) { + Some(p) => p, + None => return Err(JoinerError::Decode), + }; + + let message = Message { + version: self.version, + payload: MessagePayload::Handshake { + parsed, + encoded: Payload::new(buf), + }, + }; + + self.buf.drain(..len); + Ok(Some(message)) + } +} + +/// Does `buf` contain a full handshake payload? +/// +/// Returns `Ok(Some(_))` with the length of the payload (including header) if it does, +/// `Ok(None)` if the buffer is too small to contain a message with the length advertised in the +/// header, or `Err` if the advertised length is larger than what we want to accept +/// (`MAX_HANDSHAKE_SIZE`). +fn payload_size(buf: &[u8]) -> Result, JoinerError> { + if buf.len() < HEADER_SIZE { + return Ok(None); + } + + let (header, rest) = buf.split_at(HEADER_SIZE); + match codec::u24::decode(&header[1..]) { + Some(len) if len.0 > MAX_HANDSHAKE_SIZE => Err(JoinerError::Decode), + Some(len) if rest.get(..len.into()).is_some() => Ok(Some(HEADER_SIZE + usize::from(len))), + _ => Ok(None), + } +} + +#[derive(Debug)] +pub enum JoinerError { + Unwanted(PlainMessage), + Decode, +} + +#[cfg(test)] +mod tests { + use super::HandshakeJoiner; + use crate::enums::ProtocolVersion; + use crate::msgs::base::Payload; + use crate::msgs::codec::Codec; + use crate::msgs::enums::{ContentType, HandshakeType}; + use crate::msgs::handshake::{HandshakeMessagePayload, HandshakePayload}; + use crate::msgs::message::{Message, MessagePayload, PlainMessage}; + + #[test] + fn want() { + let mut hj = HandshakeJoiner::new(); + let wanted = PlainMessage { + typ: ContentType::Handshake, + version: ProtocolVersion::TLSv1_2, + payload: Payload::new(b"\x00\x00\x00\x00".to_vec()), + }; + + let unwanted = PlainMessage { + typ: ContentType::Alert, + version: ProtocolVersion::TLSv1_2, + payload: Payload::new(b"ponytown".to_vec()), + }; + + 
hj.push(wanted).unwrap(); + hj.push(unwanted).unwrap_err(); + } + + fn pop_eq(expect: &PlainMessage, hj: &mut HandshakeJoiner) { + let got = hj.pop().unwrap().unwrap(); + assert_eq!(got.payload.content_type(), expect.typ); + assert_eq!(got.version, expect.version); + + let (mut left, mut right) = (Vec::new(), Vec::new()); + got.payload.encode(&mut left); + expect.payload.encode(&mut right); + + assert_eq!(left, right); + } + + #[test] + fn split() { + // Check we split two handshake messages within one PDU. + let mut hj = HandshakeJoiner::new(); + + // two HelloRequests + assert!(hj + .push(PlainMessage { + typ: ContentType::Handshake, + version: ProtocolVersion::TLSv1_2, + payload: Payload::new(b"\x00\x00\x00\x00\x00\x00\x00\x00".to_vec()), + }) + .unwrap()); + + let expect = Message { + version: ProtocolVersion::TLSv1_2, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::HelloRequest, + payload: HandshakePayload::HelloRequest, + }), + } + .into(); + + pop_eq(&expect, &mut hj); + pop_eq(&expect, &mut hj); + } + + #[test] + fn broken() { + // Check obvious crap payloads are reported as errors, not panics. + let mut hj = HandshakeJoiner::new(); + + // short ClientHello + hj.push(PlainMessage { + typ: ContentType::Handshake, + version: ProtocolVersion::TLSv1_2, + payload: Payload::new(b"\x01\x00\x00\x02\xff\xff".to_vec()), + }) + .unwrap(); + + hj.pop().unwrap_err(); + } + + #[test] + fn join() { + // Check we join one handshake message split over two PDUs. + let mut hj = HandshakeJoiner::new(); + + // Introduce Finished of 16 bytes, providing 4. + hj.push(PlainMessage { + typ: ContentType::Handshake, + version: ProtocolVersion::TLSv1_2, + payload: Payload::new(b"\x14\x00\x00\x10\x00\x01\x02\x03\x04".to_vec()), + }) + .unwrap(); + + // 11 more bytes. 
+ assert!(!hj + .push(PlainMessage { + typ: ContentType::Handshake, + version: ProtocolVersion::TLSv1_2, + payload: Payload::new(b"\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e".to_vec()), + }) + .unwrap()); + + // Final 1 byte. + assert!(hj + .push(PlainMessage { + typ: ContentType::Handshake, + version: ProtocolVersion::TLSv1_2, + payload: Payload::new(b"\x0f".to_vec()), + }) + .unwrap()); + + let payload = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f".to_vec(); + let expect = Message { + version: ProtocolVersion::TLSv1_2, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::Finished, + payload: HandshakePayload::Finished(Payload::new(payload)), + }), + } + .into(); + + pop_eq(&expect, &mut hj); + } + + #[test] + fn test_rejects_giant_certs() { + let mut hj = HandshakeJoiner::new(); + hj.push(PlainMessage { + typ: ContentType::Handshake, + version: ProtocolVersion::TLSv1_2, + payload: Payload::new(b"\x0b\x01\x00\x04\x01\x00\x01\x00\xff\xfe".to_vec()), + }) + .unwrap_err(); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/macros.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/macros.rs new file mode 100644 index 0000000000000000000000000000000000000000..da9ff6a22e05b1fa4d79d7ffae04616712402a6a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/macros.rs @@ -0,0 +1,88 @@ +/// A macro which defines an enum type. +macro_rules! 
enum_builder { + ( + $(#[$comment:meta])* + @U8 + EnumName: $enum_name: ident; + EnumVal { $( $enum_var: ident => $enum_val: expr ),* } + ) => { + $(#[$comment])* + #[derive(Debug, PartialEq, Eq, Clone, Copy)] + pub enum $enum_name { + $( $enum_var),* + ,Unknown(u8) + } + impl $enum_name { + pub fn get_u8(&self) -> u8 { + let x = self.clone(); + match x { + $( $enum_name::$enum_var => $enum_val),* + ,$enum_name::Unknown(x) => x + } + } + } + impl Codec for $enum_name { + fn encode(&self, bytes: &mut Vec) { + self.get_u8().encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + u8::read(r).map($enum_name::from) + } + } + impl From for $enum_name { + fn from(x: u8) -> Self { + match x { + $($enum_val => $enum_name::$enum_var),* + , x => $enum_name::Unknown(x), + } + } + } + }; + ( + $(#[$comment:meta])* + @U16 + EnumName: $enum_name: ident; + EnumVal { $( $enum_var: ident => $enum_val: expr ),* } + ) => { + $(#[$comment])* + #[derive(Debug, PartialEq, Eq, Clone, Copy)] + pub enum $enum_name { + $( $enum_var),* + ,Unknown(u16) + } + impl $enum_name { + pub fn get_u16(&self) -> u16 { + let x = self.clone(); + match x { + $( $enum_name::$enum_var => $enum_val),* + ,$enum_name::Unknown(x) => x + } + } + + pub fn as_str(&self) -> Option<&'static str> { + match self { + $( $enum_name::$enum_var => Some(stringify!($enum_var))),* + ,$enum_name::Unknown(_) => None, + } + } + } + impl Codec for $enum_name { + fn encode(&self, bytes: &mut Vec) { + self.get_u16().encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + u16::read(r).map($enum_name::from) + } + } + impl From for $enum_name { + fn from(x: u16) -> Self { + match x { + $($enum_val => $enum_name::$enum_var),* + , x => $enum_name::Unknown(x), + } + } + } + }; +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/message.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/message.rs new file mode 100644 index 
0000000000000000000000000000000000000000..a8b8308fa40b51caf848145ba3fb012dc92b1bd6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/message.rs @@ -0,0 +1,292 @@ +use crate::enums::ProtocolVersion; +use crate::error::Error; +use crate::msgs::alert::AlertMessagePayload; +use crate::msgs::base::Payload; +use crate::msgs::ccs::ChangeCipherSpecPayload; +use crate::msgs::codec::{Codec, Reader}; +use crate::msgs::enums::{AlertDescription, AlertLevel, ContentType, HandshakeType}; +use crate::msgs::handshake::HandshakeMessagePayload; + +use std::convert::TryFrom; + +#[derive(Debug)] +pub enum MessagePayload { + Alert(AlertMessagePayload), + Handshake { + parsed: HandshakeMessagePayload, + encoded: Payload, + }, + ChangeCipherSpec(ChangeCipherSpecPayload), + ApplicationData(Payload), +} + +impl MessagePayload { + pub fn encode(&self, bytes: &mut Vec) { + match self { + Self::Alert(x) => x.encode(bytes), + Self::Handshake { encoded, .. } => bytes.extend(&encoded.0), + Self::ChangeCipherSpec(x) => x.encode(bytes), + Self::ApplicationData(x) => x.encode(bytes), + } + } + + pub fn handshake(parsed: HandshakeMessagePayload) -> Self { + Self::Handshake { + encoded: Payload::new(parsed.get_encoding()), + parsed, + } + } + + pub fn new(typ: ContentType, vers: ProtocolVersion, payload: Payload) -> Result { + let mut r = Reader::init(&payload.0); + let parsed = match typ { + ContentType::ApplicationData => return Ok(Self::ApplicationData(payload)), + ContentType::Alert => AlertMessagePayload::read(&mut r) + .filter(|_| !r.any_left()) + .map(MessagePayload::Alert), + ContentType::Handshake => HandshakeMessagePayload::read_version(&mut r, vers) + .filter(|_| !r.any_left()) + .map(|parsed| Self::Handshake { + parsed, + encoded: payload, + }), + ContentType::ChangeCipherSpec => ChangeCipherSpecPayload::read(&mut r) + .filter(|_| !r.any_left()) + .map(MessagePayload::ChangeCipherSpec), + _ => None, + }; + + 
parsed.ok_or(Error::CorruptMessagePayload(typ)) + } + + pub fn content_type(&self) -> ContentType { + match self { + Self::Alert(_) => ContentType::Alert, + Self::Handshake { .. } => ContentType::Handshake, + Self::ChangeCipherSpec(_) => ContentType::ChangeCipherSpec, + Self::ApplicationData(_) => ContentType::ApplicationData, + } + } +} + +/// A TLS frame, named TLSPlaintext in the standard. +/// +/// This type owns all memory for its interior parts. It is used to read/write from/to I/O +/// buffers as well as for fragmenting, joining and encryption/decryption. It can be converted +/// into a `Message` by decoding the payload. +#[derive(Clone, Debug)] +pub struct OpaqueMessage { + pub typ: ContentType, + pub version: ProtocolVersion, + pub payload: Payload, +} + +impl OpaqueMessage { + /// `MessageError` allows callers to distinguish between valid prefixes (might + /// become valid if we read more data) and invalid data. + pub fn read(r: &mut Reader) -> Result { + let typ = ContentType::read(r).ok_or(MessageError::TooShortForHeader)?; + let version = ProtocolVersion::read(r).ok_or(MessageError::TooShortForHeader)?; + let len = u16::read(r).ok_or(MessageError::TooShortForHeader)?; + + // Reject undersize messages + // implemented per section 5.1 of RFC8446 (TLSv1.3) + // per section 6.2.1 of RFC5246 (TLSv1.2) + if typ != ContentType::ApplicationData && len == 0 { + return Err(MessageError::IllegalLength); + } + + // Reject oversize messages + if len >= Self::MAX_PAYLOAD { + return Err(MessageError::IllegalLength); + } + + // Don't accept any new content-types. + if let ContentType::Unknown(_) = typ { + return Err(MessageError::IllegalContentType); + } + + // Accept only versions 0x03XX for any XX. 
+ match version { + ProtocolVersion::Unknown(ref v) if (v & 0xff00) != 0x0300 => { + return Err(MessageError::IllegalProtocolVersion); + } + _ => {} + }; + + let mut sub = r + .sub(len as usize) + .ok_or(MessageError::TooShortForLength)?; + let payload = Payload::read(&mut sub); + + Ok(Self { + typ, + version, + payload, + }) + } + + pub fn encode(self) -> Vec { + let mut buf = Vec::new(); + self.typ.encode(&mut buf); + self.version.encode(&mut buf); + (self.payload.0.len() as u16).encode(&mut buf); + self.payload.encode(&mut buf); + buf + } + + /// Force conversion into a plaintext message. + /// + /// This should only be used for messages that are known to be in plaintext. Otherwise, the + /// `OpaqueMessage` should be decrypted into a `PlainMessage` using a `MessageDecrypter`. + pub fn into_plain_message(self) -> PlainMessage { + PlainMessage { + version: self.version, + typ: self.typ, + payload: self.payload, + } + } + + /// This is the maximum on-the-wire size of a TLSCiphertext. + /// That's 2^14 payload bytes, a header, and a 2KB allowance + /// for ciphertext overheads. + const MAX_PAYLOAD: u16 = 16384 + 2048; + + /// Content type, version and size. + const HEADER_SIZE: u16 = 1 + 2 + 2; + + /// Maximum on-wire message size. + pub const MAX_WIRE_SIZE: usize = (Self::MAX_PAYLOAD + Self::HEADER_SIZE) as usize; +} + +impl From for PlainMessage { + fn from(msg: Message) -> Self { + let typ = msg.payload.content_type(); + let payload = match msg.payload { + MessagePayload::ApplicationData(payload) => payload, + _ => { + let mut buf = Vec::new(); + msg.payload.encode(&mut buf); + Payload(buf) + } + }; + + Self { + typ, + version: msg.version, + payload, + } + } +} + +/// A decrypted TLS frame +/// +/// This type owns all memory for its interior parts. It can be decrypted from an OpaqueMessage +/// or encrypted into an OpaqueMessage, and it is also used for joining and fragmenting. 
+#[derive(Clone, Debug)] +pub struct PlainMessage { + pub typ: ContentType, + pub version: ProtocolVersion, + pub payload: Payload, +} + +impl PlainMessage { + pub fn into_unencrypted_opaque(self) -> OpaqueMessage { + OpaqueMessage { + version: self.version, + typ: self.typ, + payload: self.payload, + } + } + + pub fn borrow(&self) -> BorrowedPlainMessage<'_> { + BorrowedPlainMessage { + version: self.version, + typ: self.typ, + payload: &self.payload.0, + } + } +} + +/// A message with decoded payload +#[derive(Debug)] +pub struct Message { + pub version: ProtocolVersion, + pub payload: MessagePayload, +} + +impl Message { + pub fn is_handshake_type(&self, hstyp: HandshakeType) -> bool { + // Bit of a layering violation, but OK. + if let MessagePayload::Handshake { parsed, .. } = &self.payload { + parsed.typ == hstyp + } else { + false + } + } + + pub fn build_alert(level: AlertLevel, desc: AlertDescription) -> Self { + Self { + version: ProtocolVersion::TLSv1_2, + payload: MessagePayload::Alert(AlertMessagePayload { + level, + description: desc, + }), + } + } + + pub fn build_key_update_notify() -> Self { + Self { + version: ProtocolVersion::TLSv1_3, + payload: MessagePayload::handshake(HandshakeMessagePayload::build_key_update_notify()), + } + } +} + +/// Parses a plaintext message into a well-typed [`Message`]. +/// +/// A [`PlainMessage`] must contain plaintext content. Encrypted content should be stored in an +/// [`OpaqueMessage`] and decrypted before being stored into a [`PlainMessage`]. +impl TryFrom for Message { + type Error = Error; + + fn try_from(plain: PlainMessage) -> Result { + Ok(Self { + version: plain.version, + payload: MessagePayload::new(plain.typ, plain.version, plain.payload)?, + }) + } +} + +/// A TLS frame, named TLSPlaintext in the standard. +/// +/// This type differs from `OpaqueMessage` because it borrows +/// its payload. You can make a `OpaqueMessage` from an +/// `BorrowMessage`, but this involves a copy. 
+/// +/// This type also cannot decode its internals and +/// cannot be read/encoded; only `OpaqueMessage` can do that. +pub struct BorrowedPlainMessage<'a> { + pub typ: ContentType, + pub version: ProtocolVersion, + pub payload: &'a [u8], +} + +impl<'a> BorrowedPlainMessage<'a> { + pub fn to_unencrypted_opaque(&self) -> OpaqueMessage { + OpaqueMessage { + version: self.version, + typ: self.typ, + payload: Payload(self.payload.to_vec()), + } + } +} + +#[derive(Debug)] +pub enum MessageError { + TooShortForHeader, + TooShortForLength, + IllegalLength, + IllegalContentType, + IllegalProtocolVersion, +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/message_test.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/message_test.rs new file mode 100644 index 0000000000000000000000000000000000000000..a50df90acc02e5abc7d6568db423adcb2d0768a4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/message_test.rs @@ -0,0 +1,113 @@ +use crate::msgs::base::{PayloadU16, PayloadU24, PayloadU8}; + +use super::base::Payload; +use super::codec::Reader; +use super::enums::{AlertDescription, AlertLevel, HandshakeType}; +use super::message::{Message, OpaqueMessage, PlainMessage}; + +use std::convert::TryFrom; +use std::fs; +use std::io::Read; +use std::path::{Path, PathBuf}; + +#[test] +fn test_read_fuzz_corpus() { + fn corpus_dir() -> PathBuf { + let from_subcrate = Path::new("../fuzz/corpus/message"); + let from_root = Path::new("fuzz/corpus/message"); + + if from_root.is_dir() { + from_root.to_path_buf() + } else { + from_subcrate.to_path_buf() + } + } + + for file in fs::read_dir(corpus_dir()).unwrap() { + let mut f = fs::File::open(file.unwrap().path()).unwrap(); + let mut bytes = Vec::new(); + f.read_to_end(&mut bytes).unwrap(); + + let mut rd = Reader::init(&bytes); + let msg = OpaqueMessage::read(&mut rd) + .unwrap() + .into_plain_message(); + println!("{:?}", msg); 
+ + let msg = match Message::try_from(msg) { + Ok(msg) => msg, + Err(_) => continue, + }; + + let enc = PlainMessage::from(msg) + .into_unencrypted_opaque() + .encode(); + assert_eq!(bytes.to_vec(), enc); + assert_eq!(bytes[..rd.used()].to_vec(), enc); + } +} + +#[test] +fn can_read_safari_client_hello() { + let _ = env_logger::Builder::new() + .filter(None, log::LevelFilter::Trace) + .try_init(); + + let bytes = b"\ + \x16\x03\x01\x00\xeb\x01\x00\x00\xe7\x03\x03\xb6\x1f\xe4\x3a\x55\ + \x90\x3e\xc0\x28\x9c\x12\xe0\x5c\x84\xea\x90\x1b\xfb\x11\xfc\xbd\ + \x25\x55\xda\x9f\x51\x93\x1b\x8d\x92\x66\xfd\x00\x00\x2e\xc0\x2c\ + \xc0\x2b\xc0\x24\xc0\x23\xc0\x0a\xc0\x09\xcc\xa9\xc0\x30\xc0\x2f\ + \xc0\x28\xc0\x27\xc0\x14\xc0\x13\xcc\xa8\x00\x9d\x00\x9c\x00\x3d\ + \x00\x3c\x00\x35\x00\x2f\xc0\x08\xc0\x12\x00\x0a\x01\x00\x00\x90\ + \xff\x01\x00\x01\x00\x00\x00\x00\x0e\x00\x0c\x00\x00\x09\x31\x32\ + \x37\x2e\x30\x2e\x30\x2e\x31\x00\x17\x00\x00\x00\x0d\x00\x18\x00\ + \x16\x04\x03\x08\x04\x04\x01\x05\x03\x02\x03\x08\x05\x08\x05\x05\ + \x01\x08\x06\x06\x01\x02\x01\x00\x05\x00\x05\x01\x00\x00\x00\x00\ + \x33\x74\x00\x00\x00\x12\x00\x00\x00\x10\x00\x30\x00\x2e\x02\x68\ + \x32\x05\x68\x32\x2d\x31\x36\x05\x68\x32\x2d\x31\x35\x05\x68\x32\ + \x2d\x31\x34\x08\x73\x70\x64\x79\x2f\x33\x2e\x31\x06\x73\x70\x64\ + \x79\x2f\x33\x08\x68\x74\x74\x70\x2f\x31\x2e\x31\x00\x0b\x00\x02\ + \x01\x00\x00\x0a\x00\x0a\x00\x08\x00\x1d\x00\x17\x00\x18\x00\x19"; + let mut rd = Reader::init(bytes); + let m = OpaqueMessage::read(&mut rd).unwrap(); + println!("m = {:?}", m); + assert!(Message::try_from(m.into_plain_message()).is_err()); +} + +#[test] +fn alert_is_not_handshake() { + let m = Message::build_alert(AlertLevel::Fatal, AlertDescription::DecodeError); + assert!(!m.is_handshake_type(HandshakeType::ClientHello)); +} + +#[test] +fn alert_is_not_opaque() { + let m = Message::build_alert(AlertLevel::Fatal, AlertDescription::DecodeError); + assert!(Message::try_from(m).is_ok()); +} + +#[test] +fn 
construct_all_types() { + let samples = [ + &b"\x14\x03\x04\x00\x01\x01"[..], + &b"\x15\x03\x04\x00\x02\x01\x16"[..], + &b"\x16\x03\x04\x00\x05\x18\x00\x00\x01\x00"[..], + &b"\x17\x03\x04\x00\x04\x11\x22\x33\x44"[..], + &b"\x18\x03\x04\x00\x04\x11\x22\x33\x44"[..], + ]; + for &bytes in samples.iter() { + let m = OpaqueMessage::read(&mut Reader::init(bytes)).unwrap(); + println!("m = {:?}", m); + let m = Message::try_from(m.into_plain_message()); + println!("m' = {:?}", m); + } +} + +#[test] +fn debug_payload() { + assert_eq!("01020304", format!("{:?}", Payload(vec![1, 2, 3, 4]))); + assert_eq!("01020304", format!("{:?}", PayloadU8(vec![1, 2, 3, 4]))); + assert_eq!("01020304", format!("{:?}", PayloadU16(vec![1, 2, 3, 4]))); + assert_eq!("01020304", format!("{:?}", PayloadU24(vec![1, 2, 3, 4]))); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..655ff13aaca0923cb52d4cded01d4087903e3a5e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/mod.rs @@ -0,0 +1,51 @@ +#![allow(clippy::upper_case_acronyms)] +#![allow(missing_docs)] + +#[macro_use] +mod macros; + +pub mod alert; +pub mod base; +pub mod ccs; +pub mod codec; +pub mod deframer; +pub mod enums; +pub mod fragmenter; +pub mod handshake; +pub mod hsjoiner; +pub mod message; +pub mod persist; + +#[cfg(test)] +mod handshake_test; + +#[cfg(test)] +mod persist_test; + +#[cfg(test)] +pub(crate) mod enums_test; + +#[cfg(test)] +mod message_test; + +#[cfg(test)] +mod test { + use std::convert::TryFrom; + + #[test] + fn smoketest() { + use super::codec::Reader; + use super::message::{Message, OpaqueMessage}; + let bytes = include_bytes!("handshake-test.1.bin"); + let mut r = Reader::init(bytes); + + while r.any_left() { + let m = OpaqueMessage::read(&mut r).unwrap(); + + let out = 
m.clone().encode(); + assert!(!out.is_empty()); + + Message::try_from(m.into_plain_message()).unwrap(); + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/persist.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/persist.rs new file mode 100644 index 0000000000000000000000000000000000000000..e9bcbda89ed737707f2ff649bd0f5888f9083f7c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/persist.rs @@ -0,0 +1,543 @@ +use crate::client::ServerName; +use crate::enums::{CipherSuite, ProtocolVersion}; +use crate::key; +use crate::msgs::base::{PayloadU16, PayloadU8}; +use crate::msgs::codec::{Codec, Reader}; +use crate::msgs::handshake::CertificatePayload; +use crate::msgs::handshake::SessionID; +use crate::suites::SupportedCipherSuite; +use crate::ticketer::TimeBase; +#[cfg(feature = "tls12")] +use crate::tls12::Tls12CipherSuite; +use crate::tls13::Tls13CipherSuite; + +use std::cmp; +#[cfg(feature = "tls12")] +use std::mem; + +// These are the keys and values we store in session storage. + +// --- Client types --- +/// Keys for session resumption and tickets. +/// Matching value is a `ClientSessionValue`. +#[derive(Debug)] +pub struct ClientSessionKey { + kind: &'static [u8], + name: Vec, +} + +impl Codec for ClientSessionKey { + fn encode(&self, bytes: &mut Vec) { + bytes.extend_from_slice(self.kind); + bytes.extend_from_slice(&self.name); + } + + // Don't need to read these. 
+ fn read(_r: &mut Reader) -> Option { + None + } +} + +impl ClientSessionKey { + pub fn session_for_server_name(server_name: &ServerName) -> Self { + Self { + kind: b"session", + name: server_name.encode(), + } + } + + pub fn hint_for_server_name(server_name: &ServerName) -> Self { + Self { + kind: b"kx-hint", + name: server_name.encode(), + } + } +} + +#[derive(Debug)] +pub enum ClientSessionValue { + Tls13(Tls13ClientSessionValue), + #[cfg(feature = "tls12")] + Tls12(Tls12ClientSessionValue), +} + +impl ClientSessionValue { + pub fn read( + reader: &mut Reader<'_>, + suite: CipherSuite, + supported: &[SupportedCipherSuite], + ) -> Option { + match supported + .iter() + .find(|s| s.suite() == suite)? + { + SupportedCipherSuite::Tls13(inner) => { + Tls13ClientSessionValue::read(inner, reader).map(ClientSessionValue::Tls13) + } + #[cfg(feature = "tls12")] + SupportedCipherSuite::Tls12(inner) => { + Tls12ClientSessionValue::read(inner, reader).map(ClientSessionValue::Tls12) + } + } + } + + fn common(&self) -> &ClientSessionCommon { + match self { + Self::Tls13(inner) => &inner.common, + #[cfg(feature = "tls12")] + Self::Tls12(inner) => &inner.common, + } + } +} + +impl From for ClientSessionValue { + fn from(v: Tls13ClientSessionValue) -> Self { + Self::Tls13(v) + } +} + +#[cfg(feature = "tls12")] +impl From for ClientSessionValue { + fn from(v: Tls12ClientSessionValue) -> Self { + Self::Tls12(v) + } +} + +pub struct Retrieved { + pub value: T, + retrieved_at: TimeBase, +} + +impl Retrieved { + pub fn new(value: T, retrieved_at: TimeBase) -> Self { + Self { + value, + retrieved_at, + } + } +} + +impl Retrieved<&Tls13ClientSessionValue> { + pub fn obfuscated_ticket_age(&self) -> u32 { + let age_secs = self + .retrieved_at + .as_secs() + .saturating_sub(self.value.common.epoch); + let age_millis = age_secs as u32 * 1000; + age_millis.wrapping_add(self.value.age_add) + } +} + +impl Retrieved { + pub fn tls13(&self) -> Option> { + match &self.value { + 
ClientSessionValue::Tls13(value) => Some(Retrieved::new(value, self.retrieved_at)), + #[cfg(feature = "tls12")] + ClientSessionValue::Tls12(_) => None, + } + } + + pub fn has_expired(&self) -> bool { + let common = self.value.common(); + common.lifetime_secs != 0 + && common + .epoch + .saturating_add(u64::from(common.lifetime_secs)) + < self.retrieved_at.as_secs() + } +} + +impl std::ops::Deref for Retrieved { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.value + } +} + +#[derive(Debug)] +pub struct Tls13ClientSessionValue { + suite: &'static Tls13CipherSuite, + age_add: u32, + max_early_data_size: u32, + pub common: ClientSessionCommon, +} + +impl Tls13ClientSessionValue { + pub fn new( + suite: &'static Tls13CipherSuite, + ticket: Vec, + secret: Vec, + server_cert_chain: Vec, + time_now: TimeBase, + lifetime_secs: u32, + age_add: u32, + max_early_data_size: u32, + ) -> Self { + Self { + suite, + age_add, + max_early_data_size, + common: ClientSessionCommon::new( + ticket, + secret, + time_now, + lifetime_secs, + server_cert_chain, + ), + } + } + + /// [`Codec::read()`] with an extra `suite` argument. + /// + /// We decode the `suite` argument separately because it allows us to + /// decide whether we're decoding an 1.2 or 1.3 session value. + pub fn read(suite: &'static Tls13CipherSuite, r: &mut Reader) -> Option { + Some(Self { + suite, + age_add: u32::read(r)?, + max_early_data_size: u32::read(r)?, + common: ClientSessionCommon::read(r)?, + }) + } + + /// Inherent implementation of the [`Codec::get_encoding()`] method. + /// + /// (See `read()` for why this is inherent here.) 
+ pub fn get_encoding(&self) -> Vec { + let mut bytes = Vec::with_capacity(16); + self.suite + .common + .suite + .encode(&mut bytes); + self.age_add.encode(&mut bytes); + self.max_early_data_size + .encode(&mut bytes); + self.common.encode(&mut bytes); + bytes + } + + pub fn max_early_data_size(&self) -> u32 { + self.max_early_data_size + } + + pub fn suite(&self) -> &'static Tls13CipherSuite { + self.suite + } +} + +impl std::ops::Deref for Tls13ClientSessionValue { + type Target = ClientSessionCommon; + + fn deref(&self) -> &Self::Target { + &self.common + } +} + +#[cfg(feature = "tls12")] +#[derive(Debug)] +pub struct Tls12ClientSessionValue { + suite: &'static Tls12CipherSuite, + pub session_id: SessionID, + extended_ms: bool, + pub common: ClientSessionCommon, +} + +#[cfg(feature = "tls12")] +impl Tls12ClientSessionValue { + pub fn new( + suite: &'static Tls12CipherSuite, + session_id: SessionID, + ticket: Vec, + master_secret: Vec, + server_cert_chain: Vec, + time_now: TimeBase, + lifetime_secs: u32, + extended_ms: bool, + ) -> Self { + Self { + suite, + session_id, + extended_ms, + common: ClientSessionCommon::new( + ticket, + master_secret, + time_now, + lifetime_secs, + server_cert_chain, + ), + } + } + + /// [`Codec::read()`] with an extra `suite` argument. + /// + /// We decode the `suite` argument separately because it allows us to + /// decide whether we're decoding an 1.2 or 1.3 session value. + fn read(suite: &'static Tls12CipherSuite, r: &mut Reader) -> Option { + Some(Self { + suite, + session_id: SessionID::read(r)?, + extended_ms: u8::read(r)? == 1, + common: ClientSessionCommon::read(r)?, + }) + } + + /// Inherent implementation of the [`Codec::get_encoding()`] method. + /// + /// (See `read()` for why this is inherent here.) 
+ pub fn get_encoding(&self) -> Vec { + let mut bytes = Vec::with_capacity(16); + self.suite + .common + .suite + .encode(&mut bytes); + self.session_id.encode(&mut bytes); + (u8::from(self.extended_ms)).encode(&mut bytes); + self.common.encode(&mut bytes); + bytes + } + + pub fn take_ticket(&mut self) -> Vec { + mem::take(&mut self.common.ticket.0) + } + + pub fn extended_ms(&self) -> bool { + self.extended_ms + } + + pub fn suite(&self) -> &'static Tls12CipherSuite { + self.suite + } +} + +#[cfg(feature = "tls12")] +impl std::ops::Deref for Tls12ClientSessionValue { + type Target = ClientSessionCommon; + + fn deref(&self) -> &Self::Target { + &self.common + } +} + +#[derive(Debug)] +pub struct ClientSessionCommon { + ticket: PayloadU16, + secret: PayloadU8, + epoch: u64, + lifetime_secs: u32, + server_cert_chain: CertificatePayload, +} + +impl ClientSessionCommon { + fn new( + ticket: Vec, + secret: Vec, + time_now: TimeBase, + lifetime_secs: u32, + server_cert_chain: Vec, + ) -> Self { + Self { + ticket: PayloadU16(ticket), + secret: PayloadU8(secret), + epoch: time_now.as_secs(), + lifetime_secs: cmp::min(lifetime_secs, MAX_TICKET_LIFETIME), + server_cert_chain, + } + } + + /// [`Codec::read()`] is inherent here to avoid leaking the [`Codec`] + /// implementation through [`Deref`] implementations on + /// [`Tls12ClientSessionValue`] and [`Tls13ClientSessionValue`]. + fn read(r: &mut Reader) -> Option { + Some(Self { + ticket: PayloadU16::read(r)?, + secret: PayloadU8::read(r)?, + epoch: u64::read(r)?, + lifetime_secs: u32::read(r)?, + server_cert_chain: CertificatePayload::read(r)?, + }) + } + + /// [`Codec::encode()`] is inherent here to avoid leaking the [`Codec`] + /// implementation through [`Deref`] implementations on + /// [`Tls12ClientSessionValue`] and [`Tls13ClientSessionValue`]. 
+ fn encode(&self, bytes: &mut Vec) { + self.ticket.encode(bytes); + self.secret.encode(bytes); + self.epoch.encode(bytes); + self.lifetime_secs.encode(bytes); + self.server_cert_chain.encode(bytes); + } + + pub fn server_cert_chain(&self) -> &[key::Certificate] { + self.server_cert_chain.as_ref() + } + + pub fn secret(&self) -> &[u8] { + self.secret.0.as_ref() + } + + pub fn ticket(&self) -> &[u8] { + self.ticket.0.as_ref() + } + + /// Test only: wind back epoch by delta seconds. + pub fn rewind_epoch(&mut self, delta: u32) { + self.epoch -= delta as u64; + } +} + +static MAX_TICKET_LIFETIME: u32 = 7 * 24 * 60 * 60; + +/// This is the maximum allowed skew between server and client clocks, over +/// the maximum ticket lifetime period. This encompasses TCP retransmission +/// times in case packet loss occurs when the client sends the ClientHello +/// or receives the NewSessionTicket, _and_ actual clock skew over this period. +static MAX_FRESHNESS_SKEW_MS: u32 = 60 * 1000; + +// --- Server types --- +pub type ServerSessionKey = SessionID; + +#[derive(Debug)] +pub struct ServerSessionValue { + pub sni: Option, + pub version: ProtocolVersion, + pub cipher_suite: CipherSuite, + pub master_secret: PayloadU8, + pub extended_ms: bool, + pub client_cert_chain: Option, + pub alpn: Option, + pub application_data: PayloadU16, + pub creation_time_sec: u64, + pub age_obfuscation_offset: u32, + freshness: Option, +} + +impl Codec for ServerSessionValue { + fn encode(&self, bytes: &mut Vec) { + if let Some(ref sni) = self.sni { + 1u8.encode(bytes); + let sni_bytes: &str = sni.as_ref().into(); + PayloadU8::new(Vec::from(sni_bytes)).encode(bytes); + } else { + 0u8.encode(bytes); + } + self.version.encode(bytes); + self.cipher_suite.encode(bytes); + self.master_secret.encode(bytes); + (u8::from(self.extended_ms)).encode(bytes); + if let Some(ref chain) = self.client_cert_chain { + 1u8.encode(bytes); + chain.encode(bytes); + } else { + 0u8.encode(bytes); + } + if let Some(ref alpn) = 
self.alpn { + 1u8.encode(bytes); + alpn.encode(bytes); + } else { + 0u8.encode(bytes); + } + self.application_data.encode(bytes); + self.creation_time_sec.encode(bytes); + self.age_obfuscation_offset + .encode(bytes); + } + + fn read(r: &mut Reader) -> Option { + let has_sni = u8::read(r)?; + let sni = if has_sni == 1 { + let dns_name = PayloadU8::read(r)?; + let dns_name = webpki::DnsNameRef::try_from_ascii(&dns_name.0).ok()?; + Some(dns_name.into()) + } else { + None + }; + let v = ProtocolVersion::read(r)?; + let cs = CipherSuite::read(r)?; + let ms = PayloadU8::read(r)?; + let ems = u8::read(r)?; + let has_ccert = u8::read(r)? == 1; + let ccert = if has_ccert { + Some(CertificatePayload::read(r)?) + } else { + None + }; + let has_alpn = u8::read(r)? == 1; + let alpn = if has_alpn { + Some(PayloadU8::read(r)?) + } else { + None + }; + let application_data = PayloadU16::read(r)?; + let creation_time_sec = u64::read(r)?; + let age_obfuscation_offset = u32::read(r)?; + + Some(Self { + sni, + version: v, + cipher_suite: cs, + master_secret: ms, + extended_ms: ems == 1u8, + client_cert_chain: ccert, + alpn, + application_data, + creation_time_sec, + age_obfuscation_offset, + freshness: None, + }) + } +} + +impl ServerSessionValue { + pub fn new( + sni: Option<&webpki::DnsName>, + v: ProtocolVersion, + cs: CipherSuite, + ms: Vec, + client_cert_chain: Option, + alpn: Option>, + application_data: Vec, + creation_time: TimeBase, + age_obfuscation_offset: u32, + ) -> Self { + Self { + sni: sni.cloned(), + version: v, + cipher_suite: cs, + master_secret: PayloadU8::new(ms), + extended_ms: false, + client_cert_chain, + alpn: alpn.map(PayloadU8::new), + application_data: PayloadU16::new(application_data), + creation_time_sec: creation_time.as_secs(), + age_obfuscation_offset, + freshness: None, + } + } + + pub fn set_extended_ms_used(&mut self) { + self.extended_ms = true; + } + + pub fn set_freshness(mut self, obfuscated_client_age_ms: u32, time_now: TimeBase) -> Self { + 
let client_age_ms = obfuscated_client_age_ms.wrapping_sub(self.age_obfuscation_offset); + let server_age_ms = (time_now + .as_secs() + .saturating_sub(self.creation_time_sec) as u32) + .saturating_mul(1000); + + let age_difference = if client_age_ms < server_age_ms { + server_age_ms - client_age_ms + } else { + client_age_ms - server_age_ms + }; + + self.freshness = Some(age_difference <= MAX_FRESHNESS_SKEW_MS); + self + } + + pub fn is_fresh(&self) -> bool { + self.freshness.unwrap_or_default() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/persist_test.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/persist_test.rs new file mode 100644 index 0000000000000000000000000000000000000000..c4a4165de9c7256a6a82efcee9ae8990a38fe71c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/msgs/persist_test.rs @@ -0,0 +1,78 @@ +use super::codec::{Codec, Reader}; +use super::persist::*; +use crate::enums::*; + +use crate::key::Certificate; +use crate::ticketer::TimeBase; +use crate::tls13::TLS13_AES_128_GCM_SHA256; + +use std::convert::TryInto; + +#[test] +fn clientsessionkey_is_debug() { + let name = "hello".try_into().unwrap(); + let csk = ClientSessionKey::session_for_server_name(&name); + println!("{:?}", csk); +} + +#[test] +fn clientsessionkey_cannot_be_read() { + let bytes = [0; 1]; + let mut rd = Reader::init(&bytes); + assert!(ClientSessionKey::read(&mut rd).is_none()); +} + +#[test] +fn clientsessionvalue_is_debug() { + let csv = ClientSessionValue::from(Tls13ClientSessionValue::new( + TLS13_AES_128_GCM_SHA256 + .tls13() + .unwrap(), + vec![], + vec![1, 2, 3], + vec![Certificate(b"abc".to_vec()), Certificate(b"def".to_vec())], + TimeBase::now().unwrap(), + 15, + 10, + 128, + )); + println!("{:?}", csv); +} + +#[test] +fn serversessionvalue_is_debug() { + let ssv = ServerSessionValue::new( + None, + ProtocolVersion::TLSv1_3, + 
CipherSuite::TLS13_AES_128_GCM_SHA256, + vec![1, 2, 3], + None, + None, + vec![4, 5, 6], + TimeBase::now().unwrap(), + 0x12345678, + ); + println!("{:?}", ssv); +} + +#[test] +fn serversessionvalue_no_sni() { + let bytes = [ + 0x00, 0x03, 0x03, 0xc0, 0x23, 0x03, 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, + 0x23, 0x34, 0x45, 0x56, 0x67, 0x78, 0x89, 0xfe, 0xed, 0xf0, 0x0d, + ]; + let mut rd = Reader::init(&bytes); + let ssv = ServerSessionValue::read(&mut rd).unwrap(); + assert_eq!(ssv.get_encoding(), bytes); +} + +#[test] +fn serversessionvalue_with_cert() { + let bytes = [ + 0x00, 0x03, 0x03, 0xc0, 0x23, 0x03, 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, + 0x23, 0x34, 0x45, 0x56, 0x67, 0x78, 0x89, 0xfe, 0xed, 0xf0, 0x0d, + ]; + let mut rd = Reader::init(&bytes); + let ssv = ServerSessionValue::read(&mut rd).unwrap(); + assert_eq!(ssv.get_encoding(), bytes); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/quic.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/quic.rs new file mode 100644 index 0000000000000000000000000000000000000000..1621495668f24bb5547b993f904b4c3c4ca87468 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/quic.rs @@ -0,0 +1,620 @@ +/// This module contains optional APIs for implementing QUIC TLS. 
+use crate::cipher::{Iv, IvLen}; +pub use crate::client::ClientQuicExt; +use crate::conn::CommonState; +use crate::error::Error; +use crate::msgs::enums::AlertDescription; +pub use crate::server::ServerQuicExt; +use crate::suites::BulkAlgorithm; +use crate::tls13::key_schedule::hkdf_expand; +use crate::tls13::{Tls13CipherSuite, TLS13_AES_128_GCM_SHA256_INTERNAL}; +use std::fmt::Debug; + +use ring::{aead, hkdf}; + +/// Secrets used to encrypt/decrypt traffic +#[derive(Clone, Debug)] +pub struct Secrets { + /// Secret used to encrypt packets transmitted by the client + client: hkdf::Prk, + /// Secret used to encrypt packets transmitted by the server + server: hkdf::Prk, + /// Cipher suite used with these secrets + suite: &'static Tls13CipherSuite, + is_client: bool, +} + +impl Secrets { + pub(crate) fn new( + client: hkdf::Prk, + server: hkdf::Prk, + suite: &'static Tls13CipherSuite, + is_client: bool, + ) -> Self { + Self { + client, + server, + suite, + is_client, + } + } + + /// Derive the next set of packet keys + pub fn next_packet_keys(&mut self) -> PacketKeySet { + let keys = PacketKeySet::new(self); + self.update(); + keys + } + + fn update(&mut self) { + let hkdf_alg = self.suite.hkdf_algorithm; + self.client = hkdf_expand(&self.client, hkdf_alg, b"quic ku", &[]); + self.server = hkdf_expand(&self.server, hkdf_alg, b"quic ku", &[]); + } + + fn local_remote(&self) -> (&hkdf::Prk, &hkdf::Prk) { + if self.is_client { + (&self.client, &self.server) + } else { + (&self.server, &self.client) + } + } +} + +/// Generic methods for QUIC sessions +pub trait QuicExt { + /// Return the TLS-encoded transport parameters for the session's peer. + /// + /// While the transport parameters are technically available prior to the + /// completion of the handshake, they cannot be fully trusted until the + /// handshake completes, and reliance on them should be minimized. + /// However, any tampering with the parameters will cause the handshake + /// to fail. 
+ fn quic_transport_parameters(&self) -> Option<&[u8]>; + + /// Compute the keys for encrypting/decrypting 0-RTT packets, if available + fn zero_rtt_keys(&self) -> Option; + + /// Consume unencrypted TLS handshake data. + /// + /// Handshake data obtained from separate encryption levels should be supplied in separate calls. + fn read_hs(&mut self, plaintext: &[u8]) -> Result<(), Error>; + + /// Emit unencrypted TLS handshake data. + /// + /// When this returns `Some(_)`, the new keys must be used for future handshake data. + fn write_hs(&mut self, buf: &mut Vec) -> Option; + + /// Emit the TLS description code of a fatal alert, if one has arisen. + /// + /// Check after `read_hs` returns `Err(_)`. + fn alert(&self) -> Option; +} + +/// Keys used to communicate in a single direction +pub struct DirectionalKeys { + /// Encrypts or decrypts a packet's headers + pub header: HeaderProtectionKey, + /// Encrypts or decrypts the payload of a packet + pub packet: PacketKey, +} + +impl DirectionalKeys { + pub(crate) fn new(suite: &'static Tls13CipherSuite, secret: &hkdf::Prk) -> Self { + Self { + header: HeaderProtectionKey::new(suite, secret), + packet: PacketKey::new(suite, secret), + } + } +} + +/// A QUIC header protection key +pub struct HeaderProtectionKey(aead::quic::HeaderProtectionKey); + +impl HeaderProtectionKey { + fn new(suite: &'static Tls13CipherSuite, secret: &hkdf::Prk) -> Self { + let alg = match suite.common.bulk { + BulkAlgorithm::Aes128Gcm => &aead::quic::AES_128, + BulkAlgorithm::Aes256Gcm => &aead::quic::AES_256, + BulkAlgorithm::Chacha20Poly1305 => &aead::quic::CHACHA20, + }; + + Self(hkdf_expand(secret, alg, b"quic hp", &[])) + } + + /// Adds QUIC Header Protection. + /// + /// `sample` must contain the sample of encrypted payload; see + /// [Header Protection Sample]. + /// + /// `first` must reference the first byte of the header, referred to as + /// `packet[0]` in [Header Protection Application]. 
+ /// + /// `packet_number` must reference the Packet Number field; this is + /// `packet[pn_offset:pn_offset+pn_length]` in [Header Protection Application]. + /// + /// Returns an error without modifying anything if `sample` is not + /// the correct length (see [Header Protection Sample] and [`Self::sample_len()`]), + /// or `packet_number` is longer than allowed (see [Packet Number Encoding and Decoding]). + /// + /// Otherwise, `first` and `packet_number` will have the header protection added. + /// + /// [Header Protection Application]: https://datatracker.ietf.org/doc/html/rfc9001#section-5.4.1 + /// [Header Protection Sample]: https://datatracker.ietf.org/doc/html/rfc9001#section-5.4.2 + /// [Packet Number Encoding and Decoding]: https://datatracker.ietf.org/doc/html/rfc9000#section-17.1 + #[inline] + pub fn encrypt_in_place( + &self, + sample: &[u8], + first: &mut u8, + packet_number: &mut [u8], + ) -> Result<(), Error> { + self.xor_in_place(sample, first, packet_number, false) + } + + /// Removes QUIC Header Protection. + /// + /// `sample` must contain the sample of encrypted payload; see + /// [Header Protection Sample]. + /// + /// `first` must reference the first byte of the header, referred to as + /// `packet[0]` in [Header Protection Application]. + /// + /// `packet_number` must reference the Packet Number field; this is + /// `packet[pn_offset:pn_offset+pn_length]` in [Header Protection Application]. + /// + /// Returns an error without modifying anything if `sample` is not + /// the correct length (see [Header Protection Sample] and [`Self::sample_len()`]), + /// or `packet_number` is longer than allowed (see + /// [Packet Number Encoding and Decoding]). + /// + /// Otherwise, `first` and `packet_number` will have the header protection removed. 
+ /// + /// [Header Protection Application]: https://datatracker.ietf.org/doc/html/rfc9001#section-5.4.1 + /// [Header Protection Sample]: https://datatracker.ietf.org/doc/html/rfc9001#section-5.4.2 + /// [Packet Number Encoding and Decoding]: https://datatracker.ietf.org/doc/html/rfc9000#section-17.1 + #[inline] + pub fn decrypt_in_place( + &self, + sample: &[u8], + first: &mut u8, + packet_number: &mut [u8], + ) -> Result<(), Error> { + self.xor_in_place(sample, first, packet_number, true) + } + + fn xor_in_place( + &self, + sample: &[u8], + first: &mut u8, + packet_number: &mut [u8], + masked: bool, + ) -> Result<(), Error> { + // This implements [Header Protection Application] almost verbatim. + + let mask = self + .0 + .new_mask(sample) + .map_err(|_| Error::General("sample of invalid length".into()))?; + + // The `unwrap()` will not panic because `new_mask` returns a + // non-empty result. + let (first_mask, pn_mask) = mask.split_first().unwrap(); + + // It is OK for the `mask` to be longer than `packet_number`, + // but a valid `packet_number` will never be longer than `mask`. + if packet_number.len() > pn_mask.len() { + return Err(Error::General("packet number too long".into())); + } + + // Infallible from this point on. Before this point, `first` and + // `packet_number` are unchanged. 
+ + const LONG_HEADER_FORM: u8 = 0x80; + let bits = match *first & LONG_HEADER_FORM == LONG_HEADER_FORM { + true => 0x0f, // Long header: 4 bits masked + false => 0x1f, // Short header: 5 bits masked + }; + + let first_plain = match masked { + // When unmasking, use the packet length bits after unmasking + true => *first ^ (first_mask & bits), + // When masking, use the packet length bits before masking + false => *first, + }; + let pn_len = (first_plain & 0x03) as usize + 1; + + *first ^= first_mask & bits; + for (dst, m) in packet_number + .iter_mut() + .zip(pn_mask) + .take(pn_len) + { + *dst ^= m; + } + + Ok(()) + } + + /// Expected sample length for the key's algorithm + #[inline] + pub fn sample_len(&self) -> usize { + self.0.algorithm().sample_len() + } +} + +/// Keys to encrypt or decrypt the payload of a packet +pub struct PacketKey { + /// Encrypts or decrypts a packet's payload + key: aead::LessSafeKey, + /// Computes unique nonces for each packet + iv: Iv, + /// The cipher suite used for this packet key + suite: &'static Tls13CipherSuite, +} + +impl PacketKey { + fn new(suite: &'static Tls13CipherSuite, secret: &hkdf::Prk) -> Self { + Self { + key: aead::LessSafeKey::new(hkdf_expand( + secret, + suite.common.aead_algorithm, + b"quic key", + &[], + )), + iv: hkdf_expand(secret, IvLen, b"quic iv", &[]), + suite, + } + } + + /// Encrypt a QUIC packet + /// + /// Takes a `packet_number`, used to derive the nonce; the packet `header`, which is used as + /// the additional authenticated data; and the `payload`. The authentication tag is returned if + /// encryption succeeds. + /// + /// Fails iff the payload is longer than allowed by the cipher suite's AEAD algorithm. 
+ pub fn encrypt_in_place( + &self, + packet_number: u64, + header: &[u8], + payload: &mut [u8], + ) -> Result { + let aad = aead::Aad::from(header); + let nonce = nonce_for(packet_number, &self.iv); + let tag = self + .key + .seal_in_place_separate_tag(nonce, aad, payload) + .map_err(|_| Error::EncryptError)?; + Ok(Tag(tag)) + } + + /// Decrypt a QUIC packet + /// + /// Takes the packet `header`, which is used as the additional authenticated data, and the + /// `payload`, which includes the authentication tag. + /// + /// If the return value is `Ok`, the decrypted payload can be found in `payload`, up to the + /// length found in the return value. + pub fn decrypt_in_place<'a>( + &self, + packet_number: u64, + header: &[u8], + payload: &'a mut [u8], + ) -> Result<&'a [u8], Error> { + let payload_len = payload.len(); + let aad = aead::Aad::from(header); + let nonce = nonce_for(packet_number, &self.iv); + self.key + .open_in_place(nonce, aad, payload) + .map_err(|_| Error::DecryptError)?; + + let plain_len = payload_len - self.key.algorithm().tag_len(); + Ok(&payload[..plain_len]) + } + + /// Number of times the packet key can be used without sacrificing confidentiality + /// + /// See . + #[inline] + pub fn confidentiality_limit(&self) -> u64 { + self.suite.confidentiality_limit + } + + /// Number of times the packet key can be used without sacrificing integrity + /// + /// See . 
+ #[inline] + pub fn integrity_limit(&self) -> u64 { + self.suite.integrity_limit + } + + /// Tag length for the underlying AEAD algorithm + #[inline] + pub fn tag_len(&self) -> usize { + self.key.algorithm().tag_len() + } +} + +/// AEAD tag, must be appended to encrypted cipher text +pub struct Tag(aead::Tag); + +impl AsRef<[u8]> for Tag { + #[inline] + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +/// Packet protection keys for bidirectional 1-RTT communication +pub struct PacketKeySet { + /// Encrypts outgoing packets + pub local: PacketKey, + /// Decrypts incoming packets + pub remote: PacketKey, +} + +impl PacketKeySet { + fn new(secrets: &Secrets) -> Self { + let (local, remote) = secrets.local_remote(); + Self { + local: PacketKey::new(secrets.suite, local), + remote: PacketKey::new(secrets.suite, remote), + } + } +} + +/// Complete set of keys used to communicate with the peer +pub struct Keys { + /// Encrypts outgoing packets + pub local: DirectionalKeys, + /// Decrypts incoming packets + pub remote: DirectionalKeys, +} + +impl Keys { + /// Construct keys for use with initial packets + pub fn initial(version: Version, client_dst_connection_id: &[u8], is_client: bool) -> Self { + const CLIENT_LABEL: &[u8] = b"client in"; + const SERVER_LABEL: &[u8] = b"server in"; + let salt = version.initial_salt(); + let hs_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, salt).extract(client_dst_connection_id); + + let secrets = Secrets { + client: hkdf_expand(&hs_secret, hkdf::HKDF_SHA256, CLIENT_LABEL, &[]), + server: hkdf_expand(&hs_secret, hkdf::HKDF_SHA256, SERVER_LABEL, &[]), + suite: TLS13_AES_128_GCM_SHA256_INTERNAL, + is_client, + }; + Self::new(&secrets) + } + + fn new(secrets: &Secrets) -> Self { + let (local, remote) = secrets.local_remote(); + Self { + local: DirectionalKeys::new(secrets.suite, local), + remote: DirectionalKeys::new(secrets.suite, remote), + } + } +} + +pub(crate) fn write_hs(this: &mut CommonState, buf: &mut Vec) -> Option { + while 
let Some((_, msg)) = this.quic.hs_queue.pop_front() { + buf.extend_from_slice(&msg); + if let Some(&(true, _)) = this.quic.hs_queue.front() { + if this.quic.hs_secrets.is_some() { + // Allow the caller to switch keys before proceeding. + break; + } + } + } + + if let Some(secrets) = this.quic.hs_secrets.take() { + return Some(KeyChange::Handshake { + keys: Keys::new(&secrets), + }); + } + + if let Some(mut secrets) = this.quic.traffic_secrets.take() { + if !this.quic.returned_traffic_keys { + this.quic.returned_traffic_keys = true; + let keys = Keys::new(&secrets); + secrets.update(); + return Some(KeyChange::OneRtt { + keys, + next: secrets, + }); + } + } + + None +} + +/// Key material for use in QUIC packet spaces +/// +/// QUIC uses 4 different sets of keys (and progressive key updates for long-running connections): +/// +/// * Initial: these can be created from [`Keys::initial()`] +/// * 0-RTT keys: can be retrieved from [`QuicExt::zero_rtt_keys()`] +/// * Handshake: these are returned from [`QuicExt::write_hs()`] after `ClientHello` and +/// `ServerHello` messages have been exchanged +/// * 1-RTT keys: these are returned from [`QuicExt::write_hs()`] after the handshake is done +/// +/// Once the 1-RTT keys have been exchanged, either side may initiate a key update. Progressive +/// update keys can be obtained from the [`Secrets`] returned in [`KeyChange::OneRtt`]. Note that +/// only packet keys are updated by key updates; header protection keys remain the same. 
+#[allow(clippy::large_enum_variant)] +pub enum KeyChange { + /// Keys for the handshake space + Handshake { + /// Header and packet keys for the handshake space + keys: Keys, + }, + /// Keys for 1-RTT data + OneRtt { + /// Header and packet keys for 1-RTT data + keys: Keys, + /// Secrets to derive updated keys from + next: Secrets, + }, +} + +/// Compute the nonce to use for encrypting or decrypting `packet_number` +fn nonce_for(packet_number: u64, iv: &Iv) -> ring::aead::Nonce { + let mut out = [0; aead::NONCE_LEN]; + out[4..].copy_from_slice(&packet_number.to_be_bytes()); + for (out, inp) in out.iter_mut().zip(iv.0.iter()) { + *out ^= inp; + } + aead::Nonce::assume_unique_for_key(out) +} + +/// QUIC protocol version +/// +/// Governs version-specific behavior in the TLS layer +#[non_exhaustive] +#[derive(Clone, Copy, Debug)] +pub enum Version { + /// Draft versions 29, 30, 31 and 32 + V1Draft, + /// First stable RFC + V1, +} + +impl Version { + fn initial_salt(self) -> &'static [u8; 20] { + match self { + Self::V1Draft => &[ + // https://datatracker.ietf.org/doc/html/draft-ietf-quic-tls-32#section-5.2 + 0xaf, 0xbf, 0xec, 0x28, 0x99, 0x93, 0xd2, 0x4c, 0x9e, 0x97, 0x86, 0xf1, 0x9c, 0x61, + 0x11, 0xe0, 0x43, 0x90, 0xa8, 0x99, + ], + Self::V1 => &[ + // https://www.rfc-editor.org/rfc/rfc9001.html#name-initial-secrets + 0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, 0x9a, 0xe6, 0xa4, 0xc8, + 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a, + ], + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn short_packet_header_protection() { + // https://www.rfc-editor.org/rfc/rfc9001.html#name-chacha20-poly1305-short-hea + + const PN: u64 = 654360564; + const SECRET: &[u8] = &[ + 0x9a, 0xc3, 0x12, 0xa7, 0xf8, 0x77, 0x46, 0x8e, 0xbe, 0x69, 0x42, 0x27, 0x48, 0xad, + 0x00, 0xa1, 0x54, 0x43, 0xf1, 0x82, 0x03, 0xa0, 0x7d, 0x60, 0x60, 0xf6, 0x88, 0xf3, + 0x0f, 0x21, 0x63, 0x2b, + ]; + + let secret = hkdf::Prk::new_less_safe(hkdf::HKDF_SHA256, SECRET); + use 
crate::tls13::TLS13_CHACHA20_POLY1305_SHA256_INTERNAL; + let hpk = HeaderProtectionKey::new(TLS13_CHACHA20_POLY1305_SHA256_INTERNAL, &secret); + let packet = PacketKey::new(TLS13_CHACHA20_POLY1305_SHA256_INTERNAL, &secret); + + const PLAIN: &[u8] = &[0x42, 0x00, 0xbf, 0xf4, 0x01]; + + let mut buf = PLAIN.to_vec(); + let (header, payload) = buf.split_at_mut(4); + let tag = packet + .encrypt_in_place(PN, &*header, payload) + .unwrap(); + buf.extend(tag.as_ref()); + + let pn_offset = 1; + let (header, sample) = buf.split_at_mut(pn_offset + 4); + let (first, rest) = header.split_at_mut(1); + let sample = &sample[..hpk.sample_len()]; + hpk.encrypt_in_place(sample, &mut first[0], dbg!(rest)) + .unwrap(); + + const PROTECTED: &[u8] = &[ + 0x4c, 0xfe, 0x41, 0x89, 0x65, 0x5e, 0x5c, 0xd5, 0x5c, 0x41, 0xf6, 0x90, 0x80, 0x57, + 0x5d, 0x79, 0x99, 0xc2, 0x5a, 0x5b, 0xfb, + ]; + + assert_eq!(&buf, PROTECTED); + + let (header, sample) = buf.split_at_mut(pn_offset + 4); + let (first, rest) = header.split_at_mut(1); + let sample = &sample[..hpk.sample_len()]; + hpk.decrypt_in_place(sample, &mut first[0], rest) + .unwrap(); + + let (header, payload_tag) = buf.split_at_mut(4); + let plain = packet + .decrypt_in_place(PN, &*header, payload_tag) + .unwrap(); + + assert_eq!(plain, &PLAIN[4..]); + } + + #[test] + fn key_update_test_vector() { + fn equal_prk(x: &hkdf::Prk, y: &hkdf::Prk) -> bool { + let mut x_data = [0; 16]; + let mut y_data = [0; 16]; + let x_okm = x + .expand(&[b"info"], &aead::quic::AES_128) + .unwrap(); + x_okm.fill(&mut x_data[..]).unwrap(); + let y_okm = y + .expand(&[b"info"], &aead::quic::AES_128) + .unwrap(); + y_okm.fill(&mut y_data[..]).unwrap(); + x_data == y_data + } + + let mut secrets = Secrets { + // Constant dummy values for reproducibility + client: hkdf::Prk::new_less_safe( + hkdf::HKDF_SHA256, + &[ + 0xb8, 0x76, 0x77, 0x08, 0xf8, 0x77, 0x23, 0x58, 0xa6, 0xea, 0x9f, 0xc4, 0x3e, + 0x4a, 0xdd, 0x2c, 0x96, 0x1b, 0x3f, 0x52, 0x87, 0xa6, 0xd1, 0x46, 0x7e, 
0xe0, + 0xae, 0xab, 0x33, 0x72, 0x4d, 0xbf, + ], + ), + server: hkdf::Prk::new_less_safe( + hkdf::HKDF_SHA256, + &[ + 0x42, 0xdc, 0x97, 0x21, 0x40, 0xe0, 0xf2, 0xe3, 0x98, 0x45, 0xb7, 0x67, 0x61, + 0x34, 0x39, 0xdc, 0x67, 0x58, 0xca, 0x43, 0x25, 0x9b, 0x87, 0x85, 0x06, 0x82, + 0x4e, 0xb1, 0xe4, 0x38, 0xd8, 0x55, + ], + ), + suite: TLS13_AES_128_GCM_SHA256_INTERNAL, + is_client: true, + }; + secrets.update(); + + assert!(equal_prk( + &secrets.client, + &hkdf::Prk::new_less_safe( + hkdf::HKDF_SHA256, + &[ + 0x42, 0xca, 0xc8, 0xc9, 0x1c, 0xd5, 0xeb, 0x40, 0x68, 0x2e, 0x43, 0x2e, 0xdf, + 0x2d, 0x2b, 0xe9, 0xf4, 0x1a, 0x52, 0xca, 0x6b, 0x22, 0xd8, 0xe6, 0xcd, 0xb1, + 0xe8, 0xac, 0xa9, 0x6, 0x1f, 0xce + ] + ) + )); + assert!(equal_prk( + &secrets.server, + &hkdf::Prk::new_less_safe( + hkdf::HKDF_SHA256, + &[ + 0xeb, 0x7f, 0x5e, 0x2a, 0x12, 0x3f, 0x40, 0x7d, 0xb4, 0x99, 0xe3, 0x61, 0xca, + 0xe5, 0x90, 0xd4, 0xd9, 0x92, 0xe1, 0x4b, 0x7a, 0xce, 0x3, 0xc2, 0x44, 0xe0, + 0x42, 0x21, 0x15, 0xb6, 0xd3, 0x8a + ] + ) + )); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/rand.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/rand.rs new file mode 100644 index 0000000000000000000000000000000000000000..14cde00a6e68e1cf7030b0c1b504ba49209c1b0f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/rand.rs @@ -0,0 +1,30 @@ +use crate::msgs::codec; +/// The single place where we generate random material +/// for our own use. These functions never fail, +/// they panic on error. +use ring::rand::{SecureRandom, SystemRandom}; + +/// Fill the whole slice with random material. +pub(crate) fn fill_random(bytes: &mut [u8]) -> Result<(), GetRandomFailed> { + SystemRandom::new() + .fill(bytes) + .map_err(|_| GetRandomFailed) +} + +/// Make a Vec of the given size +/// containing random material. 
+pub(crate) fn random_vec(len: usize) -> Result, GetRandomFailed> { + let mut v = vec![0; len]; + fill_random(&mut v)?; + Ok(v) +} + +/// Return a uniformly random u32. +pub(crate) fn random_u32() -> Result { + let mut buf = [0u8; 4]; + fill_random(&mut buf)?; + codec::decode_u32(&buf).ok_or(GetRandomFailed) +} + +#[derive(Debug)] +pub struct GetRandomFailed; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/record_layer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/record_layer.rs new file mode 100644 index 0000000000000000000000000000000000000000..e525772537a3330b30c80a74f1de7a75ce466c19 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/record_layer.rs @@ -0,0 +1,192 @@ +use crate::cipher::{MessageDecrypter, MessageEncrypter}; +use crate::error::Error; +use crate::msgs::message::{BorrowedPlainMessage, OpaqueMessage, PlainMessage}; + +static SEQ_SOFT_LIMIT: u64 = 0xffff_ffff_ffff_0000u64; +static SEQ_HARD_LIMIT: u64 = 0xffff_ffff_ffff_fffeu64; + +#[derive(PartialEq)] +enum DirectionState { + /// No keying material. + Invalid, + + /// Keying material present, but not yet in use. + Prepared, + + /// Keying material in use. + Active, +} + +pub(crate) struct RecordLayer { + message_encrypter: Box, + message_decrypter: Box, + write_seq: u64, + read_seq: u64, + encrypt_state: DirectionState, + decrypt_state: DirectionState, + + // Message encrypted with other keys may be encountered, so failures + // should be swallowed by the caller. This struct tracks the amount + // of message size this is allowed for. 
+ trial_decryption_len: Option, +} + +impl RecordLayer { + pub(crate) fn new() -> Self { + Self { + message_encrypter: ::invalid(), + message_decrypter: ::invalid(), + write_seq: 0, + read_seq: 0, + encrypt_state: DirectionState::Invalid, + decrypt_state: DirectionState::Invalid, + trial_decryption_len: None, + } + } + + pub(crate) fn is_encrypting(&self) -> bool { + self.encrypt_state == DirectionState::Active + } + + pub(crate) fn is_decrypting(&self) -> bool { + self.decrypt_state == DirectionState::Active + } + + #[cfg(feature = "secret_extraction")] + pub(crate) fn write_seq(&self) -> u64 { + self.write_seq + } + + #[cfg(feature = "secret_extraction")] + pub(crate) fn read_seq(&self) -> u64 { + self.read_seq + } + + pub(crate) fn doing_trial_decryption(&mut self, requested: usize) -> bool { + match self + .trial_decryption_len + .and_then(|value| value.checked_sub(requested)) + { + Some(remaining) => { + self.trial_decryption_len = Some(remaining); + true + } + _ => false, + } + } + + /// Prepare to use the given `MessageEncrypter` for future message encryption. + /// It is not used until you call `start_encrypting`. + pub(crate) fn prepare_message_encrypter(&mut self, cipher: Box) { + self.message_encrypter = cipher; + self.write_seq = 0; + self.encrypt_state = DirectionState::Prepared; + } + + /// Prepare to use the given `MessageDecrypter` for future message decryption. + /// It is not used until you call `start_decrypting`. + pub(crate) fn prepare_message_decrypter(&mut self, cipher: Box) { + self.message_decrypter = cipher; + self.read_seq = 0; + self.decrypt_state = DirectionState::Prepared; + } + + /// Start using the `MessageEncrypter` previously provided to the previous + /// call to `prepare_message_encrypter`. 
+ pub(crate) fn start_encrypting(&mut self) { + debug_assert!(self.encrypt_state == DirectionState::Prepared); + self.encrypt_state = DirectionState::Active; + } + + /// Start using the `MessageDecrypter` previously provided to the previous + /// call to `prepare_message_decrypter`. + pub(crate) fn start_decrypting(&mut self) { + debug_assert!(self.decrypt_state == DirectionState::Prepared); + self.decrypt_state = DirectionState::Active; + } + + /// Set and start using the given `MessageEncrypter` for future outgoing + /// message encryption. + pub(crate) fn set_message_encrypter(&mut self, cipher: Box) { + self.prepare_message_encrypter(cipher); + self.start_encrypting(); + } + + /// Set and start using the given `MessageDecrypter` for future incoming + /// message decryption. + pub(crate) fn set_message_decrypter(&mut self, cipher: Box) { + self.prepare_message_decrypter(cipher); + self.start_decrypting(); + self.trial_decryption_len = None; + } + + /// Set and start using the given `MessageDecrypter` for future incoming + /// message decryption, and enable "trial decryption" mode for when TLS1.3 + /// 0-RTT is attempted but rejected by the server. + pub(crate) fn set_message_decrypter_with_trial_decryption( + &mut self, + cipher: Box, + max_length: usize, + ) { + self.prepare_message_decrypter(cipher); + self.start_decrypting(); + self.trial_decryption_len = Some(max_length); + } + + pub(crate) fn finish_trial_decryption(&mut self) { + self.trial_decryption_len = None; + } + + /// Return true if the peer appears to getting close to encrypting + /// too many messages with this key. + /// + /// Perhaps if we send an alert well before their counter wraps, a + /// buggy peer won't make a terrible mistake here? + /// + /// Note that there's no reason to refuse to decrypt: the security + /// failure has already happened. 
+ pub(crate) fn wants_close_before_decrypt(&self) -> bool { + self.read_seq == SEQ_SOFT_LIMIT + } + + /// Return true if we are getting close to encrypting too many + /// messages with our encryption key. + pub(crate) fn wants_close_before_encrypt(&self) -> bool { + self.write_seq == SEQ_SOFT_LIMIT + } + + /// Return true if we outright refuse to do anything with the + /// encryption key. + pub(crate) fn encrypt_exhausted(&self) -> bool { + self.write_seq >= SEQ_HARD_LIMIT + } + + /// Decrypt a TLS message. + /// + /// `encr` is a decoded message allegedly received from the peer. + /// If it can be decrypted, its decryption is returned. Otherwise, + /// an error is returned. + pub(crate) fn decrypt_incoming(&mut self, encr: OpaqueMessage) -> Result { + debug_assert!(self.is_decrypting()); + let seq = self.read_seq; + let msg = self + .message_decrypter + .decrypt(encr, seq)?; + self.read_seq += 1; + Ok(msg) + } + + /// Encrypt a TLS message. + /// + /// `plain` is a TLS message we'd like to send. This function + /// panics if the requisite keying material hasn't been established yet. 
+ pub(crate) fn encrypt_outgoing(&mut self, plain: BorrowedPlainMessage) -> OpaqueMessage { + debug_assert!(self.encrypt_state == DirectionState::Active); + assert!(!self.encrypt_exhausted()); + let seq = self.write_seq; + self.write_seq += 1; + self.message_encrypter + .encrypt(plain, seq) + .unwrap() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/server/builder.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/server/builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..500adbc5aa08e864df17a884a414007e5cd53959 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/server/builder.rs @@ -0,0 +1,116 @@ +use crate::builder::{ConfigBuilder, WantsVerifier}; +use crate::error::Error; +use crate::key; +use crate::kx::SupportedKxGroup; +use crate::server::handy; +use crate::server::{ResolvesServerCert, ServerConfig}; +use crate::suites::SupportedCipherSuite; +use crate::verify; +use crate::versions; +use crate::NoKeyLog; + +use std::marker::PhantomData; +use std::sync::Arc; + +impl ConfigBuilder { + /// Choose how to verify client certificates. + pub fn with_client_cert_verifier( + self, + client_cert_verifier: Arc, + ) -> ConfigBuilder { + ConfigBuilder { + state: WantsServerCert { + cipher_suites: self.state.cipher_suites, + kx_groups: self.state.kx_groups, + versions: self.state.versions, + verifier: client_cert_verifier, + }, + side: PhantomData, + } + } + + /// Disable client authentication. + pub fn with_no_client_auth(self) -> ConfigBuilder { + self.with_client_cert_verifier(verify::NoClientAuth::new()) + } +} + +/// A config builder state where the caller must supply how to provide a server certificate to +/// the connecting peer. +/// +/// For more information, see the [`ConfigBuilder`] documentation. 
+#[derive(Clone, Debug)] +pub struct WantsServerCert { + cipher_suites: Vec, + kx_groups: Vec<&'static SupportedKxGroup>, + versions: versions::EnabledVersions, + verifier: Arc, +} + +impl ConfigBuilder { + /// Sets a single certificate chain and matching private key. This + /// certificate and key is used for all subsequent connections, + /// irrespective of things like SNI hostname. + /// + /// Note that the end-entity certificate must have the + /// [Subject Alternative Name](https://tools.ietf.org/html/rfc6125#section-4.1) + /// extension to describe, e.g., the valid DNS name. The `commonName` field is + /// disregarded. + /// + /// `cert_chain` is a vector of DER-encoded certificates. + /// `key_der` is a DER-encoded RSA, ECDSA, or Ed25519 private key. + /// + /// This function fails if `key_der` is invalid. + pub fn with_single_cert( + self, + cert_chain: Vec, + key_der: key::PrivateKey, + ) -> Result { + let resolver = handy::AlwaysResolvesChain::new(cert_chain, &key_der)?; + Ok(self.with_cert_resolver(Arc::new(resolver))) + } + + /// Sets a single certificate chain, matching private key, OCSP + /// response and SCTs. This certificate and key is used for all + /// subsequent connections, irrespective of things like SNI hostname. + /// + /// `cert_chain` is a vector of DER-encoded certificates. + /// `key_der` is a DER-encoded RSA, ECDSA, or Ed25519 private key. + /// `ocsp` is a DER-encoded OCSP response. Ignored if zero length. + /// `scts` is an `SignedCertificateTimestampList` encoding (see RFC6962) + /// and is ignored if empty. + /// + /// This function fails if `key_der` is invalid. + pub fn with_single_cert_with_ocsp_and_sct( + self, + cert_chain: Vec, + key_der: key::PrivateKey, + ocsp: Vec, + scts: Vec, + ) -> Result { + let resolver = + handy::AlwaysResolvesChain::new_with_extras(cert_chain, &key_der, ocsp, scts)?; + Ok(self.with_cert_resolver(Arc::new(resolver))) + } + + /// Sets a custom [`ResolvesServerCert`]. 
+ pub fn with_cert_resolver(self, cert_resolver: Arc) -> ServerConfig { + ServerConfig { + cipher_suites: self.state.cipher_suites, + kx_groups: self.state.kx_groups, + verifier: self.state.verifier, + cert_resolver, + ignore_client_order: false, + max_fragment_size: None, + session_storage: handy::ServerSessionMemoryCache::new(256), + ticketer: Arc::new(handy::NeverProducesTickets {}), + alpn_protocols: Vec::new(), + versions: self.state.versions, + key_log: Arc::new(NoKeyLog {}), + #[cfg(feature = "secret_extraction")] + enable_secret_extraction: false, + max_early_data_size: 0, + send_half_rtt_data: false, + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/server/common.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/server/common.rs new file mode 100644 index 0000000000000000000000000000000000000000..2e3420c2f5d23038498c318fe0ff59dbe5280102 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/server/common.rs @@ -0,0 +1,41 @@ +use crate::{key, sign}; + +/// ActiveCertifiedKey wraps CertifiedKey and tracks OSCP and SCT state +/// in a single handshake. 
+pub(super) struct ActiveCertifiedKey<'a> { + key: &'a sign::CertifiedKey, + ocsp: Option<&'a [u8]>, + sct_list: Option<&'a [u8]>, +} + +impl<'a> ActiveCertifiedKey<'a> { + pub(super) fn from_certified_key(key: &sign::CertifiedKey) -> ActiveCertifiedKey { + ActiveCertifiedKey { + key, + ocsp: key.ocsp.as_deref(), + sct_list: key.sct_list.as_deref(), + } + } + + /// Get the certificate chain + #[inline] + pub(super) fn get_cert(&self) -> &[key::Certificate] { + &self.key.cert + } + + /// Get the signing key + #[inline] + pub(super) fn get_key(&self) -> &dyn sign::SigningKey { + &*self.key.key + } + + #[inline] + pub(super) fn get_ocsp(&self) -> Option<&[u8]> { + self.ocsp + } + + #[inline] + pub(super) fn get_sct_list(&self) -> Option<&[u8]> { + self.sct_list + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/sign.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/sign.rs new file mode 100644 index 0000000000000000000000000000000000000000..9c916f69108d729e6c85df026da880c001ea43ac --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/sign.rs @@ -0,0 +1,523 @@ +use crate::enums::SignatureScheme; +use crate::error::Error; +use crate::key; +use crate::msgs::enums::SignatureAlgorithm; +use crate::x509::{wrap_in_asn1_len, wrap_in_sequence}; + +use ring::io::der; +use ring::signature::{self, EcdsaKeyPair, Ed25519KeyPair, RsaKeyPair}; + +use std::convert::TryFrom; +use std::error::Error as StdError; +use std::fmt; +use std::sync::Arc; + +/// An abstract signing key. +pub trait SigningKey: Send + Sync { + /// Choose a `SignatureScheme` from those offered. + /// + /// Expresses the choice by returning something that implements `Signer`, + /// using the chosen scheme. + fn choose_scheme(&self, offered: &[SignatureScheme]) -> Option>; + + /// What kind of key we have. + fn algorithm(&self) -> SignatureAlgorithm; +} + +/// A thing that can sign a message. 
+pub trait Signer: Send + Sync { + /// Signs `message` using the selected scheme. + fn sign(&self, message: &[u8]) -> Result, Error>; + + /// Reveals which scheme will be used when you call `sign()`. + fn scheme(&self) -> SignatureScheme; +} + +/// A packaged-together certificate chain, matching `SigningKey` and +/// optional stapled OCSP response and/or SCT list. +#[derive(Clone)] +pub struct CertifiedKey { + /// The certificate chain. + pub cert: Vec, + + /// The certified key. + pub key: Arc, + + /// An optional OCSP response from the certificate issuer, + /// attesting to its continued validity. + pub ocsp: Option>, + + /// An optional collection of SCTs from CT logs, proving the + /// certificate is included on those logs. This must be + /// a `SignedCertificateTimestampList` encoding; see RFC6962. + pub sct_list: Option>, +} + +impl CertifiedKey { + /// Make a new CertifiedKey, with the given chain and key. + /// + /// The cert chain must not be empty. The first certificate in the chain + /// must be the end-entity certificate. + pub fn new(cert: Vec, key: Arc) -> Self { + Self { + cert, + key, + ocsp: None, + sct_list: None, + } + } + + /// The end-entity certificate. + pub fn end_entity_cert(&self) -> Result<&key::Certificate, SignError> { + self.cert.get(0).ok_or(SignError(())) + } + + /// Check the certificate chain for validity: + /// - it should be non-empty list + /// - the first certificate should be parsable as a x509v3, + /// - the first certificate should quote the given server name + /// (if provided) + /// + /// These checks are not security-sensitive. They are the + /// *server* attempting to detect accidental misconfiguration. + pub(crate) fn cross_check_end_entity_cert( + &self, + name: Option, + ) -> Result<(), Error> { + // Always reject an empty certificate chain. 
+ let end_entity_cert = self + .end_entity_cert() + .map_err(|SignError(())| { + Error::General("No end-entity certificate in certificate chain".to_string()) + })?; + + // Reject syntactically-invalid end-entity certificates. + let end_entity_cert = + webpki::EndEntityCert::try_from(end_entity_cert.as_ref()).map_err(|_| { + Error::General( + "End-entity certificate in certificate \ + chain is syntactically invalid" + .to_string(), + ) + })?; + + if let Some(name) = name { + // If SNI was offered then the certificate must be valid for + // that hostname. Note that this doesn't fully validate that the + // certificate is valid; it only validates that the name is one + // that the certificate is valid for, if the certificate is + // valid. + if end_entity_cert + .verify_is_valid_for_dns_name(name) + .is_err() + { + return Err(Error::General( + "The server certificate is not \ + valid for the given name" + .to_string(), + )); + } + } + + Ok(()) + } +} + +/// Parse `der` as any supported key encoding/type, returning +/// the first which works. +pub fn any_supported_type(der: &key::PrivateKey) -> Result, SignError> { + if let Ok(rsa) = RsaSigningKey::new(der) { + Ok(Arc::new(rsa)) + } else if let Ok(ecdsa) = any_ecdsa_type(der) { + Ok(ecdsa) + } else { + any_eddsa_type(der) + } +} + +/// Parse `der` as any ECDSA key type, returning the first which works. +/// +/// Both SEC1 (PEM section starting with 'BEGIN EC PRIVATE KEY') and PKCS8 +/// (PEM section starting with 'BEGIN PRIVATE KEY') encodings are supported. 
+pub fn any_ecdsa_type(der: &key::PrivateKey) -> Result, SignError> { + if let Ok(ecdsa_p256) = EcdsaSigningKey::new( + der, + SignatureScheme::ECDSA_NISTP256_SHA256, + &signature::ECDSA_P256_SHA256_ASN1_SIGNING, + ) { + return Ok(Arc::new(ecdsa_p256)); + } + + if let Ok(ecdsa_p384) = EcdsaSigningKey::new( + der, + SignatureScheme::ECDSA_NISTP384_SHA384, + &signature::ECDSA_P384_SHA384_ASN1_SIGNING, + ) { + return Ok(Arc::new(ecdsa_p384)); + } + + Err(SignError(())) +} + +/// Parse `der` as any EdDSA key type, returning the first which works. +pub fn any_eddsa_type(der: &key::PrivateKey) -> Result, SignError> { + if let Ok(ed25519) = Ed25519SigningKey::new(der, SignatureScheme::ED25519) { + return Ok(Arc::new(ed25519)); + } + + // TODO: Add support for Ed448 + + Err(SignError(())) +} + +/// A `SigningKey` for RSA-PKCS1 or RSA-PSS. +/// +/// This is used by the test suite, so it must be `pub`, but it isn't part of +/// the public, stable, API. +#[doc(hidden)] +pub struct RsaSigningKey { + key: Arc, +} + +static ALL_RSA_SCHEMES: &[SignatureScheme] = &[ + SignatureScheme::RSA_PSS_SHA512, + SignatureScheme::RSA_PSS_SHA384, + SignatureScheme::RSA_PSS_SHA256, + SignatureScheme::RSA_PKCS1_SHA512, + SignatureScheme::RSA_PKCS1_SHA384, + SignatureScheme::RSA_PKCS1_SHA256, +]; + +impl RsaSigningKey { + /// Make a new `RsaSigningKey` from a DER encoding, in either + /// PKCS#1 or PKCS#8 format. 
+ pub fn new(der: &key::PrivateKey) -> Result { + RsaKeyPair::from_der(&der.0) + .or_else(|_| RsaKeyPair::from_pkcs8(&der.0)) + .map(|s| Self { key: Arc::new(s) }) + .map_err(|_| SignError(())) + } +} + +impl SigningKey for RsaSigningKey { + fn choose_scheme(&self, offered: &[SignatureScheme]) -> Option> { + ALL_RSA_SCHEMES + .iter() + .find(|scheme| offered.contains(scheme)) + .map(|scheme| RsaSigner::new(Arc::clone(&self.key), *scheme)) + } + + fn algorithm(&self) -> SignatureAlgorithm { + SignatureAlgorithm::RSA + } +} + +#[allow(clippy::upper_case_acronyms)] +#[doc(hidden)] +#[deprecated(since = "0.20.0", note = "Use RsaSigningKey")] +pub type RSASigningKey = RsaSigningKey; + +struct RsaSigner { + key: Arc, + scheme: SignatureScheme, + encoding: &'static dyn signature::RsaEncoding, +} + +impl RsaSigner { + fn new(key: Arc, scheme: SignatureScheme) -> Box { + let encoding: &dyn signature::RsaEncoding = match scheme { + SignatureScheme::RSA_PKCS1_SHA256 => &signature::RSA_PKCS1_SHA256, + SignatureScheme::RSA_PKCS1_SHA384 => &signature::RSA_PKCS1_SHA384, + SignatureScheme::RSA_PKCS1_SHA512 => &signature::RSA_PKCS1_SHA512, + SignatureScheme::RSA_PSS_SHA256 => &signature::RSA_PSS_SHA256, + SignatureScheme::RSA_PSS_SHA384 => &signature::RSA_PSS_SHA384, + SignatureScheme::RSA_PSS_SHA512 => &signature::RSA_PSS_SHA512, + _ => unreachable!(), + }; + + Box::new(Self { + key, + scheme, + encoding, + }) + } +} + +impl Signer for RsaSigner { + fn sign(&self, message: &[u8]) -> Result, Error> { + let mut sig = vec![0; self.key.public_modulus_len()]; + + let rng = ring::rand::SystemRandom::new(); + self.key + .sign(self.encoding, &rng, message, &mut sig) + .map(|_| sig) + .map_err(|_| Error::General("signing failed".to_string())) + } + + fn scheme(&self) -> SignatureScheme { + self.scheme + } +} + +/// A SigningKey that uses exactly one TLS-level SignatureScheme +/// and one ring-level signature::SigningAlgorithm. 
+/// +/// Compare this to RsaSigningKey, which for a particular key is +/// willing to sign with several algorithms. This is quite poor +/// cryptography practice, but is necessary because a given RSA key +/// is expected to work in TLS1.2 (PKCS#1 signatures) and TLS1.3 +/// (PSS signatures) -- nobody is willing to obtain certificates for +/// different protocol versions. +/// +/// Currently this is only implemented for ECDSA keys. +struct EcdsaSigningKey { + key: Arc, + scheme: SignatureScheme, +} + +impl EcdsaSigningKey { + /// Make a new `ECDSASigningKey` from a DER encoding in PKCS#8 or SEC1 + /// format, expecting a key usable with precisely the given signature + /// scheme. + fn new( + der: &key::PrivateKey, + scheme: SignatureScheme, + sigalg: &'static signature::EcdsaSigningAlgorithm, + ) -> Result { + EcdsaKeyPair::from_pkcs8(sigalg, &der.0) + .map_err(|_| ()) + .or_else(|_| Self::convert_sec1_to_pkcs8(scheme, sigalg, &der.0)) + .map(|kp| Self { + key: Arc::new(kp), + scheme, + }) + } + + /// Convert a SEC1 encoding to PKCS8, and ask ring to parse it. This + /// can be removed once https://github.com/briansmith/ring/pull/1456 + /// (or equivalent) is landed. 
+ fn convert_sec1_to_pkcs8( + scheme: SignatureScheme, + sigalg: &'static signature::EcdsaSigningAlgorithm, + maybe_sec1_der: &[u8], + ) -> Result { + let pkcs8_prefix = match scheme { + SignatureScheme::ECDSA_NISTP256_SHA256 => &PKCS8_PREFIX_ECDSA_NISTP256, + SignatureScheme::ECDSA_NISTP384_SHA384 => &PKCS8_PREFIX_ECDSA_NISTP384, + _ => unreachable!(), // all callers are in this file + }; + + // wrap sec1 encoding in an OCTET STRING + let mut sec1_wrap = Vec::with_capacity(maybe_sec1_der.len() + 8); + sec1_wrap.extend_from_slice(maybe_sec1_der); + wrap_in_asn1_len(&mut sec1_wrap); + sec1_wrap.insert(0, der::Tag::OctetString as u8); + + let mut pkcs8 = Vec::with_capacity(pkcs8_prefix.len() + sec1_wrap.len() + 4); + pkcs8.extend_from_slice(pkcs8_prefix); + pkcs8.extend_from_slice(&sec1_wrap); + wrap_in_sequence(&mut pkcs8); + + EcdsaKeyPair::from_pkcs8(sigalg, &pkcs8).map_err(|_| ()) + } +} + +// This is (line-by-line): +// - INTEGER Version = 0 +// - SEQUENCE (privateKeyAlgorithm) +// - id-ecPublicKey OID +// - prime256v1 OID +const PKCS8_PREFIX_ECDSA_NISTP256: &[u8] = b"\x02\x01\x00\ + \x30\x13\ + \x06\x07\x2a\x86\x48\xce\x3d\x02\x01\ + \x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07"; + +// This is (line-by-line): +// - INTEGER Version = 0 +// - SEQUENCE (privateKeyAlgorithm) +// - id-ecPublicKey OID +// - secp384r1 OID +const PKCS8_PREFIX_ECDSA_NISTP384: &[u8] = b"\x02\x01\x00\ + \x30\x10\ + \x06\x07\x2a\x86\x48\xce\x3d\x02\x01\ + \x06\x05\x2b\x81\x04\x00\x22"; + +impl SigningKey for EcdsaSigningKey { + fn choose_scheme(&self, offered: &[SignatureScheme]) -> Option> { + if offered.contains(&self.scheme) { + Some(Box::new(EcdsaSigner { + key: Arc::clone(&self.key), + scheme: self.scheme, + })) + } else { + None + } + } + + fn algorithm(&self) -> SignatureAlgorithm { + use crate::msgs::handshake::DecomposedSignatureScheme; + self.scheme.sign() + } +} + +struct EcdsaSigner { + key: Arc, + scheme: SignatureScheme, +} + +impl Signer for EcdsaSigner { + fn sign(&self, 
message: &[u8]) -> Result, Error> { + let rng = ring::rand::SystemRandom::new(); + self.key + .sign(&rng, message) + .map_err(|_| Error::General("signing failed".into())) + .map(|sig| sig.as_ref().into()) + } + + fn scheme(&self) -> SignatureScheme { + self.scheme + } +} + +/// A SigningKey that uses exactly one TLS-level SignatureScheme +/// and one ring-level signature::SigningAlgorithm. +/// +/// Compare this to RsaSigningKey, which for a particular key is +/// willing to sign with several algorithms. This is quite poor +/// cryptography practice, but is necessary because a given RSA key +/// is expected to work in TLS1.2 (PKCS#1 signatures) and TLS1.3 +/// (PSS signatures) -- nobody is willing to obtain certificates for +/// different protocol versions. +/// +/// Currently this is only implemented for Ed25519 keys. +struct Ed25519SigningKey { + key: Arc, + scheme: SignatureScheme, +} + +impl Ed25519SigningKey { + /// Make a new `Ed25519SigningKey` from a DER encoding in PKCS#8 format, + /// expecting a key usable with precisely the given signature scheme. 
+ fn new(der: &key::PrivateKey, scheme: SignatureScheme) -> Result { + Ed25519KeyPair::from_pkcs8_maybe_unchecked(&der.0) + .map(|kp| Self { + key: Arc::new(kp), + scheme, + }) + .map_err(|_| SignError(())) + } +} + +impl SigningKey for Ed25519SigningKey { + fn choose_scheme(&self, offered: &[SignatureScheme]) -> Option> { + if offered.contains(&self.scheme) { + Some(Box::new(Ed25519Signer { + key: Arc::clone(&self.key), + scheme: self.scheme, + })) + } else { + None + } + } + + fn algorithm(&self) -> SignatureAlgorithm { + use crate::msgs::handshake::DecomposedSignatureScheme; + self.scheme.sign() + } +} + +struct Ed25519Signer { + key: Arc, + scheme: SignatureScheme, +} + +impl Signer for Ed25519Signer { + fn sign(&self, message: &[u8]) -> Result, Error> { + Ok(self.key.sign(message).as_ref().into()) + } + + fn scheme(&self) -> SignatureScheme { + self.scheme + } +} + +/// The set of schemes we support for signatures and +/// that are allowed for TLS1.3. +pub fn supported_sign_tls13() -> &'static [SignatureScheme] { + &[ + SignatureScheme::ECDSA_NISTP384_SHA384, + SignatureScheme::ECDSA_NISTP256_SHA256, + SignatureScheme::RSA_PSS_SHA512, + SignatureScheme::RSA_PSS_SHA384, + SignatureScheme::RSA_PSS_SHA256, + SignatureScheme::ED25519, + ] +} + +/// Errors while signing +#[derive(Debug)] +pub struct SignError(()); + +impl fmt::Display for SignError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("sign error") + } +} + +impl StdError for SignError {} + +#[test] +fn can_load_ecdsa_nistp256_pkcs8() { + let key = key::PrivateKey(include_bytes!("testdata/nistp256key.pkcs8.der").to_vec()); + assert!(any_supported_type(&key).is_ok()); + assert!(any_ecdsa_type(&key).is_ok()); + assert!(any_eddsa_type(&key).is_err()); +} + +#[test] +fn can_load_ecdsa_nistp256_sec1() { + let key = key::PrivateKey(include_bytes!("testdata/nistp256key.der").to_vec()); + assert!(any_supported_type(&key).is_ok()); + assert!(any_ecdsa_type(&key).is_ok()); + 
assert!(any_eddsa_type(&key).is_err()); +} + +#[test] +fn can_load_ecdsa_nistp384_pkcs8() { + let key = key::PrivateKey(include_bytes!("testdata/nistp384key.pkcs8.der").to_vec()); + assert!(any_supported_type(&key).is_ok()); + assert!(any_ecdsa_type(&key).is_ok()); + assert!(any_eddsa_type(&key).is_err()); +} + +#[test] +fn can_load_ecdsa_nistp384_sec1() { + let key = key::PrivateKey(include_bytes!("testdata/nistp384key.der").to_vec()); + assert!(any_supported_type(&key).is_ok()); + assert!(any_ecdsa_type(&key).is_ok()); + assert!(any_eddsa_type(&key).is_err()); +} + +#[test] +fn can_load_eddsa_pkcs8() { + let key = key::PrivateKey(include_bytes!("testdata/eddsakey.der").to_vec()); + assert!(any_supported_type(&key).is_ok()); + assert!(any_eddsa_type(&key).is_ok()); + assert!(any_ecdsa_type(&key).is_err()); +} + +#[test] +fn can_load_rsa2048_pkcs8() { + let key = key::PrivateKey(include_bytes!("testdata/rsa2048key.pkcs8.der").to_vec()); + assert!(any_supported_type(&key).is_ok()); + assert!(any_eddsa_type(&key).is_err()); + assert!(any_ecdsa_type(&key).is_err()); +} + +#[test] +fn can_load_rsa2048_pkcs1() { + let key = key::PrivateKey(include_bytes!("testdata/rsa2048key.pkcs1.der").to_vec()); + assert!(any_supported_type(&key).is_ok()); + assert!(any_eddsa_type(&key).is_err()); + assert!(any_ecdsa_type(&key).is_err()); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/stream.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/stream.rs new file mode 100644 index 0000000000000000000000000000000000000000..70f0fa49471c24d1dd81c2af1c30e400e480f5ea --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/stream.rs @@ -0,0 +1,254 @@ +use crate::conn::{ConnectionCommon, SideData}; + +use std::io::{IoSlice, Read, Result, Write}; +use std::ops::{Deref, DerefMut}; + +/// This type implements `io::Read` and `io::Write`, encapsulating +/// a Connection `C` and an underlying transport 
`T`, such as a socket. +/// +/// This allows you to use a rustls Connection like a normal stream. +#[derive(Debug)] +pub struct Stream<'a, C: 'a + ?Sized, T: 'a + Read + Write + ?Sized> { + /// Our TLS connection + pub conn: &'a mut C, + + /// The underlying transport, like a socket + pub sock: &'a mut T, +} + +impl<'a, C, T, S> Stream<'a, C, T> +where + C: 'a + DerefMut + Deref>, + T: 'a + Read + Write, + S: SideData, +{ + /// Make a new Stream using the Connection `conn` and socket-like object + /// `sock`. This does not fail and does no IO. + pub fn new(conn: &'a mut C, sock: &'a mut T) -> Self { + Self { conn, sock } + } + + /// If we're handshaking, complete all the IO for that. + /// If we have data to write, write it all. + fn complete_prior_io(&mut self) -> Result<()> { + if self.conn.is_handshaking() { + self.conn.complete_io(self.sock)?; + } + + if self.conn.wants_write() { + self.conn.complete_io(self.sock)?; + } + + Ok(()) + } +} + +impl<'a, C, T, S> Read for Stream<'a, C, T> +where + C: 'a + DerefMut + Deref>, + T: 'a + Read + Write, + S: SideData, +{ + fn read(&mut self, buf: &mut [u8]) -> Result { + self.complete_prior_io()?; + + // We call complete_io() in a loop since a single call may read only + // a partial packet from the underlying transport. A full packet is + // needed to get more plaintext, which we must do if EOF has not been + // hit. Otherwise, we will prematurely signal EOF by returning 0. We + // determine if EOF has actually been hit by checking if 0 bytes were + // read from the underlying transport. 
+ while self.conn.wants_read() { + let at_eof = self.conn.complete_io(self.sock)?.0 == 0; + if at_eof { + if let Ok(io_state) = self.conn.process_new_packets() { + if at_eof && io_state.plaintext_bytes_to_read() == 0 { + return Ok(0); + } + } + break; + } + } + + self.conn.reader().read(buf) + } + + #[cfg(read_buf)] + fn read_buf(&mut self, cursor: std::io::BorrowedCursor<'_>) -> Result<()> { + self.complete_prior_io()?; + + // We call complete_io() in a loop since a single call may read only + // a partial packet from the underlying transport. A full packet is + // needed to get more plaintext, which we must do if EOF has not been + // hit. Otherwise, we will prematurely signal EOF by returning without + // writing anything. We determine if EOF has actually been hit by + // checking if 0 bytes were read from the underlying transport. + while self.conn.wants_read() { + let at_eof = self.conn.complete_io(self.sock)?.0 == 0; + if at_eof { + if let Ok(io_state) = self.conn.process_new_packets() { + if at_eof && io_state.plaintext_bytes_to_read() == 0 { + return Ok(()); + } + } + break; + } + } + + self.conn.reader().read_buf(cursor) + } +} + +impl<'a, C, T, S> Write for Stream<'a, C, T> +where + C: 'a + DerefMut + Deref>, + T: 'a + Read + Write, + S: SideData, +{ + fn write(&mut self, buf: &[u8]) -> Result { + self.complete_prior_io()?; + + let len = self.conn.writer().write(buf)?; + + // Try to write the underlying transport here, but don't let + // any errors mask the fact we've consumed `len` bytes. + // Callers will learn of permanent errors on the next call. + let _ = self.conn.complete_io(self.sock); + + Ok(len) + } + + fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> Result { + self.complete_prior_io()?; + + let len = self + .conn + .writer() + .write_vectored(bufs)?; + + // Try to write the underlying transport here, but don't let + // any errors mask the fact we've consumed `len` bytes. + // Callers will learn of permanent errors on the next call. 
+ let _ = self.conn.complete_io(self.sock); + + Ok(len) + } + + fn flush(&mut self) -> Result<()> { + self.complete_prior_io()?; + + self.conn.writer().flush()?; + if self.conn.wants_write() { + self.conn.complete_io(self.sock)?; + } + Ok(()) + } +} + +/// This type implements `io::Read` and `io::Write`, encapsulating +/// and owning a Connection `C` and an underlying blocking transport +/// `T`, such as a socket. +/// +/// This allows you to use a rustls Connection like a normal stream. +#[derive(Debug)] +pub struct StreamOwned { + /// Our connection + pub conn: C, + + /// The underlying transport, like a socket + pub sock: T, +} + +impl StreamOwned +where + C: DerefMut + Deref>, + T: Read + Write, + S: SideData, +{ + /// Make a new StreamOwned taking the Connection `conn` and socket-like + /// object `sock`. This does not fail and does no IO. + /// + /// This is the same as `Stream::new` except `conn` and `sock` are + /// moved into the StreamOwned. + pub fn new(conn: C, sock: T) -> Self { + Self { conn, sock } + } + + /// Get a reference to the underlying socket + pub fn get_ref(&self) -> &T { + &self.sock + } + + /// Get a mutable reference to the underlying socket + pub fn get_mut(&mut self) -> &mut T { + &mut self.sock + } +} + +impl<'a, C, T, S> StreamOwned +where + C: DerefMut + Deref>, + T: Read + Write, + S: SideData, +{ + fn as_stream(&'a mut self) -> Stream<'a, C, T> { + Stream { + conn: &mut self.conn, + sock: &mut self.sock, + } + } +} + +impl Read for StreamOwned +where + C: DerefMut + Deref>, + T: Read + Write, + S: SideData, +{ + fn read(&mut self, buf: &mut [u8]) -> Result { + self.as_stream().read(buf) + } + + #[cfg(read_buf)] + fn read_buf(&mut self, cursor: std::io::BorrowedCursor<'_>) -> Result<()> { + self.as_stream().read_buf(cursor) + } +} + +impl Write for StreamOwned +where + C: DerefMut + Deref>, + T: Read + Write, + S: SideData, +{ + fn write(&mut self, buf: &[u8]) -> Result { + self.as_stream().write(buf) + } + + fn flush(&mut self) -> 
Result<()> { + self.as_stream().flush() + } +} + +#[cfg(test)] +mod tests { + use super::{Stream, StreamOwned}; + use crate::client::ClientConnection; + use crate::server::ServerConnection; + use std::net::TcpStream; + + #[test] + fn stream_can_be_created_for_connection_and_tcpstream() { + type _Test<'a> = Stream<'a, ClientConnection, TcpStream>; + } + + #[test] + fn streamowned_can_be_created_for_client_and_tcpstream() { + type _Test = StreamOwned; + } + + #[test] + fn streamowned_can_be_created_for_server_and_tcpstream() { + type _Test = StreamOwned; + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/suites.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/suites.rs new file mode 100644 index 0000000000000000000000000000000000000000..8d3590cd80cae8b28f79f5e7af6ac74cfa7abd90 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/suites.rs @@ -0,0 +1,340 @@ +use std::fmt; + +use crate::enums::{CipherSuite, ProtocolVersion, SignatureScheme}; +use crate::msgs::enums::SignatureAlgorithm; +use crate::msgs::handshake::DecomposedSignatureScheme; +#[cfg(feature = "tls12")] +use crate::tls12::Tls12CipherSuite; +#[cfg(feature = "tls12")] +use crate::tls12::{ + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + // TLS1.2 suites + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, +}; +use crate::tls13::Tls13CipherSuite; +use crate::tls13::{ + TLS13_AES_128_GCM_SHA256, TLS13_AES_256_GCM_SHA384, TLS13_CHACHA20_POLY1305_SHA256, +}; +#[cfg(feature = "tls12")] +use crate::versions::TLS12; +use crate::versions::{SupportedProtocolVersion, TLS13}; + +/// Bulk symmetric encryption scheme used by a cipher suite. 
+#[allow(non_camel_case_types)] +#[derive(Debug, Eq, PartialEq)] +pub enum BulkAlgorithm { + /// AES with 128-bit keys in Galois counter mode. + Aes128Gcm, + + /// AES with 256-bit keys in Galois counter mode. + Aes256Gcm, + + /// Chacha20 for confidentiality with poly1305 for authenticity. + Chacha20Poly1305, +} + +/// Common state for cipher suites (both for TLS 1.2 and TLS 1.3) +#[derive(Debug)] +pub struct CipherSuiteCommon { + /// The TLS enumeration naming this cipher suite. + pub suite: CipherSuite, + + /// How to do bulk encryption. + pub bulk: BulkAlgorithm, + + pub(crate) aead_algorithm: &'static ring::aead::Algorithm, +} + +/// A cipher suite supported by rustls. +/// +/// All possible instances of this type are provided by the library in +/// the [`ALL_CIPHER_SUITES`] array. +#[derive(Clone, Copy, PartialEq)] +pub enum SupportedCipherSuite { + /// A TLS 1.2 cipher suite + #[cfg(feature = "tls12")] + Tls12(&'static Tls12CipherSuite), + /// A TLS 1.3 cipher suite + Tls13(&'static Tls13CipherSuite), +} + +impl fmt::Debug for SupportedCipherSuite { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.suite().fmt(f) + } +} + +impl SupportedCipherSuite { + /// Which hash function to use with this suite. + pub fn hash_algorithm(&self) -> &'static ring::digest::Algorithm { + match self { + #[cfg(feature = "tls12")] + Self::Tls12(inner) => inner.hash_algorithm(), + Self::Tls13(inner) => inner.hash_algorithm(), + } + } + + /// The cipher suite's identifier + pub fn suite(&self) -> CipherSuite { + self.common().suite + } + + pub(crate) fn common(&self) -> &CipherSuiteCommon { + match self { + #[cfg(feature = "tls12")] + Self::Tls12(inner) => &inner.common, + Self::Tls13(inner) => &inner.common, + } + } + + pub(crate) fn tls13(&self) -> Option<&'static Tls13CipherSuite> { + match self { + #[cfg(feature = "tls12")] + Self::Tls12(_) => None, + Self::Tls13(inner) => Some(inner), + } + } + + /// Return supported protocol version for the cipher suite. 
+ pub fn version(&self) -> &'static SupportedProtocolVersion { + match self { + #[cfg(feature = "tls12")] + Self::Tls12(_) => &TLS12, + Self::Tls13(_) => &TLS13, + } + } + + /// Return true if this suite is usable for a key only offering `sig_alg` + /// signatures. This resolves to true for all TLS1.3 suites. + pub fn usable_for_signature_algorithm(&self, _sig_alg: SignatureAlgorithm) -> bool { + match self { + Self::Tls13(_) => true, // no constraint expressed by ciphersuite (e.g., TLS1.3) + #[cfg(feature = "tls12")] + Self::Tls12(inner) => inner + .sign + .iter() + .any(|scheme| scheme.sign() == _sig_alg), + } + } +} + +/// A list of all the cipher suites supported by rustls. +pub static ALL_CIPHER_SUITES: &[SupportedCipherSuite] = &[ + // TLS1.3 suites + TLS13_AES_256_GCM_SHA384, + TLS13_AES_128_GCM_SHA256, + TLS13_CHACHA20_POLY1305_SHA256, + // TLS1.2 suites + #[cfg(feature = "tls12")] + TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + #[cfg(feature = "tls12")] + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + #[cfg(feature = "tls12")] + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + #[cfg(feature = "tls12")] + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + #[cfg(feature = "tls12")] + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + #[cfg(feature = "tls12")] + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, +]; + +/// The cipher suite configuration that an application should use by default. +/// +/// This will be [`ALL_CIPHER_SUITES`] sans any supported cipher suites that +/// shouldn't be enabled by most applications. +pub static DEFAULT_CIPHER_SUITES: &[SupportedCipherSuite] = ALL_CIPHER_SUITES; + +// These both O(N^2)! 
+pub(crate) fn choose_ciphersuite_preferring_client( + client_suites: &[CipherSuite], + server_suites: &[SupportedCipherSuite], +) -> Option { + for client_suite in client_suites { + if let Some(selected) = server_suites + .iter() + .find(|x| *client_suite == x.suite()) + { + return Some(*selected); + } + } + + None +} + +pub(crate) fn choose_ciphersuite_preferring_server( + client_suites: &[CipherSuite], + server_suites: &[SupportedCipherSuite], +) -> Option { + if let Some(selected) = server_suites + .iter() + .find(|x| client_suites.contains(&x.suite())) + { + return Some(*selected); + } + + None +} + +/// Return a list of the ciphersuites in `all` with the suites +/// incompatible with `SignatureAlgorithm` `sigalg` removed. +pub(crate) fn reduce_given_sigalg( + all: &[SupportedCipherSuite], + sigalg: SignatureAlgorithm, +) -> Vec { + all.iter() + .filter(|&&suite| suite.usable_for_signature_algorithm(sigalg)) + .copied() + .collect() +} + +/// Return a list of the ciphersuites in `all` with the suites +/// incompatible with the chosen `version` removed. +pub(crate) fn reduce_given_version( + all: &[SupportedCipherSuite], + version: ProtocolVersion, +) -> Vec { + all.iter() + .filter(|&&suite| suite.version().version == version) + .copied() + .collect() +} + +/// Return true if `sigscheme` is usable by any of the given suites. +pub(crate) fn compatible_sigscheme_for_suites( + sigscheme: SignatureScheme, + common_suites: &[SupportedCipherSuite], +) -> bool { + let sigalg = sigscheme.sign(); + common_suites + .iter() + .any(|&suite| suite.usable_for_signature_algorithm(sigalg)) +} + +/// Secrets for transmitting/receiving data over a TLS session. +/// +/// After performing a handshake with rustls, these secrets can be extracted +/// to configure kTLS for a socket, and have the kernel take over encryption +/// and/or decryption. 
+#[cfg(feature = "secret_extraction")] +pub struct ExtractedSecrets { + /// sequence number and secrets for the "tx" (transmit) direction + pub tx: (u64, ConnectionTrafficSecrets), + + /// sequence number and secrets for the "rx" (receive) direction + pub rx: (u64, ConnectionTrafficSecrets), +} + +/// [ExtractedSecrets] minus the sequence numbers +#[cfg(feature = "secret_extraction")] +pub(crate) struct PartiallyExtractedSecrets { + /// secrets for the "tx" (transmit) direction + pub(crate) tx: ConnectionTrafficSecrets, + + /// secrets for the "rx" (receive) direction + pub(crate) rx: ConnectionTrafficSecrets, +} + +/// Secrets used to encrypt/decrypt data in a TLS session. +/// +/// These can be used to configure kTLS for a socket in one direction. +/// The only other piece of information needed is the sequence number, +/// which is in [ExtractedSecrets]. +#[cfg(feature = "secret_extraction")] +#[non_exhaustive] +pub enum ConnectionTrafficSecrets { + /// Secrets for the AES_128_GCM AEAD algorithm + Aes128Gcm { + /// key (16 bytes) + key: [u8; 16], + /// salt (4 bytes) + salt: [u8; 4], + /// initialization vector (8 bytes, chopped from key block) + iv: [u8; 8], + }, + + /// Secrets for the AES_256_GCM AEAD algorithm + Aes256Gcm { + /// key (32 bytes) + key: [u8; 32], + /// salt (4 bytes) + salt: [u8; 4], + /// initialization vector (8 bytes, chopped from key block) + iv: [u8; 8], + }, + + /// Secrets for the CHACHA20_POLY1305 AEAD algorithm + Chacha20Poly1305 { + /// key (32 bytes) + key: [u8; 32], + /// initialization vector (12 bytes) + iv: [u8; 12], + }, +} + +#[cfg(test)] +mod test { + use super::*; + use crate::enums::CipherSuite; + + #[test] + fn test_client_pref() { + let client = vec![ + CipherSuite::TLS13_AES_128_GCM_SHA256, + CipherSuite::TLS13_AES_256_GCM_SHA384, + ]; + let server = vec![TLS13_AES_256_GCM_SHA384, TLS13_AES_128_GCM_SHA256]; + let chosen = choose_ciphersuite_preferring_client(&client, &server); + assert!(chosen.is_some()); + 
assert_eq!(chosen.unwrap(), TLS13_AES_128_GCM_SHA256); + } + + #[test] + fn test_server_pref() { + let client = vec![ + CipherSuite::TLS13_AES_128_GCM_SHA256, + CipherSuite::TLS13_AES_256_GCM_SHA384, + ]; + let server = vec![TLS13_AES_256_GCM_SHA384, TLS13_AES_128_GCM_SHA256]; + let chosen = choose_ciphersuite_preferring_server(&client, &server); + assert!(chosen.is_some()); + assert_eq!(chosen.unwrap(), TLS13_AES_256_GCM_SHA384); + } + + #[test] + fn test_pref_fails() { + assert!(choose_ciphersuite_preferring_client( + &[CipherSuite::TLS_NULL_WITH_NULL_NULL], + ALL_CIPHER_SUITES + ) + .is_none()); + assert!(choose_ciphersuite_preferring_server( + &[CipherSuite::TLS_NULL_WITH_NULL_NULL], + ALL_CIPHER_SUITES + ) + .is_none()); + } + + #[test] + fn test_scs_is_debug() { + println!("{:?}", ALL_CIPHER_SUITES); + } + + #[test] + fn test_can_resume_to() { + assert!(TLS13_AES_128_GCM_SHA256 + .tls13() + .unwrap() + .can_resume_from(crate::tls13::TLS13_CHACHA20_POLY1305_SHA256_INTERNAL) + .is_some()); + assert!(TLS13_AES_256_GCM_SHA384 + .tls13() + .unwrap() + .can_resume_from(crate::tls13::TLS13_CHACHA20_POLY1305_SHA256_INTERNAL) + .is_none()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/ticketer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/ticketer.rs new file mode 100644 index 0000000000000000000000000000000000000000..9660d711a60f64e9c11a7dee652410df58497464 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/ticketer.rs @@ -0,0 +1,338 @@ +use crate::rand; +use crate::server::ProducesTickets; +use crate::Error; + +use ring::aead; +use std::mem; +use std::sync::{Arc, Mutex, MutexGuard}; +use std::time; + +/// The timebase for expiring and rolling tickets and ticketing +/// keys. This is UNIX wall time in seconds. +/// +/// This is guaranteed to be on or after the UNIX epoch. 
+#[derive(Clone, Copy, Debug)] +pub struct TimeBase(time::Duration); + +impl TimeBase { + #[inline] + pub fn now() -> Result { + Ok(Self( + time::SystemTime::now().duration_since(time::UNIX_EPOCH)?, + )) + } + + #[inline] + pub fn as_secs(&self) -> u64 { + self.0.as_secs() + } +} + +/// This is a `ProducesTickets` implementation which uses +/// any *ring* `aead::Algorithm` to encrypt and authentication +/// the ticket payload. It does not enforce any lifetime +/// constraint. +struct AeadTicketer { + alg: &'static aead::Algorithm, + key: aead::LessSafeKey, + lifetime: u32, +} + +impl AeadTicketer { + /// Make a ticketer with recommended configuration and a random key. + fn new() -> Result { + let mut key = [0u8; 32]; + rand::fill_random(&mut key)?; + + let alg = &aead::CHACHA20_POLY1305; + let key = aead::UnboundKey::new(alg, &key).unwrap(); + + Ok(Self { + alg, + key: aead::LessSafeKey::new(key), + lifetime: 60 * 60 * 12, + }) + } +} + +impl ProducesTickets for AeadTicketer { + fn enabled(&self) -> bool { + true + } + fn lifetime(&self) -> u32 { + self.lifetime + } + + /// Encrypt `message` and return the ciphertext. + fn encrypt(&self, message: &[u8]) -> Option> { + // Random nonce, because a counter is a privacy leak. + let mut nonce_buf = [0u8; 12]; + rand::fill_random(&mut nonce_buf).ok()?; + let nonce = ring::aead::Nonce::assume_unique_for_key(nonce_buf); + let aad = ring::aead::Aad::empty(); + + let mut ciphertext = + Vec::with_capacity(nonce_buf.len() + message.len() + self.key.algorithm().tag_len()); + ciphertext.extend(nonce_buf); + ciphertext.extend(message); + self.key + .seal_in_place_separate_tag(nonce, aad, &mut ciphertext[nonce_buf.len()..]) + .map(|tag| { + ciphertext.extend(tag.as_ref()); + ciphertext + }) + .ok() + } + + /// Decrypt `ciphertext` and recover the original message. + fn decrypt(&self, ciphertext: &[u8]) -> Option> { + // Non-panicking `let (nonce, ciphertext) = ciphertext.split_at(...)`. 
+ let nonce = ciphertext.get(..self.alg.nonce_len())?; + let ciphertext = ciphertext.get(nonce.len()..)?; + + // This won't fail since `nonce` has the required length. + let nonce = ring::aead::Nonce::try_assume_unique_for_key(nonce).ok()?; + + let mut out = Vec::from(ciphertext); + + let plain_len = self + .key + .open_in_place(nonce, aead::Aad::empty(), &mut out) + .ok()? + .len(); + out.truncate(plain_len); + + Some(out) + } +} + +struct TicketSwitcherState { + next: Option>, + current: Box, + previous: Option>, + next_switch_time: u64, +} + +/// A ticketer that has a 'current' sub-ticketer and a single +/// 'previous' ticketer. It creates a new ticketer every so +/// often, demoting the current ticketer. +struct TicketSwitcher { + generator: fn() -> Result, rand::GetRandomFailed>, + lifetime: u32, + state: Mutex, +} + +impl TicketSwitcher { + /// `lifetime` is in seconds, and is how long the current ticketer + /// is used to generate new tickets. Tickets are accepted for no + /// longer than twice this duration. `generator` produces a new + /// `ProducesTickets` implementation. + fn new( + lifetime: u32, + generator: fn() -> Result, rand::GetRandomFailed>, + ) -> Result { + let now = TimeBase::now()?; + Ok(Self { + generator, + lifetime, + state: Mutex::new(TicketSwitcherState { + next: Some(generator()?), + current: generator()?, + previous: None, + next_switch_time: now + .as_secs() + .saturating_add(u64::from(lifetime)), + }), + }) + } + + /// If it's time, demote the `current` ticketer to `previous` (so it + /// does no new encryptions but can do decryption) and use next for a + /// new `current` ticketer. + /// + /// Calling this regularly will ensure timely key erasure. Otherwise, + /// key erasure will be delayed until the next encrypt/decrypt call. + /// + /// For efficiency, this is also responsible for locking the state mutex + /// and returning the mutexguard. 
+ fn maybe_roll(&self, now: TimeBase) -> Option> { + // The code below aims to make switching as efficient as possible + // in the common case that the generator never fails. To achieve this + // we run the following steps: + // 1. If no switch is necessary, just return the mutexguard + // 2. Shift over all of the ticketers (so current becomes previous, + // and next becomes current). After this, other threads can + // start using the new current ticketer. + // 3. unlock mutex and generate new ticketer. + // 4. Place new ticketer in next and return current + // + // There are a few things to note here. First, we don't check whether + // a new switch might be needed in step 4, even though, due to locking + // and entropy collection, significant amounts of time may have passed. + // This is to guarantee that the thread doing the switch will eventually + // make progress. + // + // Second, because next may be None, step 2 can fail. In that case + // we enter a recovery mode where we generate 2 new ticketers, one for + // next and one for the current ticketer. We then take the mutex a + // second time and redo the time check to see if a switch is still + // necessary. + // + // This somewhat convoluted approach ensures good availability of the + // mutex, by ensuring that the state is usable and the mutex not held + // during generation. It also ensures that, so long as the inner + // ticketer never generates panics during encryption/decryption, + // we are guaranteed to never panic when holding the mutex. + + let now = now.as_secs(); + let mut are_recovering = false; // Are we recovering from previous failure? 
+ { + // Scope the mutex so we only take it for as long as needed + let mut state = self.state.lock().ok()?; + + // Fast path in case we do not need to switch to the next ticketer yet + if now <= state.next_switch_time { + return Some(state); + } + + // Make the switch, or mark for recovery if not possible + if let Some(next) = state.next.take() { + state.previous = Some(mem::replace(&mut state.current, next)); + state.next_switch_time = now.saturating_add(u64::from(self.lifetime)); + } else { + are_recovering = true; + } + } + + // We always need a next, so generate it now + let next = (self.generator)().ok()?; + if !are_recovering { + // Normal path, generate new next and place it in the state + let mut state = self.state.lock().ok()?; + state.next = Some(next); + Some(state) + } else { + // Recovering, generate also a new current ticketer, and modify state + // as needed. (we need to redo the time check, otherwise this might + // result in very rapid switching of ticketers) + let new_current = (self.generator)().ok()?; + let mut state = self.state.lock().ok()?; + state.next = Some(next); + if now > state.next_switch_time { + state.previous = Some(mem::replace(&mut state.current, new_current)); + state.next_switch_time = now.saturating_add(u64::from(self.lifetime)); + } + Some(state) + } + } +} + +impl ProducesTickets for TicketSwitcher { + fn lifetime(&self) -> u32 { + self.lifetime * 2 + } + + fn enabled(&self) -> bool { + true + } + + fn encrypt(&self, message: &[u8]) -> Option> { + let state = self.maybe_roll(TimeBase::now().ok()?)?; + + state.current.encrypt(message) + } + + fn decrypt(&self, ciphertext: &[u8]) -> Option> { + let state = self.maybe_roll(TimeBase::now().ok()?)?; + + // Decrypt with the current key; if that fails, try with the previous. + state + .current + .decrypt(ciphertext) + .or_else(|| { + state + .previous + .as_ref() + .and_then(|previous| previous.decrypt(ciphertext)) + }) + } +} + +/// A concrete, safe ticket creation mechanism. 
+pub struct Ticketer {} + +fn generate_inner() -> Result, rand::GetRandomFailed> { + Ok(Box::new(AeadTicketer::new()?)) +} + +impl Ticketer { + /// Make the recommended Ticketer. This produces tickets + /// with a 12 hour life and randomly generated keys. + /// + /// The encryption mechanism used in Chacha20Poly1305. + pub fn new() -> Result, Error> { + Ok(Arc::new(TicketSwitcher::new(6 * 60 * 60, generate_inner)?)) + } +} + +#[test] +fn basic_pairwise_test() { + let t = Ticketer::new().unwrap(); + assert!(t.enabled()); + let cipher = t.encrypt(b"hello world").unwrap(); + let plain = t.decrypt(&cipher).unwrap(); + assert_eq!(plain, b"hello world"); +} + +#[test] +fn ticketswitcher_switching_test() { + let t = Arc::new(TicketSwitcher::new(1, generate_inner).unwrap()); + let now = TimeBase::now().unwrap(); + let cipher1 = t.encrypt(b"ticket 1").unwrap(); + assert_eq!(t.decrypt(&cipher1).unwrap(), b"ticket 1"); + { + // Trigger new ticketer + t.maybe_roll(TimeBase(now.0 + std::time::Duration::from_secs(10))); + } + let cipher2 = t.encrypt(b"ticket 2").unwrap(); + assert_eq!(t.decrypt(&cipher1).unwrap(), b"ticket 1"); + assert_eq!(t.decrypt(&cipher2).unwrap(), b"ticket 2"); + { + // Trigger new ticketer + t.maybe_roll(TimeBase(now.0 + std::time::Duration::from_secs(20))); + } + let cipher3 = t.encrypt(b"ticket 3").unwrap(); + assert!(t.decrypt(&cipher1).is_none()); + assert_eq!(t.decrypt(&cipher2).unwrap(), b"ticket 2"); + assert_eq!(t.decrypt(&cipher3).unwrap(), b"ticket 3"); +} + +#[cfg(test)] +fn fail_generator() -> Result, rand::GetRandomFailed> { + Err(rand::GetRandomFailed) +} + +#[test] +fn ticketswitcher_recover_test() { + let mut t = TicketSwitcher::new(1, generate_inner).unwrap(); + let now = TimeBase::now().unwrap(); + let cipher1 = t.encrypt(b"ticket 1").unwrap(); + assert_eq!(t.decrypt(&cipher1).unwrap(), b"ticket 1"); + t.generator = fail_generator; + { + // Failed new ticketer + t.maybe_roll(TimeBase(now.0 + std::time::Duration::from_secs(10))); + } + 
t.generator = generate_inner; + let cipher2 = t.encrypt(b"ticket 2").unwrap(); + assert_eq!(t.decrypt(&cipher1).unwrap(), b"ticket 1"); + assert_eq!(t.decrypt(&cipher2).unwrap(), b"ticket 2"); + { + // recover + t.maybe_roll(TimeBase(now.0 + std::time::Duration::from_secs(20))); + } + let cipher3 = t.encrypt(b"ticket 3").unwrap(); + assert!(t.decrypt(&cipher1).is_none()); + assert_eq!(t.decrypt(&cipher2).unwrap(), b"ticket 2"); + assert_eq!(t.decrypt(&cipher3).unwrap(), b"ticket 3"); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/vecbuf.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/vecbuf.rs new file mode 100644 index 0000000000000000000000000000000000000000..6126edd94496d5cc6103037b85463e2682868a46 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/vecbuf.rs @@ -0,0 +1,200 @@ +use std::cmp; +use std::collections::VecDeque; +use std::io; +use std::io::Read; + +/// This is a byte buffer that is built from a vector +/// of byte vectors. This avoids extra copies when +/// appending a new byte vector, at the expense of +/// more complexity when reading out. +pub(crate) struct ChunkVecBuffer { + chunks: VecDeque>, + limit: Option, +} + +impl ChunkVecBuffer { + pub(crate) fn new(limit: Option) -> Self { + Self { + chunks: VecDeque::new(), + limit, + } + } + + /// Sets the upper limit on how many bytes this + /// object can store. + /// + /// Setting a lower limit than the currently stored + /// data is not an error. + /// + /// A [`None`] limit is interpreted as no limit. 
+ pub(crate) fn set_limit(&mut self, new_limit: Option) { + self.limit = new_limit; + } + + /// If we're empty + pub(crate) fn is_empty(&self) -> bool { + self.chunks.is_empty() + } + + pub(crate) fn is_full(&self) -> bool { + self.limit + .map(|limit| self.len() > limit) + .unwrap_or_default() + } + + /// How many bytes we're storing + pub(crate) fn len(&self) -> usize { + let mut len = 0; + for ch in &self.chunks { + len += ch.len(); + } + len + } + + /// For a proposed append of `len` bytes, how many + /// bytes should we actually append to adhere to the + /// currently set `limit`? + pub(crate) fn apply_limit(&self, len: usize) -> usize { + if let Some(limit) = self.limit { + let space = limit.saturating_sub(self.len()); + cmp::min(len, space) + } else { + len + } + } + + /// Append a copy of `bytes`, perhaps a prefix if + /// we're near the limit. + pub(crate) fn append_limited_copy(&mut self, bytes: &[u8]) -> usize { + let take = self.apply_limit(bytes.len()); + self.append(bytes[..take].to_vec()); + take + } + + /// Take and append the given `bytes`. + pub(crate) fn append(&mut self, bytes: Vec) -> usize { + let len = bytes.len(); + + if !bytes.is_empty() { + self.chunks.push_back(bytes); + } + + len + } + + /// Take one of the chunks from this object. This + /// function panics if the object `is_empty`. + pub(crate) fn pop(&mut self) -> Option> { + self.chunks.pop_front() + } + + /// Read data out of this object, writing it into `buf` + /// and returning how many bytes were written there. + pub(crate) fn read(&mut self, buf: &mut [u8]) -> io::Result { + let mut offs = 0; + + while offs < buf.len() && !self.is_empty() { + let used = self.chunks[0] + .as_slice() + .read(&mut buf[offs..])?; + + self.consume(used); + offs += used; + } + + Ok(offs) + } + + #[cfg(read_buf)] + /// Read data out of this object, writing it into `cursor`. 
+ pub(crate) fn read_buf(&mut self, mut cursor: io::BorrowedCursor<'_>) -> io::Result<()> { + while !self.is_empty() && cursor.capacity() > 0 { + let chunk = self.chunks[0].as_slice(); + let used = std::cmp::min(chunk.len(), cursor.capacity()); + cursor.append(&chunk[..used]); + self.consume(used); + } + + Ok(()) + } + + fn consume(&mut self, mut used: usize) { + while let Some(mut buf) = self.chunks.pop_front() { + if used < buf.len() { + self.chunks + .push_front(buf.split_off(used)); + break; + } else { + used -= buf.len(); + } + } + } + + /// Read data out of this object, passing it `wr` + pub(crate) fn write_to(&mut self, wr: &mut dyn io::Write) -> io::Result { + if self.is_empty() { + return Ok(0); + } + + let mut bufs = [io::IoSlice::new(&[]); 64]; + for (iov, chunk) in bufs.iter_mut().zip(self.chunks.iter()) { + *iov = io::IoSlice::new(chunk); + } + let len = cmp::min(bufs.len(), self.chunks.len()); + let used = wr.write_vectored(&bufs[..len])?; + self.consume(used); + Ok(used) + } +} + +#[cfg(test)] +mod test { + use super::ChunkVecBuffer; + + #[test] + fn short_append_copy_with_limit() { + let mut cvb = ChunkVecBuffer::new(Some(12)); + assert_eq!(cvb.append_limited_copy(b"hello"), 5); + assert_eq!(cvb.append_limited_copy(b"world"), 5); + assert_eq!(cvb.append_limited_copy(b"hello"), 2); + assert_eq!(cvb.append_limited_copy(b"world"), 0); + + let mut buf = [0u8; 12]; + assert_eq!(cvb.read(&mut buf).unwrap(), 12); + assert_eq!(buf.to_vec(), b"helloworldhe".to_vec()); + } + + #[cfg(read_buf)] + #[test] + fn read_buf() { + use std::{io::BorrowedBuf, mem::MaybeUninit}; + + { + let mut cvb = ChunkVecBuffer::new(None); + cvb.append(b"test ".to_vec()); + cvb.append(b"fixture ".to_vec()); + cvb.append(b"data".to_vec()); + + let mut buf = [MaybeUninit::::uninit(); 8]; + let mut buf: BorrowedBuf<'_> = buf.as_mut_slice().into(); + cvb.read_buf(buf.unfilled()).unwrap(); + assert_eq!(buf.filled(), b"test fix"); + buf.clear(); + cvb.read_buf(buf.unfilled()).unwrap(); + 
assert_eq!(buf.filled(), b"ture dat"); + buf.clear(); + cvb.read_buf(buf.unfilled()).unwrap(); + assert_eq!(buf.filled(), b"a"); + } + + { + let mut cvb = ChunkVecBuffer::new(None); + cvb.append(b"short message".to_vec()); + + let mut buf = [MaybeUninit::::uninit(); 1024]; + let mut buf: BorrowedBuf<'_> = buf.as_mut_slice().into(); + cvb.read_buf(buf.unfilled()).unwrap(); + assert_eq!(buf.filled(), b"short message"); + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/verify.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/verify.rs new file mode 100644 index 0000000000000000000000000000000000000000..58fd7dc6b22de07d7582afb965538ff9602ec110 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/verify.rs @@ -0,0 +1,809 @@ +use std::fmt; + +use crate::anchors::{OwnedTrustAnchor, RootCertStore}; +use crate::client::ServerName; +use crate::enums::SignatureScheme; +use crate::error::Error; +use crate::key::Certificate; +#[cfg(feature = "logging")] +use crate::log::{debug, trace, warn}; +use crate::msgs::handshake::{DigitallySignedStruct, DistinguishedNames}; + +use ring::digest::Digest; + +use std::convert::TryFrom; +use std::sync::Arc; +use std::time::SystemTime; + +type SignatureAlgorithms = &'static [&'static webpki::SignatureAlgorithm]; + +/// Which signature verification mechanisms we support. No particular +/// order. +static SUPPORTED_SIG_ALGS: SignatureAlgorithms = &[ + &webpki::ECDSA_P256_SHA256, + &webpki::ECDSA_P256_SHA384, + &webpki::ECDSA_P384_SHA256, + &webpki::ECDSA_P384_SHA384, + &webpki::ED25519, + &webpki::RSA_PSS_2048_8192_SHA256_LEGACY_KEY, + &webpki::RSA_PSS_2048_8192_SHA384_LEGACY_KEY, + &webpki::RSA_PSS_2048_8192_SHA512_LEGACY_KEY, + &webpki::RSA_PKCS1_2048_8192_SHA256, + &webpki::RSA_PKCS1_2048_8192_SHA384, + &webpki::RSA_PKCS1_2048_8192_SHA512, + &webpki::RSA_PKCS1_3072_8192_SHA384, +]; + +// Marker types. 
These are used to bind the fact some verification +// (certificate chain or handshake signature) has taken place into +// protocol states. We use this to have the compiler check that there +// are no 'goto fail'-style elisions of important checks before we +// reach the traffic stage. +// +// These types are public, but cannot be directly constructed. This +// means their origins can be precisely determined by looking +// for their `assertion` constructors. + +/// Zero-sized marker type representing verification of a signature. +#[derive(Debug)] +#[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))] +pub struct HandshakeSignatureValid(()); + +impl HandshakeSignatureValid { + /// Make a `HandshakeSignatureValid` + pub fn assertion() -> Self { + Self(()) + } +} + +#[derive(Debug)] +pub(crate) struct FinishedMessageVerified(()); + +impl FinishedMessageVerified { + pub(crate) fn assertion() -> Self { + Self(()) + } +} + +/// Zero-sized marker type representing verification of a server cert chain. +#[allow(unreachable_pub)] +#[derive(Debug)] +#[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))] +pub struct ServerCertVerified(()); + +#[allow(unreachable_pub)] +impl ServerCertVerified { + /// Make a `ServerCertVerified` + pub fn assertion() -> Self { + Self(()) + } +} + +/// Zero-sized marker type representing verification of a client cert chain. +#[derive(Debug)] +#[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))] +pub struct ClientCertVerified(()); + +impl ClientCertVerified { + /// Make a `ClientCertVerified` + pub fn assertion() -> Self { + Self(()) + } +} + +/// Something that can verify a server certificate chain, and verify +/// signatures made by certificates. 
+#[allow(unreachable_pub)] +#[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))] +pub trait ServerCertVerifier: Send + Sync { + /// Verify the end-entity certificate `end_entity` is valid for the + /// hostname `dns_name` and chains to at least one trust anchor. + /// + /// `intermediates` contains all certificates other than `end_entity` that + /// were sent as part of the server's [Certificate] message. It is in the + /// same order that the server sent them and may be empty. + /// + /// Note that none of the certificates have been parsed yet, so it is the responsibility of + /// the implementor to handle invalid data. It is recommended that the implementor returns + /// [`Error::InvalidCertificateEncoding`] when these cases are encountered. + /// + /// `scts` contains the Signed Certificate Timestamps (SCTs) the server + /// sent with the end-entity certificate, if any. + /// + /// [Certificate]: https://datatracker.ietf.org/doc/html/rfc8446#section-4.4.2 + fn verify_server_cert( + &self, + end_entity: &Certificate, + intermediates: &[Certificate], + server_name: &ServerName, + scts: &mut dyn Iterator, + ocsp_response: &[u8], + now: SystemTime, + ) -> Result; + + /// Verify a signature allegedly by the given server certificate. + /// + /// `message` is not hashed, and needs hashing during the verification. + /// The signature and algorithm are within `dss`. `cert` contains the + /// public key to use. + /// + /// `cert` has already been validated by [`ServerCertVerifier::verify_server_cert`]. + /// + /// If and only if the signature is valid, return `Ok(HandshakeSignatureValid)`. + /// Otherwise, return an error -- rustls will send an alert and abort the + /// connection. + /// + /// This method is only called for TLS1.2 handshakes. Note that, in TLS1.2, + /// SignatureSchemes such as `SignatureScheme::ECDSA_NISTP256_SHA256` are not + /// in fact bound to the specific curve implied in their name. 
+ /// + /// This trait method has a default implementation that uses webpki to verify + /// the signature. + fn verify_tls12_signature( + &self, + message: &[u8], + cert: &Certificate, + dss: &DigitallySignedStruct, + ) -> Result { + verify_signed_struct(message, cert, dss) + } + + /// Verify a signature allegedly by the given server certificate. + /// + /// This method is only called for TLS1.3 handshakes. + /// + /// This method is very similar to `verify_tls12_signature`: but note the + /// tighter ECDSA SignatureScheme semantics -- e.g. `SignatureScheme::ECDSA_NISTP256_SHA256` + /// must only validate signatures using public keys on the right curve -- + /// rustls does not enforce this requirement for you. + /// + /// `cert` has already been validated by [`ServerCertVerifier::verify_server_cert`]. + /// + /// If and only if the signature is valid, return `Ok(HandshakeSignatureValid)`. + /// Otherwise, return an error -- rustls will send an alert and abort the + /// connection. + /// + /// This trait method has a default implementation that uses webpki to verify + /// the signature. + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &Certificate, + dss: &DigitallySignedStruct, + ) -> Result { + verify_tls13(message, cert, dss) + } + + /// Return the list of SignatureSchemes that this verifier will handle, + /// in `verify_tls12_signature` and `verify_tls13_signature` calls. + /// + /// This should be in priority order, with the most preferred first. + /// + /// This trait method has a default implementation that reflects the schemes + /// supported by webpki. + fn supported_verify_schemes(&self) -> Vec { + WebPkiVerifier::verification_schemes() + } + + /// Returns `true` if Rustls should ask the server to send SCTs. + /// + /// Signed Certificate Timestamps (SCTs) are used for Certificate + /// Transparency validation. + /// + /// The default implementation of this function returns true. 
+ fn request_scts(&self) -> bool { + true + } +} + +impl fmt::Debug for dyn ServerCertVerifier { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "dyn ServerCertVerifier") + } +} + +/// A type which encapsulates a string that is a syntactically valid DNS name. +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +#[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))] +pub struct DnsName(pub(crate) webpki::DnsName); + +impl AsRef for DnsName { + fn as_ref(&self) -> &str { + AsRef::::as_ref(&self.0) + } +} + +/// Something that can verify a client certificate chain +#[allow(unreachable_pub)] +#[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))] +pub trait ClientCertVerifier: Send + Sync { + /// Returns `true` to enable the server to request a client certificate and + /// `false` to skip requesting a client certificate. Defaults to `true`. + fn offer_client_auth(&self) -> bool { + true + } + + /// Return `Some(true)` to require a client certificate and `Some(false)` to make + /// client authentication optional. Return `None` to abort the connection. + /// Defaults to `Some(self.offer_client_auth())`. + fn client_auth_mandatory(&self) -> Option { + Some(self.offer_client_auth()) + } + + /// Returns the [Subjects] of the client authentication trust anchors to + /// share with the client when requesting client authentication. + /// + /// These must be DER-encoded X.500 distinguished names, per RFC 5280. + /// They are sent in the [`certificate_authorities`] extension of a + /// [`CertificateRequest`] message. + /// + /// [Subjects]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + /// [`CertificateRequest`]: https://datatracker.ietf.org/doc/html/rfc8446#section-4.3.2 + /// [`certificate_authorities`]: https://datatracker.ietf.org/doc/html/rfc8446#section-4.2.4 + /// + /// Return `None` to abort the connection. Return an empty `Vec` to continue + /// the handshake without sending a CertificateRequest message. 
+ fn client_auth_root_subjects(&self) -> Option; + + /// Verify the end-entity certificate `end_entity` is valid, acceptable, + /// and chains to at least one of the trust anchors trusted by + /// this verifier. + /// + /// `intermediates` contains the intermediate certificates the + /// client sent along with the end-entity certificate; it is in the same + /// order that the peer sent them and may be empty. + /// + /// Note that none of the certificates have been parsed yet, so it is the responsibility of + /// the implementor to handle invalid data. It is recommended that the implementor returns + /// [`Error::InvalidCertificateEncoding`] when these cases are encountered. + fn verify_client_cert( + &self, + end_entity: &Certificate, + intermediates: &[Certificate], + now: SystemTime, + ) -> Result; + + /// Verify a signature allegedly by the given client certificate. + /// + /// `message` is not hashed, and needs hashing during the verification. + /// The signature and algorithm are within `dss`. `cert` contains the + /// public key to use. + /// + /// `cert` has already been validated by [`ClientCertVerifier::verify_client_cert`]. + /// + /// If and only if the signature is valid, return `Ok(HandshakeSignatureValid)`. + /// Otherwise, return an error -- rustls will send an alert and abort the + /// connection. + /// + /// This method is only called for TLS1.2 handshakes. Note that, in TLS1.2, + /// SignatureSchemes such as `SignatureScheme::ECDSA_NISTP256_SHA256` are not + /// in fact bound to the specific curve implied in their name. + /// + /// This trait method has a default implementation that uses webpki to verify + /// the signature. + fn verify_tls12_signature( + &self, + message: &[u8], + cert: &Certificate, + dss: &DigitallySignedStruct, + ) -> Result { + verify_signed_struct(message, cert, dss) + } + + /// Verify a signature allegedly by the given client certificate. + /// + /// This method is only called for TLS1.3 handshakes. 
+ /// + /// This method is very similar to `verify_tls12_signature`, but note the + /// tighter ECDSA SignatureScheme semantics in TLS 1.3. For example, + /// `SignatureScheme::ECDSA_NISTP256_SHA256` + /// must only validate signatures using public keys on the right curve -- + /// rustls does not enforce this requirement for you. + /// + /// This trait method has a default implementation that uses webpki to verify + /// the signature. + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &Certificate, + dss: &DigitallySignedStruct, + ) -> Result { + verify_tls13(message, cert, dss) + } + + /// Return the list of SignatureSchemes that this verifier will handle, + /// in `verify_tls12_signature` and `verify_tls13_signature` calls. + /// + /// This should be in priority order, with the most preferred first. + /// + /// This trait method has a default implementation that reflects the schemes + /// supported by webpki. + fn supported_verify_schemes(&self) -> Vec { + WebPkiVerifier::verification_schemes() + } +} + +impl fmt::Debug for dyn ClientCertVerifier { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "dyn ClientCertVerifier") + } +} + +impl ServerCertVerifier for WebPkiVerifier { + /// Will verify the certificate is valid in the following ways: + /// - Signed by a trusted `RootCertStore` CA + /// - Not Expired + /// - Valid for DNS entry + fn verify_server_cert( + &self, + end_entity: &Certificate, + intermediates: &[Certificate], + server_name: &ServerName, + scts: &mut dyn Iterator, + ocsp_response: &[u8], + now: SystemTime, + ) -> Result { + let (cert, chain, trustroots) = prepare(end_entity, intermediates, &self.roots)?; + let webpki_now = webpki::Time::try_from(now).map_err(|_| Error::FailedToGetCurrentTime)?; + + let dns_name = match server_name { + ServerName::DnsName(dns_name) => dns_name, + ServerName::IpAddress(_) => { + return Err(Error::UnsupportedNameType); + } + }; + + let cert = cert + 
.verify_is_valid_tls_server_cert( + SUPPORTED_SIG_ALGS, + &webpki::TlsServerTrustAnchors(&trustroots), + &chain, + webpki_now, + ) + .map_err(pki_error) + .map(|_| cert)?; + + if let Some(policy) = &self.ct_policy { + policy.verify(end_entity, now, scts)?; + } + + if !ocsp_response.is_empty() { + trace!("Unvalidated OCSP response: {:?}", ocsp_response.to_vec()); + } + + cert.verify_is_valid_for_dns_name(dns_name.0.as_ref()) + .map_err(pki_error) + .map(|_| ServerCertVerified::assertion()) + } +} + +/// Default `ServerCertVerifier`, see the trait impl for more information. +#[allow(unreachable_pub)] +#[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))] +pub struct WebPkiVerifier { + roots: RootCertStore, + ct_policy: Option, +} + +#[allow(unreachable_pub)] +impl WebPkiVerifier { + /// Constructs a new `WebPkiVerifier`. + /// + /// `roots` is the set of trust anchors to trust for issuing server certs. + /// + /// `ct_logs` is the list of logs that are trusted for Certificate + /// Transparency. Currently CT log enforcement is opportunistic; see + /// . + pub fn new(roots: RootCertStore, ct_policy: Option) -> Self { + Self { roots, ct_policy } + } + + /// Returns the signature verification methods supported by + /// webpki. + pub fn verification_schemes() -> Vec { + vec![ + SignatureScheme::ECDSA_NISTP384_SHA384, + SignatureScheme::ECDSA_NISTP256_SHA256, + SignatureScheme::ED25519, + SignatureScheme::RSA_PSS_SHA512, + SignatureScheme::RSA_PSS_SHA384, + SignatureScheme::RSA_PSS_SHA256, + SignatureScheme::RSA_PKCS1_SHA512, + SignatureScheme::RSA_PKCS1_SHA384, + SignatureScheme::RSA_PKCS1_SHA256, + ] + } +} + +/// Policy for enforcing Certificate Transparency. +/// +/// Because Certificate Transparency logs are sharded on a per-year basis and can be trusted or +/// distrusted relatively quickly, rustls stores a validation deadline. Server certificates will +/// be validated against the configured CT logs until the deadline expires. 
After the deadline, +/// certificates will no longer be validated, and a warning message will be logged. The deadline +/// may vary depending on how often you deploy builds with updated dependencies. +#[allow(unreachable_pub)] +#[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))] +pub struct CertificateTransparencyPolicy { + logs: &'static [&'static sct::Log<'static>], + validation_deadline: SystemTime, +} + +impl CertificateTransparencyPolicy { + /// Create a new policy. + #[allow(unreachable_pub)] + pub fn new( + logs: &'static [&'static sct::Log<'static>], + validation_deadline: SystemTime, + ) -> Self { + Self { + logs, + validation_deadline, + } + } + + fn verify( + &self, + cert: &Certificate, + now: SystemTime, + scts: &mut dyn Iterator, + ) -> Result<(), Error> { + if self.logs.is_empty() { + return Ok(()); + } else if self + .validation_deadline + .duration_since(now) + .is_err() + { + warn!("certificate transparency logs have expired, validation disabled"); + return Ok(()); + } + + let now = unix_time_millis(now)?; + let mut last_sct_error = None; + for sct in scts { + #[cfg_attr(not(feature = "logging"), allow(unused_variables))] + match sct::verify_sct(&cert.0, sct, now, self.logs) { + Ok(index) => { + debug!( + "Valid SCT signed by {} on {}", + self.logs[index].operated_by, self.logs[index].description + ); + return Ok(()); + } + Err(e) => { + if e.should_be_fatal() { + return Err(Error::InvalidSct(e)); + } + debug!("SCT ignored because {:?}", e); + last_sct_error = Some(e); + } + } + } + + /* If we were supplied with some logs, and some SCTs, + * but couldn't verify any of them, fail the handshake. 
*/ + if let Some(last_sct_error) = last_sct_error { + warn!("No valid SCTs provided"); + return Err(Error::InvalidSct(last_sct_error)); + } + + Ok(()) + } +} + +type CertChainAndRoots<'a, 'b> = ( + webpki::EndEntityCert<'a>, + Vec<&'a [u8]>, + Vec>, +); + +fn prepare<'a, 'b>( + end_entity: &'a Certificate, + intermediates: &'a [Certificate], + roots: &'b RootCertStore, +) -> Result, Error> { + // EE cert must appear first. + let cert = webpki::EndEntityCert::try_from(end_entity.0.as_ref()).map_err(pki_error)?; + + let intermediates: Vec<&'a [u8]> = intermediates + .iter() + .map(|cert| cert.0.as_ref()) + .collect(); + + let trustroots: Vec = roots + .roots + .iter() + .map(OwnedTrustAnchor::to_trust_anchor) + .collect(); + + Ok((cert, intermediates, trustroots)) +} + +/// A `ClientCertVerifier` that will ensure that every client provides a trusted +/// certificate, without any name checking. +pub struct AllowAnyAuthenticatedClient { + roots: RootCertStore, +} + +impl AllowAnyAuthenticatedClient { + /// Construct a new `AllowAnyAuthenticatedClient`. + /// + /// `roots` is the list of trust anchors to use for certificate validation. 
+    pub fn new(roots: RootCertStore) -> Arc<dyn ClientCertVerifier> {
+        Arc::new(Self { roots })
+    }
+}
+
+impl ClientCertVerifier for AllowAnyAuthenticatedClient {
+    fn offer_client_auth(&self) -> bool {
+        true
+    }
+
+    #[allow(deprecated)]
+    fn client_auth_root_subjects(&self) -> Option<DistinguishedNames> {
+        Some(self.roots.subjects())
+    }
+
+    /// Verify the client chain against `self.roots`; no name checking is done.
+    fn verify_client_cert(
+        &self,
+        end_entity: &Certificate,
+        intermediates: &[Certificate],
+        now: SystemTime,
+    ) -> Result<ClientCertVerified, Error> {
+        let (cert, chain, trustroots) = prepare(end_entity, intermediates, &self.roots)?;
+        let now = webpki::Time::try_from(now).map_err(|_| Error::FailedToGetCurrentTime)?;
+        cert.verify_is_valid_tls_client_cert(
+            SUPPORTED_SIG_ALGS,
+            &webpki::TlsClientTrustAnchors(&trustroots),
+            &chain,
+            now,
+        )
+        .map_err(pki_error)
+        .map(|_| ClientCertVerified::assertion())
+    }
+}
+
+/// A `ClientCertVerifier` that will allow both anonymous and authenticated
+/// clients, without any name checking.
+///
+/// Client authentication will be requested during the TLS handshake. If the
+/// client offers a certificate then this acts like
+/// `AllowAnyAuthenticatedClient`, otherwise this acts like `NoClientAuth`.
+pub struct AllowAnyAnonymousOrAuthenticatedClient {
+    inner: AllowAnyAuthenticatedClient,
+}
+
+impl AllowAnyAnonymousOrAuthenticatedClient {
+    /// Construct a new `AllowAnyAnonymousOrAuthenticatedClient`.
+    ///
+    /// `roots` is the list of trust anchors to use for certificate validation.
+    pub fn new(roots: RootCertStore) -> Arc<dyn ClientCertVerifier> {
+        Arc::new(Self {
+            inner: AllowAnyAuthenticatedClient { roots },
+        })
+    }
+}
+
+impl ClientCertVerifier for AllowAnyAnonymousOrAuthenticatedClient {
+    fn offer_client_auth(&self) -> bool {
+        self.inner.offer_client_auth()
+    }
+
+    // Auth is requested but not required; that's the only difference from `inner`.
+    fn client_auth_mandatory(&self) -> Option<bool> {
+        Some(false)
+    }
+
+    fn client_auth_root_subjects(&self) -> Option<DistinguishedNames> {
+        self.inner.client_auth_root_subjects()
+    }
+
+    fn verify_client_cert(
+        &self,
+        end_entity: &Certificate,
+        intermediates: &[Certificate],
+        now: SystemTime,
+    ) -> Result<ClientCertVerified, Error> {
+        self.inner
+            .verify_client_cert(end_entity, intermediates, now)
+    }
+}
+
+/// Map a webpki verification error onto rustls' error type.
+fn pki_error(error: webpki::Error) -> Error {
+    use webpki::Error::*;
+    match error {
+        BadDer | BadDerTime => Error::InvalidCertificateEncoding,
+        InvalidSignatureForPublicKey => Error::InvalidCertificateSignature,
+        UnsupportedSignatureAlgorithm | UnsupportedSignatureAlgorithmForPublicKey => {
+            Error::InvalidCertificateSignatureType
+        }
+        e => Error::InvalidCertificateData(format!("invalid peer certificate: {}", e)),
+    }
+}
+
+/// Turns off client authentication.
+pub struct NoClientAuth;
+
+impl NoClientAuth {
+    /// Constructs a `NoClientAuth` and wraps it in an `Arc`.
+    pub fn new() -> Arc<dyn ClientCertVerifier> {
+        Arc::new(Self)
+    }
+}
+
+impl ClientCertVerifier for NoClientAuth {
+    fn offer_client_auth(&self) -> bool {
+        false
+    }
+
+    // offer_client_auth() is false, so rustls should never reach these;
+    // they panic if a caller does.
+    fn client_auth_root_subjects(&self) -> Option<DistinguishedNames> {
+        unimplemented!();
+    }
+
+    fn verify_client_cert(
+        &self,
+        _end_entity: &Certificate,
+        _intermediates: &[Certificate],
+        _now: SystemTime,
+    ) -> Result<ClientCertVerified, Error> {
+        unimplemented!();
+    }
+}
+
+static ECDSA_SHA256: SignatureAlgorithms =
+    &[&webpki::ECDSA_P256_SHA256, &webpki::ECDSA_P384_SHA256];
+
+static ECDSA_SHA384: SignatureAlgorithms =
+    &[&webpki::ECDSA_P256_SHA384, &webpki::ECDSA_P384_SHA384];
+
+static ED25519: SignatureAlgorithms = &[&webpki::ED25519];
+
+static RSA_SHA256: SignatureAlgorithms = &[&webpki::RSA_PKCS1_2048_8192_SHA256];
+static RSA_SHA384: SignatureAlgorithms = &[&webpki::RSA_PKCS1_2048_8192_SHA384];
+static RSA_SHA512: SignatureAlgorithms = &[&webpki::RSA_PKCS1_2048_8192_SHA512];
+static RSA_PSS_SHA256: SignatureAlgorithms = &[&webpki::RSA_PSS_2048_8192_SHA256_LEGACY_KEY];
+static RSA_PSS_SHA384: SignatureAlgorithms = &[&webpki::RSA_PSS_2048_8192_SHA384_LEGACY_KEY];
+static RSA_PSS_SHA512: SignatureAlgorithms = &[&webpki::RSA_PSS_2048_8192_SHA512_LEGACY_KEY];
+
+/// Map a TLS1.2 `SignatureScheme` onto the set of webpki algorithms to try.
+fn convert_scheme(scheme: SignatureScheme) -> Result<SignatureAlgorithms, Error> {
+    match scheme {
+        // nb. for TLS1.2 the curve is not fixed by SignatureScheme.
+        SignatureScheme::ECDSA_NISTP256_SHA256 => Ok(ECDSA_SHA256),
+        SignatureScheme::ECDSA_NISTP384_SHA384 => Ok(ECDSA_SHA384),
+
+        SignatureScheme::ED25519 => Ok(ED25519),
+
+        SignatureScheme::RSA_PKCS1_SHA256 => Ok(RSA_SHA256),
+        SignatureScheme::RSA_PKCS1_SHA384 => Ok(RSA_SHA384),
+        SignatureScheme::RSA_PKCS1_SHA512 => Ok(RSA_SHA512),
+
+        SignatureScheme::RSA_PSS_SHA256 => Ok(RSA_PSS_SHA256),
+        SignatureScheme::RSA_PSS_SHA384 => Ok(RSA_PSS_SHA384),
+        SignatureScheme::RSA_PSS_SHA512 => Ok(RSA_PSS_SHA512),
+
+        _ => {
+            let error_msg = format!("received unadvertised sig scheme {:?}", scheme);
+            Err(Error::PeerMisbehavedError(error_msg))
+        }
+    }
+}
+
+fn verify_sig_using_any_alg(
+    cert: &webpki::EndEntityCert,
+    algs: SignatureAlgorithms,
+    message: &[u8],
+    sig: &[u8],
+) -> Result<(), webpki::Error> {
+    // TLS doesn't itself give us enough info to map to a single webpki::SignatureAlgorithm.
+    // Therefore, convert_algs maps to several and we try them all.
+    for alg in algs {
+        match cert.verify_signature(alg, message, sig) {
+            Err(webpki::Error::UnsupportedSignatureAlgorithmForPublicKey) => continue,
+            res => return res,
+        }
+    }
+
+    Err(webpki::Error::UnsupportedSignatureAlgorithmForPublicKey)
+}
+
+/// Verify a TLS1.2 `DigitallySignedStruct` over `message` using `cert`'s key.
+fn verify_signed_struct(
+    message: &[u8],
+    cert: &Certificate,
+    dss: &DigitallySignedStruct,
+) -> Result<HandshakeSignatureValid, Error> {
+    let possible_algs = convert_scheme(dss.scheme)?;
+    let cert = webpki::EndEntityCert::try_from(cert.0.as_ref()).map_err(pki_error)?;
+
+    verify_sig_using_any_alg(&cert, possible_algs, message, dss.signature())
+        .map_err(pki_error)
+        .map(|_| HandshakeSignatureValid::assertion())
+}
+
+/// Map a TLS1.3 `SignatureScheme` onto the single webpki algorithm it names.
+fn convert_alg_tls13(
+    scheme: SignatureScheme,
+) -> Result<&'static webpki::SignatureAlgorithm, Error> {
+    use crate::enums::SignatureScheme::*;
+
+    match scheme {
+        ECDSA_NISTP256_SHA256 => Ok(&webpki::ECDSA_P256_SHA256),
+        ECDSA_NISTP384_SHA384 => Ok(&webpki::ECDSA_P384_SHA384),
+        ED25519 => Ok(&webpki::ED25519),
+        RSA_PSS_SHA256 => Ok(&webpki::RSA_PSS_2048_8192_SHA256_LEGACY_KEY),
+        RSA_PSS_SHA384 => Ok(&webpki::RSA_PSS_2048_8192_SHA384_LEGACY_KEY),
+        RSA_PSS_SHA512 => Ok(&webpki::RSA_PSS_2048_8192_SHA512_LEGACY_KEY),
+        _ => {
+            let error_msg = format!("received unsupported sig scheme {:?}", scheme);
+            Err(Error::PeerMisbehavedError(error_msg))
+        }
+    }
+}
+
+/// Constructs the signature message specified in section 4.4.3 of RFC8446.
+pub(crate) fn construct_tls13_client_verify_message(handshake_hash: &Digest) -> Vec<u8> {
+    construct_tls13_verify_message(handshake_hash, b"TLS 1.3, client CertificateVerify\x00")
+}
+
+/// Constructs the signature message specified in section 4.4.3 of RFC8446.
+pub(crate) fn construct_tls13_server_verify_message(handshake_hash: &Digest) -> Vec<u8> {
+    construct_tls13_verify_message(handshake_hash, b"TLS 1.3, server CertificateVerify\x00")
+}
+
+fn construct_tls13_verify_message(
+    handshake_hash: &Digest,
+    context_string_with_0: &[u8],
+) -> Vec<u8> {
+    // 64 bytes of 0x20 padding, the NUL-terminated context string, then the
+    // transcript hash (RFC 8446 section 4.4.3 layout).
+    let mut msg = Vec::new();
+    msg.resize(64, 0x20u8);
+    msg.extend_from_slice(context_string_with_0);
+    msg.extend_from_slice(handshake_hash.as_ref());
+    msg
+}
+
+/// Verify a TLS1.3 signature over `msg` using `cert`'s key.
+fn verify_tls13(
+    msg: &[u8],
+    cert: &Certificate,
+    dss: &DigitallySignedStruct,
+) -> Result<HandshakeSignatureValid, Error> {
+    let alg = convert_alg_tls13(dss.scheme)?;
+
+    let cert = webpki::EndEntityCert::try_from(cert.0.as_ref()).map_err(pki_error)?;
+
+    cert.verify_signature(alg, msg, dss.signature())
+        .map_err(pki_error)
+        .map(|_| HandshakeSignatureValid::assertion())
+}
+
+/// Convert `now` to milliseconds since the unix epoch (whole-second precision).
+fn unix_time_millis(now: SystemTime) -> Result<u64, Error> {
+    now.duration_since(std::time::UNIX_EPOCH)
+        .map(|dur| dur.as_secs())
+        .map_err(|_| Error::FailedToGetCurrentTime)
+        .and_then(|secs| {
+            secs.checked_mul(1000)
+                .ok_or(Error::FailedToGetCurrentTime)
+        })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn assertions_are_debug() {
+        assert_eq!(
+            format!("{:?}", ClientCertVerified::assertion()),
+            "ClientCertVerified(())"
+        );
+        assert_eq!(
+            format!("{:?}", HandshakeSignatureValid::assertion()),
"HandshakeSignatureValid(())" + ); + assert_eq!( + format!("{:?}", FinishedMessageVerified::assertion()), + "FinishedMessageVerified(())" + ); + assert_eq!( + format!("{:?}", ServerCertVerified::assertion()), + "ServerCertVerified(())" + ); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/verifybench.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/verifybench.rs new file mode 100644 index 0000000000000000000000000000000000000000..9ddb2711d851cfb8eef7a66c682aa538c818e987 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/verifybench.rs @@ -0,0 +1,245 @@ +// This program does benchmarking of the functions in verify.rs, +// that do certificate chain validation and signature verification. +// +// Note: we don't use any of the standard 'cargo bench', 'test::Bencher', +// etc. because it's unstable at the time of writing. + +use std::convert::TryInto; +use std::time::{Duration, Instant, SystemTime}; + +use crate::key; +use crate::verify; +use crate::verify::ServerCertVerifier; +use crate::{anchors, OwnedTrustAnchor}; + +use webpki_roots; + +fn duration_nanos(d: Duration) -> u64 { + ((d.as_secs() as f64) * 1e9 + (d.subsec_nanos() as f64)) as u64 +} + +#[test] +fn test_reddit_cert() { + Context::new( + "reddit", + "reddit.com", + &[ + include_bytes!("testdata/cert-reddit.0.der"), + include_bytes!("testdata/cert-reddit.1.der"), + ], + ) + .bench(100) +} + +#[test] +fn test_github_cert() { + Context::new( + "github", + "github.com", + &[ + include_bytes!("testdata/cert-github.0.der"), + include_bytes!("testdata/cert-github.1.der"), + ], + ) + .bench(100) +} + +#[test] +fn test_arstechnica_cert() { + Context::new( + "arstechnica", + "arstechnica.com", + &[ + include_bytes!("testdata/cert-arstechnica.0.der"), + include_bytes!("testdata/cert-arstechnica.1.der"), + include_bytes!("testdata/cert-arstechnica.2.der"), + include_bytes!("testdata/cert-arstechnica.3.der"), + 
], + ) + .bench(100) +} + +#[test] +fn test_servo_cert() { + Context::new( + "servo", + "servo.org", + &[ + include_bytes!("testdata/cert-servo.0.der"), + include_bytes!("testdata/cert-servo.1.der"), + ], + ) + .bench(100) +} + +#[test] +fn test_twitter_cert() { + Context::new( + "twitter", + "twitter.com", + &[ + include_bytes!("testdata/cert-twitter.0.der"), + include_bytes!("testdata/cert-twitter.1.der"), + ], + ) + .bench(100) +} + +#[test] +fn test_wikipedia_cert() { + Context::new( + "wikipedia", + "wikipedia.org", + &[ + include_bytes!("testdata/cert-wikipedia.0.der"), + include_bytes!("testdata/cert-wikipedia.1.der"), + ], + ) + .bench(100) +} + +#[test] +fn test_google_cert() { + Context::new( + "google", + "www.google.com", + &[ + include_bytes!("testdata/cert-google.0.der"), + include_bytes!("testdata/cert-google.1.der"), + ], + ) + .bench(100) +} + +#[test] +fn test_hn_cert() { + Context::new( + "hn", + "news.ycombinator.com", + &[ + include_bytes!("testdata/cert-hn.0.der"), + include_bytes!("testdata/cert-hn.1.der"), + ], + ) + .bench(100) +} + +#[test] +fn test_stackoverflow_cert() { + Context::new( + "stackoverflow", + "stackoverflow.com", + &[ + include_bytes!("testdata/cert-stackoverflow.0.der"), + include_bytes!("testdata/cert-stackoverflow.1.der"), + ], + ) + .bench(100) +} + +#[test] +fn test_duckduckgo_cert() { + Context::new( + "duckduckgo", + "duckduckgo.com", + &[ + include_bytes!("testdata/cert-duckduckgo.0.der"), + include_bytes!("testdata/cert-duckduckgo.1.der"), + ], + ) + .bench(100) +} + +#[test] +fn test_rustlang_cert() { + Context::new( + "rustlang", + "www.rust-lang.org", + &[ + include_bytes!("testdata/cert-rustlang.0.der"), + include_bytes!("testdata/cert-rustlang.1.der"), + include_bytes!("testdata/cert-rustlang.2.der"), + ], + ) + .bench(100) +} + +#[test] +fn test_wapo_cert() { + Context::new( + "wapo", + "www.washingtonpost.com", + &[ + include_bytes!("testdata/cert-wapo.0.der"), + include_bytes!("testdata/cert-wapo.1.der"), + 
],
+    )
+    .bench(100)
+}
+
+/// Bench fixture: a trust store, a captured certificate chain, and the
+/// wall-clock instant the chains were valid at.
+struct Context {
+    name: &'static str,
+    domain: &'static str,
+    roots: anchors::RootCertStore,
+    chain: Vec<key::Certificate>,
+    now: SystemTime,
+}
+
+impl Context {
+    fn new(name: &'static str, domain: &'static str, certs: &[&'static [u8]]) -> Self {
+        let mut roots = anchors::RootCertStore::empty();
+        roots.add_server_trust_anchors(
+            webpki_roots::TLS_SERVER_ROOTS
+                .0
+                .iter()
+                .map(|ta| {
+                    OwnedTrustAnchor::from_subject_spki_name_constraints(
+                        ta.subject,
+                        ta.spki,
+                        ta.name_constraints,
+                    )
+                }),
+        );
+        Self {
+            name,
+            domain,
+            roots,
+            chain: certs
+                .iter()
+                .copied()
+                .map(|bytes| key::Certificate(bytes.to_vec()))
+                .collect(),
+            // Fixed timestamp so the captured chains stay within validity.
+            now: SystemTime::UNIX_EPOCH + Duration::from_secs(1640870720),
+        }
+    }
+
+    /// Run `verify_server_cert` `count` times and print the minimum duration.
+    fn bench(&self, count: usize) {
+        let verifier = verify::WebPkiVerifier::new(self.roots.clone(), None);
+        const SCTS: &[&[u8]] = &[];
+        const OCSP_RESPONSE: &[u8] = &[];
+        let mut times = Vec::new();
+
+        let (end_entity, intermediates) = self.chain.split_first().unwrap();
+        for _ in 0..count {
+            let start = Instant::now();
+            let server_name = self.domain.try_into().unwrap();
+            verifier
+                .verify_server_cert(
+                    end_entity,
+                    intermediates,
+                    &server_name,
+                    &mut SCTS.iter().copied(),
+                    OCSP_RESPONSE,
+                    self.now,
+                )
+                .unwrap();
+            times.push(duration_nanos(Instant::now().duration_since(start)));
+        }
+
+        println!(
+            "verify_server_cert({}): min {:?}us",
+            self.name,
+            times.iter().min().unwrap() / 1000
+        );
+    }
+}
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/versions.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/versions.rs
new file mode 100644
index 0000000000000000000000000000000000000000..738453fcc0fca43c097a5990b53c1c6abeee0d30
--- /dev/null
+++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/versions.rs
@@ -0,0 +1,99 @@
+use std::fmt;
+
+use crate::enums::ProtocolVersion;
+
+/// A TLS protocol version supported by rustls.
+///
+/// All possible instances of this class are provided by the library in
+/// the [`ALL_VERSIONS`] array, as well as individually as [`TLS12`]
+/// and [`TLS13`].
+#[derive(Eq, PartialEq)]
+pub struct SupportedProtocolVersion {
+    /// The TLS enumeration naming this version.
+    pub version: ProtocolVersion,
+    // Private unit field: stops code outside this crate constructing values,
+    // so the statics below are the only possible instances.
+    is_private: (),
+}
+
+impl fmt::Debug for SupportedProtocolVersion {
+    // Debug output is just the inner ProtocolVersion's Debug form.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.version.fmt(f)
+    }
+}
+
+/// TLS1.2
+#[cfg(feature = "tls12")]
+pub static TLS12: SupportedProtocolVersion = SupportedProtocolVersion {
+    version: ProtocolVersion::TLSv1_2,
+    is_private: (),
+};
+
+/// TLS1.3
+pub static TLS13: SupportedProtocolVersion = SupportedProtocolVersion {
+    version: ProtocolVersion::TLSv1_3,
+    is_private: (),
+};
+
+/// A list of all the protocol versions supported by rustls.
+pub static ALL_VERSIONS: &[&SupportedProtocolVersion] = &[
+    &TLS13,
+    #[cfg(feature = "tls12")]
+    &TLS12,
+];
+
+/// The version configuration that an application should use by default.
+///
+/// This will be [`ALL_VERSIONS`] for now, but gives space in the future
+/// to remove a version from here and require users to opt-in to older
+/// versions.
+pub static DEFAULT_VERSIONS: &[&SupportedProtocolVersion] = ALL_VERSIONS;
+
+// Internal record of which versions a config enabled, one optional slot
+// per known version.
+#[derive(Clone)]
+pub(crate) struct EnabledVersions {
+    #[cfg(feature = "tls12")]
+    tls12: Option<&'static SupportedProtocolVersion>,
+    tls13: Option<&'static SupportedProtocolVersion>,
+}
+
+impl fmt::Debug for EnabledVersions {
+    // Renders as a list of only the enabled versions, e.g. `[TLSv1_3]`.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut list = &mut f.debug_list();
+        #[cfg(feature = "tls12")]
+        if let Some(v) = self.tls12 {
+            list = list.entry(v)
+        }
+        if let Some(v) = self.tls13 {
+            list = list.entry(v)
+        }
+        list.finish()
+    }
+}
+
+impl EnabledVersions {
+    // Build from a caller-supplied list; unknown versions are ignored.
+    pub(crate) fn new(versions: &[&'static SupportedProtocolVersion]) -> Self {
+        let mut ev = Self {
+            #[cfg(feature = "tls12")]
+            tls12: None,
+            tls13: None,
+        };
+
+        for v in versions {
+            match v.version {
+                #[cfg(feature = "tls12")]
+                ProtocolVersion::TLSv1_2 => ev.tls12 = Some(v),
+                ProtocolVersion::TLSv1_3 => ev.tls13 = Some(v),
+                _ => {}
+            }
+        }
+
+        ev
+    }
+
+    // True when `version` was present in the list given to `new`.
+    pub(crate) fn contains(&self, version: ProtocolVersion) -> bool {
+        match version {
+            #[cfg(feature = "tls12")]
+            ProtocolVersion::TLSv1_2 => self.tls12.is_some(),
+            ProtocolVersion::TLSv1_3 => self.tls13.is_some(),
+            _ => false,
+        }
+    }
+}
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/x509.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/x509.rs
new file mode 100644
index 0000000000000000000000000000000000000000..17239d74adce791ba66813de665f8858ba620ad8
--- /dev/null
+++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/src/x509.rs
@@ -0,0 +1,93 @@
+// Additional x509/asn1 functions to those provided in webpki/ring.
+ +use ring::io::der; + +pub(crate) fn wrap_in_asn1_len(bytes: &mut Vec) { + let len = bytes.len(); + + if len <= 0x7f { + bytes.insert(0, len as u8); + } else { + bytes.insert(0, 0x80u8); + let mut left = len; + while left > 0 { + let byte = (left & 0xff) as u8; + bytes.insert(1, byte); + bytes[0] += 1; + left >>= 8; + } + } +} + +/// Prepend stuff to `bytes` to put it in a DER SEQUENCE. +pub(crate) fn wrap_in_sequence(bytes: &mut Vec) { + wrap_in_asn1_len(bytes); + bytes.insert(0, der::Tag::Sequence as u8); +} + +#[test] +fn test_empty() { + let mut val = Vec::new(); + wrap_in_sequence(&mut val); + assert_eq!(vec![0x30, 0x00], val); +} + +#[test] +fn test_small() { + let mut val = Vec::new(); + val.insert(0, 0x00); + val.insert(1, 0x11); + val.insert(2, 0x22); + val.insert(3, 0x33); + wrap_in_sequence(&mut val); + assert_eq!(vec![0x30, 0x04, 0x00, 0x11, 0x22, 0x33], val); +} + +#[test] +fn test_medium() { + let mut val = Vec::new(); + val.resize(255, 0x12); + wrap_in_sequence(&mut val); + assert_eq!(vec![0x30, 0x81, 0xff, 0x12, 0x12, 0x12], val[..6].to_vec()); +} + +#[test] +fn test_large() { + let mut val = Vec::new(); + val.resize(4660, 0x12); + wrap_in_sequence(&mut val); + assert_eq!(vec![0x30, 0x82, 0x12, 0x34, 0x12, 0x12], val[..6].to_vec()); +} + +#[test] +fn test_huge() { + let mut val = Vec::new(); + val.resize(0xffff, 0x12); + wrap_in_sequence(&mut val); + assert_eq!(vec![0x30, 0x82, 0xff, 0xff, 0x12, 0x12], val[..6].to_vec()); + assert_eq!(val.len(), 0xffff + 4); +} + +#[test] +fn test_gigantic() { + let mut val = Vec::new(); + val.resize(0x100000, 0x12); + wrap_in_sequence(&mut val); + assert_eq!( + vec![0x30, 0x83, 0x10, 0x00, 0x00, 0x12, 0x12], + val[..7].to_vec() + ); + assert_eq!(val.len(), 0x100000 + 5); +} + +#[test] +fn test_ludicrous() { + let mut val = Vec::new(); + val.resize(0x1000000, 0x12); + wrap_in_sequence(&mut val); + assert_eq!( + vec![0x30, 0x84, 0x01, 0x00, 0x00, 0x00, 0x12, 0x12], + val[..8].to_vec() + ); + assert_eq!(val.len(), 
0x1000000 + 6); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/api.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/api.rs new file mode 100644 index 0000000000000000000000000000000000000000..8ef18e0ec9df9522981ee0d265ac8e7eba3f0e4e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/api.rs @@ -0,0 +1,4445 @@ +//! Assorted public API tests. +use std::cell::RefCell; +use std::convert::TryFrom; +#[cfg(feature = "tls12")] +use std::convert::TryInto; +use std::fmt; +use std::io::{self, IoSlice, Read, Write}; +use std::mem; +use std::ops::{Deref, DerefMut}; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; +use std::sync::Mutex; + +use log; + +use rustls::client::ResolvesClientCert; +use rustls::internal::msgs::base::Payload; +use rustls::internal::msgs::codec::Codec; +#[cfg(feature = "quic")] +use rustls::quic::{self, ClientQuicExt, QuicExt, ServerQuicExt}; +use rustls::server::{AllowAnyAnonymousOrAuthenticatedClient, ClientHello, ResolvesServerCert}; +#[cfg(feature = "secret_extraction")] +use rustls::ConnectionTrafficSecrets; +use rustls::{sign, ConnectionCommon, Error, KeyLog, SideData}; +use rustls::{CipherSuite, ProtocolVersion, SignatureScheme}; +use rustls::{ClientConfig, ClientConnection}; +use rustls::{ServerConfig, ServerConnection}; +use rustls::{Stream, StreamOwned}; +use rustls::{SupportedCipherSuite, ALL_CIPHER_SUITES}; + +mod common; +use crate::common::*; + +fn alpn_test_error( + server_protos: Vec>, + client_protos: Vec>, + agreed: Option<&[u8]>, + expected_error: Option, +) { + let mut server_config = make_server_config(KeyType::Rsa); + server_config.alpn_protocols = server_protos; + + let server_config = Arc::new(server_config); + + for version in rustls::ALL_VERSIONS { + let mut client_config = make_client_config_with_versions(KeyType::Rsa, &[version]); + client_config.alpn_protocols = client_protos.clone(); + + let 
(mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + + assert_eq!(client.alpn_protocol(), None); + assert_eq!(server.alpn_protocol(), None); + let error = do_handshake_until_error(&mut client, &mut server); + assert_eq!(client.alpn_protocol(), agreed); + assert_eq!(server.alpn_protocol(), agreed); + assert_eq!(error.err(), expected_error); + } +} + +fn alpn_test(server_protos: Vec>, client_protos: Vec>, agreed: Option<&[u8]>) { + alpn_test_error(server_protos, client_protos, agreed, None) +} + +#[test] +fn alpn() { + // no support + alpn_test(vec![], vec![], None); + + // server support + alpn_test(vec![b"server-proto".to_vec()], vec![], None); + + // client support + alpn_test(vec![], vec![b"client-proto".to_vec()], None); + + // no overlap + alpn_test_error( + vec![b"server-proto".to_vec()], + vec![b"client-proto".to_vec()], + None, + Some(ErrorFromPeer::Server(Error::NoApplicationProtocol)), + ); + + // server chooses preference + alpn_test( + vec![b"server-proto".to_vec(), b"client-proto".to_vec()], + vec![b"client-proto".to_vec(), b"server-proto".to_vec()], + Some(b"server-proto"), + ); + + // case sensitive + alpn_test_error( + vec![b"PROTO".to_vec()], + vec![b"proto".to_vec()], + None, + Some(ErrorFromPeer::Server(Error::NoApplicationProtocol)), + ); +} + +fn version_test( + client_versions: &[&'static rustls::SupportedProtocolVersion], + server_versions: &[&'static rustls::SupportedProtocolVersion], + result: Option, +) { + let client_versions = if client_versions.is_empty() { + &rustls::ALL_VERSIONS + } else { + client_versions + }; + let server_versions = if server_versions.is_empty() { + &rustls::ALL_VERSIONS + } else { + server_versions + }; + + let client_config = make_client_config_with_versions(KeyType::Rsa, client_versions); + let server_config = make_server_config_with_versions(KeyType::Rsa, server_versions); + + println!( + "version {:?} {:?} -> {:?}", + client_versions, server_versions, result + ); + 
+ let (mut client, mut server) = make_pair_for_configs(client_config, server_config); + + assert_eq!(client.protocol_version(), None); + assert_eq!(server.protocol_version(), None); + if result.is_none() { + let err = do_handshake_until_error(&mut client, &mut server); + assert!(err.is_err()); + } else { + do_handshake(&mut client, &mut server); + assert_eq!(client.protocol_version(), result); + assert_eq!(server.protocol_version(), result); + } +} + +#[test] +fn versions() { + // default -> 1.3 + version_test(&[], &[], Some(ProtocolVersion::TLSv1_3)); + + // client default, server 1.2 -> 1.2 + #[cfg(feature = "tls12")] + version_test( + &[], + &[&rustls::version::TLS12], + Some(ProtocolVersion::TLSv1_2), + ); + + // client 1.2, server default -> 1.2 + #[cfg(feature = "tls12")] + version_test( + &[&rustls::version::TLS12], + &[], + Some(ProtocolVersion::TLSv1_2), + ); + + // client 1.2, server 1.3 -> fail + #[cfg(feature = "tls12")] + version_test(&[&rustls::version::TLS12], &[&rustls::version::TLS13], None); + + // client 1.3, server 1.2 -> fail + #[cfg(feature = "tls12")] + version_test(&[&rustls::version::TLS13], &[&rustls::version::TLS12], None); + + // client 1.3, server 1.2+1.3 -> 1.3 + #[cfg(feature = "tls12")] + version_test( + &[&rustls::version::TLS13], + &[&rustls::version::TLS12, &rustls::version::TLS13], + Some(ProtocolVersion::TLSv1_3), + ); + + // client 1.2+1.3, server 1.2 -> 1.2 + #[cfg(feature = "tls12")] + version_test( + &[&rustls::version::TLS13, &rustls::version::TLS12], + &[&rustls::version::TLS12], + Some(ProtocolVersion::TLSv1_2), + ); +} + +fn check_read(reader: &mut dyn io::Read, bytes: &[u8]) { + let mut buf = vec![0u8; bytes.len() + 1]; + assert_eq!(bytes.len(), reader.read(&mut buf).unwrap()); + assert_eq!(bytes, &buf[..bytes.len()]); +} + +#[test] +fn config_builder_for_client_rejects_empty_kx_groups() { + assert_eq!( + ClientConfig::builder() + .with_safe_default_cipher_suites() + .with_kx_groups(&[]) + 
.with_safe_default_protocol_versions() + .err(), + Some(Error::General("no kx groups configured".into())) + ); +} + +#[test] +fn config_builder_for_client_rejects_empty_cipher_suites() { + assert_eq!( + ClientConfig::builder() + .with_cipher_suites(&[]) + .with_safe_default_kx_groups() + .with_safe_default_protocol_versions() + .err(), + Some(Error::General("no usable cipher suites configured".into())) + ); +} + +#[cfg(feature = "tls12")] +#[test] +fn config_builder_for_client_rejects_incompatible_cipher_suites() { + assert_eq!( + ClientConfig::builder() + .with_cipher_suites(&[rustls::cipher_suite::TLS13_AES_256_GCM_SHA384]) + .with_safe_default_kx_groups() + .with_protocol_versions(&[&rustls::version::TLS12]) + .err(), + Some(Error::General("no usable cipher suites configured".into())) + ); +} + +#[test] +fn config_builder_for_server_rejects_empty_kx_groups() { + assert_eq!( + ServerConfig::builder() + .with_safe_default_cipher_suites() + .with_kx_groups(&[]) + .with_safe_default_protocol_versions() + .err(), + Some(Error::General("no kx groups configured".into())) + ); +} + +#[test] +fn config_builder_for_server_rejects_empty_cipher_suites() { + assert_eq!( + ServerConfig::builder() + .with_cipher_suites(&[]) + .with_safe_default_kx_groups() + .with_safe_default_protocol_versions() + .err(), + Some(Error::General("no usable cipher suites configured".into())) + ); +} + +#[cfg(feature = "tls12")] +#[test] +fn config_builder_for_server_rejects_incompatible_cipher_suites() { + assert_eq!( + ServerConfig::builder() + .with_cipher_suites(&[rustls::cipher_suite::TLS13_AES_256_GCM_SHA384]) + .with_safe_default_kx_groups() + .with_protocol_versions(&[&rustls::version::TLS12]) + .err(), + Some(Error::General("no usable cipher suites configured".into())) + ); +} + +#[test] +fn buffered_client_data_sent() { + let server_config = Arc::new(make_server_config(KeyType::Rsa)); + + for version in rustls::ALL_VERSIONS { + let client_config = 
make_client_config_with_versions(KeyType::Rsa, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + + assert_eq!(5, client.writer().write(b"hello").unwrap()); + + do_handshake(&mut client, &mut server); + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + + check_read(&mut server.reader(), b"hello"); + } +} + +#[test] +fn buffered_server_data_sent() { + let server_config = Arc::new(make_server_config(KeyType::Rsa)); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(KeyType::Rsa, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + + assert_eq!(5, server.writer().write(b"hello").unwrap()); + + do_handshake(&mut client, &mut server); + transfer(&mut server, &mut client); + client.process_new_packets().unwrap(); + + check_read(&mut client.reader(), b"hello"); + } +} + +#[test] +fn buffered_both_data_sent() { + let server_config = Arc::new(make_server_config(KeyType::Rsa)); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(KeyType::Rsa, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + + assert_eq!( + 12, + server + .writer() + .write(b"from-server!") + .unwrap() + ); + assert_eq!( + 12, + client + .writer() + .write(b"from-client!") + .unwrap() + ); + + do_handshake(&mut client, &mut server); + + transfer(&mut server, &mut client); + client.process_new_packets().unwrap(); + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + + check_read(&mut client.reader(), b"from-server!"); + check_read(&mut server.reader(), b"from-client!"); + } +} + +#[test] +fn client_can_get_server_cert() { + for kt in ALL_KEY_TYPES.iter() { + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(*kt, 
&[version]); + let (mut client, mut server) = + make_pair_for_configs(client_config, make_server_config(*kt)); + do_handshake(&mut client, &mut server); + + let certs = client.peer_certificates(); + assert_eq!(certs, Some(kt.get_chain().as_slice())); + } + } +} + +#[test] +fn client_can_get_server_cert_after_resumption() { + for kt in ALL_KEY_TYPES.iter() { + let server_config = make_server_config(*kt); + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(*kt, &[version]); + let (mut client, mut server) = + make_pair_for_configs(client_config.clone(), server_config.clone()); + do_handshake(&mut client, &mut server); + + let original_certs = client.peer_certificates(); + + let (mut client, mut server) = + make_pair_for_configs(client_config.clone(), server_config.clone()); + do_handshake(&mut client, &mut server); + + let resumed_certs = client.peer_certificates(); + + assert_eq!(original_certs, resumed_certs); + } + } +} + +#[test] +fn server_can_get_client_cert() { + for kt in ALL_KEY_TYPES.iter() { + let server_config = Arc::new(make_server_config_with_mandatory_client_auth(*kt)); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions_with_auth(*kt, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + do_handshake(&mut client, &mut server); + + let certs = server.peer_certificates(); + assert_eq!(certs, Some(kt.get_client_chain().as_slice())); + } + } +} + +#[test] +fn server_can_get_client_cert_after_resumption() { + for kt in ALL_KEY_TYPES.iter() { + let server_config = Arc::new(make_server_config_with_mandatory_client_auth(*kt)); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions_with_auth(*kt, &[version]); + let client_config = Arc::new(client_config); + let (mut client, mut server) = + make_pair_for_arc_configs(&client_config, &server_config); + do_handshake(&mut 
client, &mut server); + let original_certs = server.peer_certificates(); + + let (mut client, mut server) = + make_pair_for_arc_configs(&client_config, &server_config); + do_handshake(&mut client, &mut server); + let resumed_certs = server.peer_certificates(); + assert_eq!(original_certs, resumed_certs); + } + } +} + +#[test] +fn test_config_builders_debug() { + let b = ServerConfig::builder(); + assert_eq!( + "ConfigBuilder { state: WantsCipherSuites(()) }", + format!("{:?}", b) + ); + let b = b.with_cipher_suites(&[rustls::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256]); + assert_eq!("ConfigBuilder { state: WantsKxGroups { cipher_suites: [TLS13_CHACHA20_POLY1305_SHA256] } }", format!("{:?}", b)); + let b = b.with_kx_groups(&[&rustls::kx_group::X25519]); + assert_eq!("ConfigBuilder { state: WantsVersions { cipher_suites: [TLS13_CHACHA20_POLY1305_SHA256], kx_groups: [X25519] } }", format!("{:?}", b)); + let b = b + .with_protocol_versions(&[&rustls::version::TLS13]) + .unwrap(); + let b = b.with_no_client_auth(); + assert_eq!("ConfigBuilder { state: WantsServerCert { cipher_suites: [TLS13_CHACHA20_POLY1305_SHA256], kx_groups: [X25519], versions: [TLSv1_3], verifier: dyn ClientCertVerifier } }", format!("{:?}", b)); + + let b = ClientConfig::builder(); + assert_eq!( + "ConfigBuilder { state: WantsCipherSuites(()) }", + format!("{:?}", b) + ); + let b = b.with_cipher_suites(&[rustls::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256]); + assert_eq!("ConfigBuilder { state: WantsKxGroups { cipher_suites: [TLS13_CHACHA20_POLY1305_SHA256] } }", format!("{:?}", b)); + let b = b.with_kx_groups(&[&rustls::kx_group::X25519]); + assert_eq!("ConfigBuilder { state: WantsVersions { cipher_suites: [TLS13_CHACHA20_POLY1305_SHA256], kx_groups: [X25519] } }", format!("{:?}", b)); + let b = b + .with_protocol_versions(&[&rustls::version::TLS13]) + .unwrap(); + assert_eq!("ConfigBuilder { state: WantsVerifier { cipher_suites: [TLS13_CHACHA20_POLY1305_SHA256], kx_groups: [X25519], versions: 
[TLSv1_3] } }", format!("{:?}", b)); +} + +/// Test that the server handles combination of `offer_client_auth()` returning true +/// and `client_auth_mandatory` returning `Some(false)`. This exercises both the +/// client's and server's ability to "recover" from the server asking for a client +/// certificate and not being given one. This also covers the implementation +/// of `AllowAnyAnonymousOrAuthenticatedClient`. +#[test] +fn server_allow_any_anonymous_or_authenticated_client() { + let kt = KeyType::Rsa; + for client_cert_chain in [None, Some(kt.get_client_chain())].iter() { + let client_auth_roots = get_client_root_store(kt); + let client_auth = AllowAnyAnonymousOrAuthenticatedClient::new(client_auth_roots); + + let server_config = ServerConfig::builder() + .with_safe_defaults() + .with_client_cert_verifier(client_auth) + .with_single_cert(kt.get_chain(), kt.get_key()) + .unwrap(); + let server_config = Arc::new(server_config); + + for version in rustls::ALL_VERSIONS { + let client_config = if client_cert_chain.is_some() { + make_client_config_with_versions_with_auth(kt, &[version]) + } else { + make_client_config_with_versions(kt, &[version]) + }; + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + do_handshake(&mut client, &mut server); + + let certs = server.peer_certificates(); + assert_eq!(certs, client_cert_chain.as_deref()); + } + } +} + +fn check_read_and_close(reader: &mut dyn io::Read, expect: &[u8]) { + check_read(reader, expect); + assert!(matches!(reader.read(&mut [0u8; 5]), Ok(0))); +} + +#[test] +fn server_close_notify() { + let kt = KeyType::Rsa; + let server_config = Arc::new(make_server_config_with_mandatory_client_auth(kt)); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions_with_auth(kt, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + do_handshake(&mut client, &mut 
server); + + // check that alerts don't overtake appdata + assert_eq!( + 12, + server + .writer() + .write(b"from-server!") + .unwrap() + ); + assert_eq!( + 12, + client + .writer() + .write(b"from-client!") + .unwrap() + ); + server.send_close_notify(); + + transfer(&mut server, &mut client); + let io_state = client.process_new_packets().unwrap(); + assert!(io_state.peer_has_closed()); + check_read_and_close(&mut client.reader(), b"from-server!"); + + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + check_read(&mut server.reader(), b"from-client!"); + } +} + +#[test] +fn client_close_notify() { + let kt = KeyType::Rsa; + let server_config = Arc::new(make_server_config_with_mandatory_client_auth(kt)); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions_with_auth(kt, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + do_handshake(&mut client, &mut server); + + // check that alerts don't overtake appdata + assert_eq!( + 12, + server + .writer() + .write(b"from-server!") + .unwrap() + ); + assert_eq!( + 12, + client + .writer() + .write(b"from-client!") + .unwrap() + ); + client.send_close_notify(); + + transfer(&mut client, &mut server); + let io_state = server.process_new_packets().unwrap(); + assert!(io_state.peer_has_closed()); + check_read_and_close(&mut server.reader(), b"from-client!"); + + transfer(&mut server, &mut client); + client.process_new_packets().unwrap(); + check_read(&mut client.reader(), b"from-server!"); + } +} + +#[test] +fn server_closes_uncleanly() { + let kt = KeyType::Rsa; + let server_config = Arc::new(make_server_config(kt)); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(kt, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + do_handshake(&mut client, &mut server); + + // check that 
unclean EOF reporting does not overtake appdata + assert_eq!( + 12, + server + .writer() + .write(b"from-server!") + .unwrap() + ); + assert_eq!( + 12, + client + .writer() + .write(b"from-client!") + .unwrap() + ); + + transfer(&mut server, &mut client); + transfer_eof(&mut client); + let io_state = client.process_new_packets().unwrap(); + assert!(!io_state.peer_has_closed()); + check_read(&mut client.reader(), b"from-server!"); + + assert!(matches!(client.reader().read(&mut [0u8; 1]), + Err(err) if err.kind() == io::ErrorKind::UnexpectedEof)); + + // may still transmit pending frames + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + check_read(&mut server.reader(), b"from-client!"); + } +} + +#[test] +fn client_closes_uncleanly() { + let kt = KeyType::Rsa; + let server_config = Arc::new(make_server_config(kt)); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(kt, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + do_handshake(&mut client, &mut server); + + // check that unclean EOF reporting does not overtake appdata + assert_eq!( + 12, + server + .writer() + .write(b"from-server!") + .unwrap() + ); + assert_eq!( + 12, + client + .writer() + .write(b"from-client!") + .unwrap() + ); + + transfer(&mut client, &mut server); + transfer_eof(&mut server); + let io_state = server.process_new_packets().unwrap(); + assert!(!io_state.peer_has_closed()); + check_read(&mut server.reader(), b"from-client!"); + + assert!(matches!(server.reader().read(&mut [0u8; 1]), + Err(err) if err.kind() == io::ErrorKind::UnexpectedEof)); + + // may still transmit pending frames + transfer(&mut server, &mut client); + client.process_new_packets().unwrap(); + check_read(&mut client.reader(), b"from-server!"); + } +} + +#[derive(Default)] +struct ServerCheckCertResolve { + expected_sni: Option, + expected_sigalgs: Option>, + expected_alpn: 
Option>>, + expected_cipher_suites: Option>, +} + +impl ResolvesServerCert for ServerCheckCertResolve { + fn resolve(&self, client_hello: ClientHello) -> Option> { + if client_hello + .signature_schemes() + .is_empty() + { + panic!("no signature schemes shared by client"); + } + + if client_hello.cipher_suites().is_empty() { + panic!("no cipher suites shared by client"); + } + + if let Some(expected_sni) = &self.expected_sni { + let sni: &str = client_hello + .server_name() + .expect("sni unexpectedly absent"); + assert_eq!(expected_sni, sni); + } + + if let Some(expected_sigalgs) = &self.expected_sigalgs { + assert_eq!( + expected_sigalgs, + client_hello.signature_schemes(), + "unexpected signature schemes" + ); + } + + if let Some(expected_alpn) = &self.expected_alpn { + let alpn = client_hello + .alpn() + .expect("alpn unexpectedly absent") + .collect::>(); + assert_eq!(alpn.len(), expected_alpn.len()); + + for (got, wanted) in alpn.iter().zip(expected_alpn.iter()) { + assert_eq!(got, &wanted.as_slice()); + } + } + + if let Some(expected_cipher_suites) = &self.expected_cipher_suites { + assert_eq!( + expected_cipher_suites, + client_hello.cipher_suites(), + "unexpected cipher suites" + ); + } + + None + } +} + +#[test] +fn server_cert_resolve_with_sni() { + for kt in ALL_KEY_TYPES.iter() { + let client_config = make_client_config(*kt); + let mut server_config = make_server_config(*kt); + + server_config.cert_resolver = Arc::new(ServerCheckCertResolve { + expected_sni: Some("the-value-from-sni".into()), + ..Default::default() + }); + + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("the-value-from-sni")).unwrap(); + let mut server = ServerConnection::new(Arc::new(server_config)).unwrap(); + + let err = do_handshake_until_error(&mut client, &mut server); + assert!(err.is_err()); + } +} + +#[test] +fn server_cert_resolve_with_alpn() { + for kt in ALL_KEY_TYPES.iter() { + let mut client_config = make_client_config(*kt); + 
client_config.alpn_protocols = vec!["foo".into(), "bar".into()]; + + let mut server_config = make_server_config(*kt); + server_config.cert_resolver = Arc::new(ServerCheckCertResolve { + expected_alpn: Some(vec![b"foo".to_vec(), b"bar".to_vec()]), + ..Default::default() + }); + + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("sni-value")).unwrap(); + let mut server = ServerConnection::new(Arc::new(server_config)).unwrap(); + + let err = do_handshake_until_error(&mut client, &mut server); + assert!(err.is_err()); + } +} + +#[test] +fn client_trims_terminating_dot() { + for kt in ALL_KEY_TYPES.iter() { + let client_config = make_client_config(*kt); + let mut server_config = make_server_config(*kt); + + server_config.cert_resolver = Arc::new(ServerCheckCertResolve { + expected_sni: Some("some-host.com".into()), + ..Default::default() + }); + + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("some-host.com.")).unwrap(); + let mut server = ServerConnection::new(Arc::new(server_config)).unwrap(); + + let err = do_handshake_until_error(&mut client, &mut server); + assert!(err.is_err()); + } +} + +#[cfg(feature = "tls12")] +fn check_sigalgs_reduced_by_ciphersuite( + kt: KeyType, + suite: CipherSuite, + expected_sigalgs: Vec, +) { + let client_config = finish_client_config( + kt, + ClientConfig::builder() + .with_cipher_suites(&[find_suite(suite)]) + .with_safe_default_kx_groups() + .with_safe_default_protocol_versions() + .unwrap(), + ); + + let mut server_config = make_server_config(kt); + + server_config.cert_resolver = Arc::new(ServerCheckCertResolve { + expected_sigalgs: Some(expected_sigalgs), + expected_cipher_suites: Some(vec![suite, CipherSuite::TLS_EMPTY_RENEGOTIATION_INFO_SCSV]), + ..Default::default() + }); + + let mut client = ClientConnection::new(Arc::new(client_config), dns_name("localhost")).unwrap(); + let mut server = ServerConnection::new(Arc::new(server_config)).unwrap(); + + let err = 
do_handshake_until_error(&mut client, &mut server); + assert!(err.is_err()); +} + +#[cfg(feature = "tls12")] +#[test] +fn server_cert_resolve_reduces_sigalgs_for_rsa_ciphersuite() { + check_sigalgs_reduced_by_ciphersuite( + KeyType::Rsa, + CipherSuite::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + vec![ + SignatureScheme::RSA_PSS_SHA512, + SignatureScheme::RSA_PSS_SHA384, + SignatureScheme::RSA_PSS_SHA256, + SignatureScheme::RSA_PKCS1_SHA512, + SignatureScheme::RSA_PKCS1_SHA384, + SignatureScheme::RSA_PKCS1_SHA256, + ], + ); +} + +#[cfg(feature = "tls12")] +#[test] +fn server_cert_resolve_reduces_sigalgs_for_ecdsa_ciphersuite() { + check_sigalgs_reduced_by_ciphersuite( + KeyType::Ecdsa, + CipherSuite::TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + vec![ + SignatureScheme::ECDSA_NISTP384_SHA384, + SignatureScheme::ECDSA_NISTP256_SHA256, + SignatureScheme::ED25519, + ], + ); +} + +struct ServerCheckNoSNI {} + +impl ResolvesServerCert for ServerCheckNoSNI { + fn resolve(&self, client_hello: ClientHello) -> Option> { + assert!(client_hello.server_name().is_none()); + + None + } +} + +#[test] +fn client_with_sni_disabled_does_not_send_sni() { + for kt in ALL_KEY_TYPES.iter() { + let mut server_config = make_server_config(*kt); + server_config.cert_resolver = Arc::new(ServerCheckNoSNI {}); + let server_config = Arc::new(server_config); + + for version in rustls::ALL_VERSIONS { + let mut client_config = make_client_config_with_versions(*kt, &[version]); + client_config.enable_sni = false; + + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("value-not-sent")).unwrap(); + let mut server = ServerConnection::new(Arc::clone(&server_config)).unwrap(); + + let err = do_handshake_until_error(&mut client, &mut server); + assert!(err.is_err()); + } + } +} + +#[test] +fn client_checks_server_certificate_with_given_name() { + for kt in ALL_KEY_TYPES.iter() { + let server_config = Arc::new(make_server_config(*kt)); + + for version in rustls::ALL_VERSIONS 
{ + let client_config = make_client_config_with_versions(*kt, &[version]); + let mut client = ClientConnection::new( + Arc::new(client_config), + dns_name("not-the-right-hostname.com"), + ) + .unwrap(); + let mut server = ServerConnection::new(Arc::clone(&server_config)).unwrap(); + + let err = do_handshake_until_error(&mut client, &mut server); + assert_eq!( + err, + Err(ErrorFromPeer::Client(Error::InvalidCertificateData( + "invalid peer certificate: CertNotValidForName".into(), + ))) + ); + } + } +} + +struct ClientCheckCertResolve { + query_count: AtomicUsize, + expect_queries: usize, +} + +impl ClientCheckCertResolve { + fn new(expect_queries: usize) -> Self { + ClientCheckCertResolve { + query_count: AtomicUsize::new(0), + expect_queries, + } + } +} + +impl Drop for ClientCheckCertResolve { + fn drop(&mut self) { + if !std::thread::panicking() { + let count = self.query_count.load(Ordering::SeqCst); + assert_eq!(count, self.expect_queries); + } + } +} + +impl ResolvesClientCert for ClientCheckCertResolve { + fn resolve( + &self, + acceptable_issuers: &[&[u8]], + sigschemes: &[SignatureScheme], + ) -> Option> { + self.query_count + .fetch_add(1, Ordering::SeqCst); + + if acceptable_issuers.is_empty() { + panic!("no issuers offered by server"); + } + + if sigschemes.is_empty() { + panic!("no signature schemes shared by server"); + } + + None + } + + fn has_certs(&self) -> bool { + true + } +} + +#[test] +fn client_cert_resolve() { + for kt in ALL_KEY_TYPES.iter() { + let server_config = Arc::new(make_server_config_with_mandatory_client_auth(*kt)); + + for version in rustls::ALL_VERSIONS { + let mut client_config = make_client_config_with_versions(*kt, &[version]); + client_config.client_auth_cert_resolver = Arc::new(ClientCheckCertResolve::new(1)); + + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + + assert_eq!( + do_handshake_until_error(&mut client, &mut server), + 
Err(ErrorFromPeer::Server(Error::NoCertificatesPresented)) + ); + } + } +} + +#[test] +fn client_auth_works() { + for kt in ALL_KEY_TYPES.iter() { + let server_config = Arc::new(make_server_config_with_mandatory_client_auth(*kt)); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions_with_auth(*kt, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + do_handshake(&mut client, &mut server); + } + } +} + +#[test] +fn client_error_is_sticky() { + let (mut client, _) = make_pair(KeyType::Rsa); + client + .read_tls(&mut b"\x16\x03\x03\x00\x08\x0f\x00\x00\x04junk".as_ref()) + .unwrap(); + let mut err = client.process_new_packets(); + assert!(err.is_err()); + err = client.process_new_packets(); + assert!(err.is_err()); +} + +#[test] +fn server_error_is_sticky() { + let (_, mut server) = make_pair(KeyType::Rsa); + server + .read_tls(&mut b"\x16\x03\x03\x00\x08\x0f\x00\x00\x04junk".as_ref()) + .unwrap(); + let mut err = server.process_new_packets(); + assert!(err.is_err()); + err = server.process_new_packets(); + assert!(err.is_err()); +} + +#[test] +fn server_flush_does_nothing() { + let (_, mut server) = make_pair(KeyType::Rsa); + assert!(matches!(server.writer().flush(), Ok(()))); +} + +#[test] +fn client_flush_does_nothing() { + let (mut client, _) = make_pair(KeyType::Rsa); + assert!(matches!(client.writer().flush(), Ok(()))); +} + +#[test] +fn server_is_send_and_sync() { + let (_, server) = make_pair(KeyType::Rsa); + &server as &dyn Send; + &server as &dyn Sync; +} + +#[test] +fn client_is_send_and_sync() { + let (client, _) = make_pair(KeyType::Rsa); + &client as &dyn Send; + &client as &dyn Sync; +} + +#[test] +fn server_respects_buffer_limit_pre_handshake() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + + server.set_buffer_limit(Some(32)); + + assert_eq!( + server + .writer() + .write(b"01234567890123456789") + .unwrap(), + 20 + ); + 
assert_eq!( + server + .writer() + .write(b"01234567890123456789") + .unwrap(), + 12 + ); + + do_handshake(&mut client, &mut server); + transfer(&mut server, &mut client); + client.process_new_packets().unwrap(); + + check_read(&mut client.reader(), b"01234567890123456789012345678901"); +} + +#[test] +fn server_respects_buffer_limit_pre_handshake_with_vectored_write() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + + server.set_buffer_limit(Some(32)); + + assert_eq!( + server + .writer() + .write_vectored(&[ + IoSlice::new(b"01234567890123456789"), + IoSlice::new(b"01234567890123456789") + ]) + .unwrap(), + 32 + ); + + do_handshake(&mut client, &mut server); + transfer(&mut server, &mut client); + client.process_new_packets().unwrap(); + + check_read(&mut client.reader(), b"01234567890123456789012345678901"); +} + +#[test] +fn server_respects_buffer_limit_post_handshake() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + + // this test will vary in behaviour depending on the default suites + do_handshake(&mut client, &mut server); + server.set_buffer_limit(Some(48)); + + assert_eq!( + server + .writer() + .write(b"01234567890123456789") + .unwrap(), + 20 + ); + assert_eq!( + server + .writer() + .write(b"01234567890123456789") + .unwrap(), + 6 + ); + + transfer(&mut server, &mut client); + client.process_new_packets().unwrap(); + + check_read(&mut client.reader(), b"01234567890123456789012345"); +} + +#[test] +fn client_respects_buffer_limit_pre_handshake() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + + client.set_buffer_limit(Some(32)); + + assert_eq!( + client + .writer() + .write(b"01234567890123456789") + .unwrap(), + 20 + ); + assert_eq!( + client + .writer() + .write(b"01234567890123456789") + .unwrap(), + 12 + ); + + do_handshake(&mut client, &mut server); + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + + check_read(&mut server.reader(), b"01234567890123456789012345678901"); +} + 
+#[test] +fn client_respects_buffer_limit_pre_handshake_with_vectored_write() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + + client.set_buffer_limit(Some(32)); + + assert_eq!( + client + .writer() + .write_vectored(&[ + IoSlice::new(b"01234567890123456789"), + IoSlice::new(b"01234567890123456789") + ]) + .unwrap(), + 32 + ); + + do_handshake(&mut client, &mut server); + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + + check_read(&mut server.reader(), b"01234567890123456789012345678901"); +} + +#[test] +fn client_respects_buffer_limit_post_handshake() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + + do_handshake(&mut client, &mut server); + client.set_buffer_limit(Some(48)); + + assert_eq!( + client + .writer() + .write(b"01234567890123456789") + .unwrap(), + 20 + ); + assert_eq!( + client + .writer() + .write(b"01234567890123456789") + .unwrap(), + 6 + ); + + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + + check_read(&mut server.reader(), b"01234567890123456789012345"); +} + +struct OtherSession<'a, C, S> +where + C: DerefMut + Deref>, + S: SideData, +{ + sess: &'a mut C, + pub reads: usize, + pub writevs: Vec>, + fail_ok: bool, + pub short_writes: bool, + pub last_error: Option, +} + +impl<'a, C, S> OtherSession<'a, C, S> +where + C: DerefMut + Deref>, + S: SideData, +{ + fn new(sess: &'a mut C) -> OtherSession<'a, C, S> { + OtherSession { + sess, + reads: 0, + writevs: vec![], + fail_ok: false, + short_writes: false, + last_error: None, + } + } + + fn new_fails(sess: &'a mut C) -> OtherSession<'a, C, S> { + let mut os = OtherSession::new(sess); + os.fail_ok = true; + os + } +} + +impl<'a, C, S> io::Read for OtherSession<'a, C, S> +where + C: DerefMut + Deref>, + S: SideData, +{ + fn read(&mut self, mut b: &mut [u8]) -> io::Result { + self.reads += 1; + self.sess.write_tls(b.by_ref()) + } +} + +impl<'a, C, S> io::Write for OtherSession<'a, C, S> +where + C: DerefMut + 
Deref>, + S: SideData, +{ + fn write(&mut self, _: &[u8]) -> io::Result { + unreachable!() + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + + fn write_vectored<'b>(&mut self, b: &[io::IoSlice<'b>]) -> io::Result { + let mut total = 0; + let mut lengths = vec![]; + for bytes in b { + let write_len = if self.short_writes { + if bytes.len() > 5 { + bytes.len() / 2 + } else { + bytes.len() + } + } else { + bytes.len() + }; + + let l = self + .sess + .read_tls(&mut io::Cursor::new(&bytes[..write_len]))?; + lengths.push(l); + total += l; + if bytes.len() != l { + break; + } + } + + let rc = self.sess.process_new_packets(); + if !self.fail_ok { + rc.unwrap(); + } else if rc.is_err() { + self.last_error = rc.err(); + } + + self.writevs.push(lengths); + Ok(total) + } +} + +#[test] +fn server_read_returns_wouldblock_when_no_data() { + let (_, mut server) = make_pair(KeyType::Rsa); + assert!(matches!(server.reader().read(&mut [0u8; 1]), + Err(err) if err.kind() == io::ErrorKind::WouldBlock)); +} + +#[test] +fn client_read_returns_wouldblock_when_no_data() { + let (mut client, _) = make_pair(KeyType::Rsa); + assert!(matches!(client.reader().read(&mut [0u8; 1]), + Err(err) if err.kind() == io::ErrorKind::WouldBlock)); +} + +#[test] +fn new_server_returns_initial_io_state() { + let (_, mut server) = make_pair(KeyType::Rsa); + let io_state = server.process_new_packets().unwrap(); + println!("IoState is Debug {:?}", io_state); + assert_eq!(io_state.plaintext_bytes_to_read(), 0); + assert!(!io_state.peer_has_closed()); + assert_eq!(io_state.tls_bytes_to_write(), 0); +} + +#[test] +fn new_client_returns_initial_io_state() { + let (mut client, _) = make_pair(KeyType::Rsa); + let io_state = client.process_new_packets().unwrap(); + println!("IoState is Debug {:?}", io_state); + assert_eq!(io_state.plaintext_bytes_to_read(), 0); + assert!(!io_state.peer_has_closed()); + assert!(io_state.tls_bytes_to_write() > 200); +} + +#[test] +fn client_complete_io_for_handshake() { + 
let (mut client, mut server) = make_pair(KeyType::Rsa); + + assert!(client.is_handshaking()); + let (rdlen, wrlen) = client + .complete_io(&mut OtherSession::new(&mut server)) + .unwrap(); + assert!(rdlen > 0 && wrlen > 0); + assert!(!client.is_handshaking()); +} + +#[test] +fn client_complete_io_for_handshake_eof() { + let (mut client, _) = make_pair(KeyType::Rsa); + let mut input = io::Cursor::new(Vec::new()); + + assert!(client.is_handshaking()); + let err = client + .complete_io(&mut input) + .unwrap_err(); + assert_eq!(io::ErrorKind::UnexpectedEof, err.kind()); +} + +#[test] +fn client_complete_io_for_write() { + for kt in ALL_KEY_TYPES.iter() { + let (mut client, mut server) = make_pair(*kt); + + do_handshake(&mut client, &mut server); + + client + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + client + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + { + let mut pipe = OtherSession::new(&mut server); + let (rdlen, wrlen) = client.complete_io(&mut pipe).unwrap(); + assert!(rdlen == 0 && wrlen > 0); + println!("{:?}", pipe.writevs); + assert_eq!(pipe.writevs, vec![vec![42, 42]]); + } + check_read( + &mut server.reader(), + b"0123456789012345678901234567890123456789", + ); + } +} + +#[test] +fn client_complete_io_for_read() { + for kt in ALL_KEY_TYPES.iter() { + let (mut client, mut server) = make_pair(*kt); + + do_handshake(&mut client, &mut server); + + server + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + { + let mut pipe = OtherSession::new(&mut server); + let (rdlen, wrlen) = client.complete_io(&mut pipe).unwrap(); + assert!(rdlen > 0 && wrlen == 0); + assert_eq!(pipe.reads, 1); + } + check_read(&mut client.reader(), b"01234567890123456789"); + } +} + +#[test] +fn server_complete_io_for_handshake() { + for kt in ALL_KEY_TYPES.iter() { + let (mut client, mut server) = make_pair(*kt); + + assert!(server.is_handshaking()); + let (rdlen, wrlen) = server + .complete_io(&mut OtherSession::new(&mut client)) + 
.unwrap(); + assert!(rdlen > 0 && wrlen > 0); + assert!(!server.is_handshaking()); + } +} + +#[test] +fn server_complete_io_for_handshake_eof() { + let (_, mut server) = make_pair(KeyType::Rsa); + let mut input = io::Cursor::new(Vec::new()); + + assert!(server.is_handshaking()); + let err = server + .complete_io(&mut input) + .unwrap_err(); + assert_eq!(io::ErrorKind::UnexpectedEof, err.kind()); +} + +#[test] +fn server_complete_io_for_write() { + for kt in ALL_KEY_TYPES.iter() { + let (mut client, mut server) = make_pair(*kt); + + do_handshake(&mut client, &mut server); + + server + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + server + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + { + let mut pipe = OtherSession::new(&mut client); + let (rdlen, wrlen) = server.complete_io(&mut pipe).unwrap(); + assert!(rdlen == 0 && wrlen > 0); + assert_eq!(pipe.writevs, vec![vec![42, 42]]); + } + check_read( + &mut client.reader(), + b"0123456789012345678901234567890123456789", + ); + } +} + +#[test] +fn server_complete_io_for_read() { + for kt in ALL_KEY_TYPES.iter() { + let (mut client, mut server) = make_pair(*kt); + + do_handshake(&mut client, &mut server); + + client + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + { + let mut pipe = OtherSession::new(&mut client); + let (rdlen, wrlen) = server.complete_io(&mut pipe).unwrap(); + assert!(rdlen > 0 && wrlen == 0); + assert_eq!(pipe.reads, 1); + } + check_read(&mut server.reader(), b"01234567890123456789"); + } +} + +#[test] +fn client_stream_write() { + for kt in ALL_KEY_TYPES.iter() { + let (mut client, mut server) = make_pair(*kt); + + { + let mut pipe = OtherSession::new(&mut server); + let mut stream = Stream::new(&mut client, &mut pipe); + assert_eq!(stream.write(b"hello").unwrap(), 5); + } + check_read(&mut server.reader(), b"hello"); + } +} + +#[test] +fn client_streamowned_write() { + for kt in ALL_KEY_TYPES.iter() { + let (client, mut server) = make_pair(*kt); + + { + 
let pipe = OtherSession::new(&mut server); + let mut stream = StreamOwned::new(client, pipe); + assert_eq!(stream.write(b"hello").unwrap(), 5); + } + check_read(&mut server.reader(), b"hello"); + } +} + +#[test] +fn client_stream_read() { + for kt in ALL_KEY_TYPES.iter() { + let (mut client, mut server) = make_pair(*kt); + + server + .writer() + .write_all(b"world") + .unwrap(); + + { + let mut pipe = OtherSession::new(&mut server); + let mut stream = Stream::new(&mut client, &mut pipe); + check_read(&mut stream, b"world"); + } + } +} + +#[test] +fn client_streamowned_read() { + for kt in ALL_KEY_TYPES.iter() { + let (client, mut server) = make_pair(*kt); + + server + .writer() + .write_all(b"world") + .unwrap(); + + { + let pipe = OtherSession::new(&mut server); + let mut stream = StreamOwned::new(client, pipe); + check_read(&mut stream, b"world"); + } + } +} + +#[test] +fn server_stream_write() { + for kt in ALL_KEY_TYPES.iter() { + let (mut client, mut server) = make_pair(*kt); + + { + let mut pipe = OtherSession::new(&mut client); + let mut stream = Stream::new(&mut server, &mut pipe); + assert_eq!(stream.write(b"hello").unwrap(), 5); + } + check_read(&mut client.reader(), b"hello"); + } +} + +#[test] +fn server_streamowned_write() { + for kt in ALL_KEY_TYPES.iter() { + let (mut client, server) = make_pair(*kt); + + { + let pipe = OtherSession::new(&mut client); + let mut stream = StreamOwned::new(server, pipe); + assert_eq!(stream.write(b"hello").unwrap(), 5); + } + check_read(&mut client.reader(), b"hello"); + } +} + +#[test] +fn server_stream_read() { + for kt in ALL_KEY_TYPES.iter() { + let (mut client, mut server) = make_pair(*kt); + + client + .writer() + .write_all(b"world") + .unwrap(); + + { + let mut pipe = OtherSession::new(&mut client); + let mut stream = Stream::new(&mut server, &mut pipe); + check_read(&mut stream, b"world"); + } + } +} + +#[test] +fn server_streamowned_read() { + for kt in ALL_KEY_TYPES.iter() { + let (mut client, server) = 
make_pair(*kt); + + client + .writer() + .write_all(b"world") + .unwrap(); + + { + let pipe = OtherSession::new(&mut client); + let mut stream = StreamOwned::new(server, pipe); + check_read(&mut stream, b"world"); + } + } +} + +struct FailsWrites { + errkind: io::ErrorKind, + after: usize, +} + +impl io::Read for FailsWrites { + fn read(&mut self, _b: &mut [u8]) -> io::Result { + Ok(0) + } +} + +impl io::Write for FailsWrites { + fn write(&mut self, b: &[u8]) -> io::Result { + if self.after > 0 { + self.after -= 1; + Ok(b.len()) + } else { + Err(io::Error::new(self.errkind, "oops")) + } + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +#[test] +fn stream_write_reports_underlying_io_error_before_plaintext_processed() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + do_handshake(&mut client, &mut server); + + let mut pipe = FailsWrites { + errkind: io::ErrorKind::ConnectionAborted, + after: 0, + }; + client + .writer() + .write_all(b"hello") + .unwrap(); + let mut client_stream = Stream::new(&mut client, &mut pipe); + let rc = client_stream.write(b"world"); + assert!(rc.is_err()); + let err = rc.err().unwrap(); + assert_eq!(err.kind(), io::ErrorKind::ConnectionAborted); +} + +#[test] +fn stream_write_swallows_underlying_io_error_after_plaintext_processed() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + do_handshake(&mut client, &mut server); + + let mut pipe = FailsWrites { + errkind: io::ErrorKind::ConnectionAborted, + after: 1, + }; + client + .writer() + .write_all(b"hello") + .unwrap(); + let mut client_stream = Stream::new(&mut client, &mut pipe); + let rc = client_stream.write(b"world"); + assert_eq!(format!("{:?}", rc), "Ok(5)"); +} + +fn make_disjoint_suite_configs() -> (ClientConfig, ServerConfig) { + let kt = KeyType::Rsa; + let server_config = finish_server_config( + kt, + ServerConfig::builder() + .with_cipher_suites(&[rustls::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256]) + .with_safe_default_kx_groups() + 
.with_safe_default_protocol_versions() + .unwrap(), + ); + + let client_config = finish_client_config( + kt, + ClientConfig::builder() + .with_cipher_suites(&[rustls::cipher_suite::TLS13_AES_256_GCM_SHA384]) + .with_safe_default_kx_groups() + .with_safe_default_protocol_versions() + .unwrap(), + ); + + (client_config, server_config) +} + +#[test] +fn client_stream_handshake_error() { + let (client_config, server_config) = make_disjoint_suite_configs(); + let (mut client, mut server) = make_pair_for_configs(client_config, server_config); + + { + let mut pipe = OtherSession::new_fails(&mut server); + let mut client_stream = Stream::new(&mut client, &mut pipe); + let rc = client_stream.write(b"hello"); + assert!(rc.is_err()); + assert_eq!( + format!("{:?}", rc), + "Err(Custom { kind: InvalidData, error: AlertReceived(HandshakeFailure) })" + ); + let rc = client_stream.write(b"hello"); + assert!(rc.is_err()); + assert_eq!( + format!("{:?}", rc), + "Err(Custom { kind: InvalidData, error: AlertReceived(HandshakeFailure) })" + ); + } +} + +#[test] +fn client_streamowned_handshake_error() { + let (client_config, server_config) = make_disjoint_suite_configs(); + let (client, mut server) = make_pair_for_configs(client_config, server_config); + + let pipe = OtherSession::new_fails(&mut server); + let mut client_stream = StreamOwned::new(client, pipe); + let rc = client_stream.write(b"hello"); + assert!(rc.is_err()); + assert_eq!( + format!("{:?}", rc), + "Err(Custom { kind: InvalidData, error: AlertReceived(HandshakeFailure) })" + ); + let rc = client_stream.write(b"hello"); + assert!(rc.is_err()); + assert_eq!( + format!("{:?}", rc), + "Err(Custom { kind: InvalidData, error: AlertReceived(HandshakeFailure) })" + ); +} + +#[test] +fn server_stream_handshake_error() { + let (client_config, server_config) = make_disjoint_suite_configs(); + let (mut client, mut server) = make_pair_for_configs(client_config, server_config); + + client + .writer() + .write_all(b"world") + 
.unwrap(); + + { + let mut pipe = OtherSession::new_fails(&mut client); + let mut server_stream = Stream::new(&mut server, &mut pipe); + let mut bytes = [0u8; 5]; + let rc = server_stream.read(&mut bytes); + assert!(rc.is_err()); + assert_eq!( + format!("{:?}", rc), + "Err(Custom { kind: InvalidData, error: PeerIncompatibleError(\"no ciphersuites in common\") })" + ); + } +} + +#[test] +fn server_streamowned_handshake_error() { + let (client_config, server_config) = make_disjoint_suite_configs(); + let (mut client, server) = make_pair_for_configs(client_config, server_config); + + client + .writer() + .write_all(b"world") + .unwrap(); + + let pipe = OtherSession::new_fails(&mut client); + let mut server_stream = StreamOwned::new(server, pipe); + let mut bytes = [0u8; 5]; + let rc = server_stream.read(&mut bytes); + assert!(rc.is_err()); + assert_eq!( + format!("{:?}", rc), + "Err(Custom { kind: InvalidData, error: PeerIncompatibleError(\"no ciphersuites in common\") })" + ); +} + +#[test] +fn server_config_is_clone() { + let _ = make_server_config(KeyType::Rsa); +} + +#[test] +fn client_config_is_clone() { + let _ = make_client_config(KeyType::Rsa); +} + +#[test] +fn client_connection_is_debug() { + let (client, _) = make_pair(KeyType::Rsa); + println!("{:?}", client); +} + +#[test] +fn server_connection_is_debug() { + let (_, server) = make_pair(KeyType::Rsa); + println!("{:?}", server); +} + +#[test] +fn server_complete_io_for_handshake_ending_with_alert() { + let (client_config, server_config) = make_disjoint_suite_configs(); + let (mut client, mut server) = make_pair_for_configs(client_config, server_config); + + assert!(server.is_handshaking()); + + let mut pipe = OtherSession::new_fails(&mut client); + let rc = server.complete_io(&mut pipe); + assert!(rc.is_err(), "server io failed due to handshake failure"); + assert!(!server.wants_write(), "but server did send its alert"); + assert_eq!( + format!("{:?}", pipe.last_error), + 
"Some(AlertReceived(HandshakeFailure))", + "which was received by client" + ); +} + +#[test] +fn server_exposes_offered_sni() { + let kt = KeyType::Rsa; + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(kt, &[version]); + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("second.testserver.com")) + .unwrap(); + let mut server = ServerConnection::new(Arc::new(make_server_config(kt))).unwrap(); + + assert_eq!(None, server.sni_hostname()); + do_handshake(&mut client, &mut server); + assert_eq!(Some("second.testserver.com"), server.sni_hostname()); + } +} + +#[test] +fn server_exposes_offered_sni_smashed_to_lowercase() { + // webpki actually does this for us in its DnsName type + let kt = KeyType::Rsa; + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(kt, &[version]); + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("SECOND.TESTServer.com")) + .unwrap(); + let mut server = ServerConnection::new(Arc::new(make_server_config(kt))).unwrap(); + + assert_eq!(None, server.sni_hostname()); + do_handshake(&mut client, &mut server); + assert_eq!(Some("second.testserver.com"), server.sni_hostname()); + } +} + +#[test] +fn server_exposes_offered_sni_even_if_resolver_fails() { + let kt = KeyType::Rsa; + let resolver = rustls::server::ResolvesServerCertUsingSni::new(); + + let mut server_config = make_server_config(kt); + server_config.cert_resolver = Arc::new(resolver); + let server_config = Arc::new(server_config); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(kt, &[version]); + let mut server = ServerConnection::new(Arc::clone(&server_config)).unwrap(); + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("thisdoesNOTexist.com")) + .unwrap(); + + assert_eq!(None, server.sni_hostname()); + transfer(&mut client, &mut server); + assert_eq!( + 
server.process_new_packets(), + Err(Error::General( + "no server certificate chain resolved".to_string() + )) + ); + assert_eq!(Some("thisdoesnotexist.com"), server.sni_hostname()); + } +} + +#[test] +fn sni_resolver_works() { + let kt = KeyType::Rsa; + let mut resolver = rustls::server::ResolvesServerCertUsingSni::new(); + let signing_key = sign::RsaSigningKey::new(&kt.get_key()).unwrap(); + let signing_key: Arc = Arc::new(signing_key); + resolver + .add( + "localhost", + sign::CertifiedKey::new(kt.get_chain(), signing_key.clone()), + ) + .unwrap(); + + let mut server_config = make_server_config(kt); + server_config.cert_resolver = Arc::new(resolver); + let server_config = Arc::new(server_config); + + let mut server1 = ServerConnection::new(Arc::clone(&server_config)).unwrap(); + let mut client1 = + ClientConnection::new(Arc::new(make_client_config(kt)), dns_name("localhost")).unwrap(); + let err = do_handshake_until_error(&mut client1, &mut server1); + assert_eq!(err, Ok(())); + + let mut server2 = ServerConnection::new(Arc::clone(&server_config)).unwrap(); + let mut client2 = + ClientConnection::new(Arc::new(make_client_config(kt)), dns_name("notlocalhost")).unwrap(); + let err = do_handshake_until_error(&mut client2, &mut server2); + assert_eq!( + err, + Err(ErrorFromPeer::Server(Error::General( + "no server certificate chain resolved".into() + ))) + ); +} + +#[test] +fn sni_resolver_rejects_wrong_names() { + let kt = KeyType::Rsa; + let mut resolver = rustls::server::ResolvesServerCertUsingSni::new(); + let signing_key = sign::RsaSigningKey::new(&kt.get_key()).unwrap(); + let signing_key: Arc = Arc::new(signing_key); + + assert_eq!( + Ok(()), + resolver.add( + "localhost", + sign::CertifiedKey::new(kt.get_chain(), signing_key.clone()) + ) + ); + assert_eq!( + Err(Error::General( + "The server certificate is not valid for the given name".into() + )), + resolver.add( + "not-localhost", + sign::CertifiedKey::new(kt.get_chain(), signing_key.clone()) + ) + ); + 
assert_eq!( + Err(Error::General("Bad DNS name".into())), + resolver.add( + "not ascii 🦀", + sign::CertifiedKey::new(kt.get_chain(), signing_key.clone()) + ) + ); +} + +#[test] +fn sni_resolver_lower_cases_configured_names() { + let kt = KeyType::Rsa; + let mut resolver = rustls::server::ResolvesServerCertUsingSni::new(); + let signing_key = sign::RsaSigningKey::new(&kt.get_key()).unwrap(); + let signing_key: Arc = Arc::new(signing_key); + + assert_eq!( + Ok(()), + resolver.add( + "LOCALHOST", + sign::CertifiedKey::new(kt.get_chain(), signing_key.clone()) + ) + ); + + let mut server_config = make_server_config(kt); + server_config.cert_resolver = Arc::new(resolver); + let server_config = Arc::new(server_config); + + let mut server1 = ServerConnection::new(Arc::clone(&server_config)).unwrap(); + let mut client1 = + ClientConnection::new(Arc::new(make_client_config(kt)), dns_name("localhost")).unwrap(); + let err = do_handshake_until_error(&mut client1, &mut server1); + assert_eq!(err, Ok(())); +} + +#[test] +fn sni_resolver_lower_cases_queried_names() { + // actually, the handshake parser does this, but the effect is the same. 
+ let kt = KeyType::Rsa; + let mut resolver = rustls::server::ResolvesServerCertUsingSni::new(); + let signing_key = sign::RsaSigningKey::new(&kt.get_key()).unwrap(); + let signing_key: Arc = Arc::new(signing_key); + + assert_eq!( + Ok(()), + resolver.add( + "localhost", + sign::CertifiedKey::new(kt.get_chain(), signing_key.clone()) + ) + ); + + let mut server_config = make_server_config(kt); + server_config.cert_resolver = Arc::new(resolver); + let server_config = Arc::new(server_config); + + let mut server1 = ServerConnection::new(Arc::clone(&server_config)).unwrap(); + let mut client1 = + ClientConnection::new(Arc::new(make_client_config(kt)), dns_name("LOCALHOST")).unwrap(); + let err = do_handshake_until_error(&mut client1, &mut server1); + assert_eq!(err, Ok(())); +} + +#[test] +fn sni_resolver_rejects_bad_certs() { + let kt = KeyType::Rsa; + let mut resolver = rustls::server::ResolvesServerCertUsingSni::new(); + let signing_key = sign::RsaSigningKey::new(&kt.get_key()).unwrap(); + let signing_key: Arc = Arc::new(signing_key); + + assert_eq!( + Err(Error::General( + "No end-entity certificate in certificate chain".into() + )), + resolver.add( + "localhost", + sign::CertifiedKey::new(vec![], signing_key.clone()) + ) + ); + + let bad_chain = vec![rustls::Certificate(vec![0xa0])]; + assert_eq!( + Err(Error::General( + "End-entity certificate in certificate chain is syntactically invalid".into() + )), + resolver.add( + "localhost", + sign::CertifiedKey::new(bad_chain, signing_key.clone()) + ) + ); +} + +fn do_exporter_test(client_config: ClientConfig, server_config: ServerConfig) { + let mut client_secret = [0u8; 64]; + let mut server_secret = [0u8; 64]; + + let (mut client, mut server) = make_pair_for_configs(client_config, server_config); + + assert_eq!( + Err(Error::HandshakeNotComplete), + client.export_keying_material(&mut client_secret, b"label", Some(b"context")) + ); + assert_eq!( + Err(Error::HandshakeNotComplete), + server.export_keying_material(&mut 
server_secret, b"label", Some(b"context")) + ); + do_handshake(&mut client, &mut server); + + assert_eq!( + Ok(()), + client.export_keying_material(&mut client_secret, b"label", Some(b"context")) + ); + assert_eq!( + Ok(()), + server.export_keying_material(&mut server_secret, b"label", Some(b"context")) + ); + assert_eq!(client_secret.to_vec(), server_secret.to_vec()); + + assert_eq!( + Ok(()), + client.export_keying_material(&mut client_secret, b"label", None) + ); + assert_ne!(client_secret.to_vec(), server_secret.to_vec()); + assert_eq!( + Ok(()), + server.export_keying_material(&mut server_secret, b"label", None) + ); + assert_eq!(client_secret.to_vec(), server_secret.to_vec()); +} + +#[cfg(feature = "tls12")] +#[test] +fn test_tls12_exporter() { + for kt in ALL_KEY_TYPES.iter() { + let client_config = make_client_config_with_versions(*kt, &[&rustls::version::TLS12]); + let server_config = make_server_config(*kt); + + do_exporter_test(client_config, server_config); + } +} + +#[test] +fn test_tls13_exporter() { + for kt in ALL_KEY_TYPES.iter() { + let client_config = make_client_config_with_versions(*kt, &[&rustls::version::TLS13]); + let server_config = make_server_config(*kt); + + do_exporter_test(client_config, server_config); + } +} + +fn do_suite_test( + client_config: ClientConfig, + server_config: ServerConfig, + expect_suite: SupportedCipherSuite, + expect_version: ProtocolVersion, +) { + println!( + "do_suite_test {:?} {:?}", + expect_version, + expect_suite.suite() + ); + let (mut client, mut server) = make_pair_for_configs(client_config, server_config); + + assert_eq!(None, client.negotiated_cipher_suite()); + assert_eq!(None, server.negotiated_cipher_suite()); + assert_eq!(None, client.protocol_version()); + assert_eq!(None, server.protocol_version()); + assert!(client.is_handshaking()); + assert!(server.is_handshaking()); + + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + + assert!(client.is_handshaking()); + 
assert!(server.is_handshaking()); + assert_eq!(None, client.protocol_version()); + assert_eq!(Some(expect_version), server.protocol_version()); + assert_eq!(None, client.negotiated_cipher_suite()); + assert_eq!(Some(expect_suite), server.negotiated_cipher_suite()); + + transfer(&mut server, &mut client); + client.process_new_packets().unwrap(); + + assert_eq!(Some(expect_suite), client.negotiated_cipher_suite()); + assert_eq!(Some(expect_suite), server.negotiated_cipher_suite()); + + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + transfer(&mut server, &mut client); + client.process_new_packets().unwrap(); + + assert!(!client.is_handshaking()); + assert!(!server.is_handshaking()); + assert_eq!(Some(expect_version), client.protocol_version()); + assert_eq!(Some(expect_version), server.protocol_version()); + assert_eq!(Some(expect_suite), client.negotiated_cipher_suite()); + assert_eq!(Some(expect_suite), server.negotiated_cipher_suite()); +} + +fn find_suite(suite: CipherSuite) -> SupportedCipherSuite { + for scs in ALL_CIPHER_SUITES.iter().copied() { + if scs.suite() == suite { + return scs; + } + } + + panic!("find_suite given unsupported suite"); +} + +static TEST_CIPHERSUITES: &[(&rustls::SupportedProtocolVersion, KeyType, CipherSuite)] = &[ + ( + &rustls::version::TLS13, + KeyType::Rsa, + CipherSuite::TLS13_CHACHA20_POLY1305_SHA256, + ), + ( + &rustls::version::TLS13, + KeyType::Rsa, + CipherSuite::TLS13_AES_256_GCM_SHA384, + ), + ( + &rustls::version::TLS13, + KeyType::Rsa, + CipherSuite::TLS13_AES_128_GCM_SHA256, + ), + #[cfg(feature = "tls12")] + ( + &rustls::version::TLS12, + KeyType::Ecdsa, + CipherSuite::TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + ), + #[cfg(feature = "tls12")] + ( + &rustls::version::TLS12, + KeyType::Rsa, + CipherSuite::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + ), + #[cfg(feature = "tls12")] + ( + &rustls::version::TLS12, + KeyType::Ecdsa, + CipherSuite::TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + 
), + #[cfg(feature = "tls12")] + ( + &rustls::version::TLS12, + KeyType::Ecdsa, + CipherSuite::TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + ), + #[cfg(feature = "tls12")] + ( + &rustls::version::TLS12, + KeyType::Rsa, + CipherSuite::TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + ), + #[cfg(feature = "tls12")] + ( + &rustls::version::TLS12, + KeyType::Rsa, + CipherSuite::TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + ), +]; + +#[test] +fn negotiated_ciphersuite_default() { + for kt in ALL_KEY_TYPES.iter() { + do_suite_test( + make_client_config(*kt), + make_server_config(*kt), + find_suite(CipherSuite::TLS13_AES_256_GCM_SHA384), + ProtocolVersion::TLSv1_3, + ); + } +} + +#[test] +fn all_suites_covered() { + assert_eq!(ALL_CIPHER_SUITES.len(), TEST_CIPHERSUITES.len()); +} + +#[test] +fn negotiated_ciphersuite_client() { + for item in TEST_CIPHERSUITES.iter() { + let (version, kt, suite) = *item; + let scs = find_suite(suite); + let client_config = finish_client_config( + kt, + ClientConfig::builder() + .with_cipher_suites(&[scs]) + .with_safe_default_kx_groups() + .with_protocol_versions(&[version]) + .unwrap(), + ); + + do_suite_test(client_config, make_server_config(kt), scs, version.version); + } +} + +#[test] +fn negotiated_ciphersuite_server() { + for item in TEST_CIPHERSUITES.iter() { + let (version, kt, suite) = *item; + let scs = find_suite(suite); + let server_config = finish_server_config( + kt, + ServerConfig::builder() + .with_cipher_suites(&[scs]) + .with_safe_default_kx_groups() + .with_protocol_versions(&[version]) + .unwrap(), + ); + + do_suite_test(make_client_config(kt), server_config, scs, version.version); + } +} + +#[derive(Debug, PartialEq)] +struct KeyLogItem { + label: String, + client_random: Vec, + secret: Vec, +} + +struct KeyLogToVec { + label: &'static str, + items: Mutex>, +} + +impl KeyLogToVec { + fn new(who: &'static str) -> Self { + KeyLogToVec { + label: who, + items: Mutex::new(vec![]), + } + } + + fn take(&self) -> Vec { + std::mem::take(&mut 
self.items.lock().unwrap()) + } +} + +impl KeyLog for KeyLogToVec { + fn log(&self, label: &str, client: &[u8], secret: &[u8]) { + let value = KeyLogItem { + label: label.into(), + client_random: client.into(), + secret: secret.into(), + }; + + println!("key log {:?}: {:?}", self.label, value); + + self.items.lock().unwrap().push(value); + } +} + +#[cfg(feature = "tls12")] +#[test] +fn key_log_for_tls12() { + let client_key_log = Arc::new(KeyLogToVec::new("client")); + let server_key_log = Arc::new(KeyLogToVec::new("server")); + + let kt = KeyType::Rsa; + let mut client_config = make_client_config_with_versions(kt, &[&rustls::version::TLS12]); + client_config.key_log = client_key_log.clone(); + let client_config = Arc::new(client_config); + + let mut server_config = make_server_config(kt); + server_config.key_log = server_key_log.clone(); + let server_config = Arc::new(server_config); + + // full handshake + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + do_handshake(&mut client, &mut server); + + let client_full_log = client_key_log.take(); + let server_full_log = server_key_log.take(); + assert_eq!(client_full_log, server_full_log); + assert_eq!(1, client_full_log.len()); + assert_eq!("CLIENT_RANDOM", client_full_log[0].label); + + // resumed + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + do_handshake(&mut client, &mut server); + + let client_resume_log = client_key_log.take(); + let server_resume_log = server_key_log.take(); + assert_eq!(client_resume_log, server_resume_log); + assert_eq!(1, client_resume_log.len()); + assert_eq!("CLIENT_RANDOM", client_resume_log[0].label); + assert_eq!(client_full_log[0].secret, client_resume_log[0].secret); +} + +#[test] +fn key_log_for_tls13() { + let client_key_log = Arc::new(KeyLogToVec::new("client")); + let server_key_log = Arc::new(KeyLogToVec::new("server")); + + let kt = KeyType::Rsa; + let mut client_config = 
make_client_config_with_versions(kt, &[&rustls::version::TLS13]); + client_config.key_log = client_key_log.clone(); + let client_config = Arc::new(client_config); + + let mut server_config = make_server_config(kt); + server_config.key_log = server_key_log.clone(); + let server_config = Arc::new(server_config); + + // full handshake + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + do_handshake(&mut client, &mut server); + + let client_full_log = client_key_log.take(); + let server_full_log = server_key_log.take(); + + assert_eq!(5, client_full_log.len()); + assert_eq!("CLIENT_HANDSHAKE_TRAFFIC_SECRET", client_full_log[0].label); + assert_eq!("SERVER_HANDSHAKE_TRAFFIC_SECRET", client_full_log[1].label); + assert_eq!("CLIENT_TRAFFIC_SECRET_0", client_full_log[2].label); + assert_eq!("SERVER_TRAFFIC_SECRET_0", client_full_log[3].label); + assert_eq!("EXPORTER_SECRET", client_full_log[4].label); + + assert_eq!(client_full_log[0], server_full_log[0]); + assert_eq!(client_full_log[1], server_full_log[1]); + assert_eq!(client_full_log[2], server_full_log[2]); + assert_eq!(client_full_log[3], server_full_log[3]); + assert_eq!(client_full_log[4], server_full_log[4]); + + // resumed + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + do_handshake(&mut client, &mut server); + + let client_resume_log = client_key_log.take(); + let server_resume_log = server_key_log.take(); + + assert_eq!(5, client_resume_log.len()); + assert_eq!( + "CLIENT_HANDSHAKE_TRAFFIC_SECRET", + client_resume_log[0].label + ); + assert_eq!( + "SERVER_HANDSHAKE_TRAFFIC_SECRET", + client_resume_log[1].label + ); + assert_eq!("CLIENT_TRAFFIC_SECRET_0", client_resume_log[2].label); + assert_eq!("SERVER_TRAFFIC_SECRET_0", client_resume_log[3].label); + assert_eq!("EXPORTER_SECRET", client_resume_log[4].label); + + assert_eq!(6, server_resume_log.len()); + assert_eq!("CLIENT_EARLY_TRAFFIC_SECRET", server_resume_log[0].label); 
+ assert_eq!( + "CLIENT_HANDSHAKE_TRAFFIC_SECRET", + server_resume_log[1].label + ); + assert_eq!( + "SERVER_HANDSHAKE_TRAFFIC_SECRET", + server_resume_log[2].label + ); + assert_eq!("CLIENT_TRAFFIC_SECRET_0", server_resume_log[3].label); + assert_eq!("SERVER_TRAFFIC_SECRET_0", server_resume_log[4].label); + assert_eq!("EXPORTER_SECRET", server_resume_log[5].label); + + assert_eq!(client_resume_log[0], server_resume_log[1]); + assert_eq!(client_resume_log[1], server_resume_log[2]); + assert_eq!(client_resume_log[2], server_resume_log[3]); + assert_eq!(client_resume_log[3], server_resume_log[4]); + assert_eq!(client_resume_log[4], server_resume_log[5]); +} + +#[test] +fn vectored_write_for_server_appdata() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + do_handshake(&mut client, &mut server); + + server + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + server + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + { + let mut pipe = OtherSession::new(&mut client); + let wrlen = server.write_tls(&mut pipe).unwrap(); + assert_eq!(84, wrlen); + assert_eq!(pipe.writevs, vec![vec![42, 42]]); + } + check_read( + &mut client.reader(), + b"0123456789012345678901234567890123456789", + ); +} + +#[test] +fn vectored_write_for_client_appdata() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + do_handshake(&mut client, &mut server); + + client + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + client + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + { + let mut pipe = OtherSession::new(&mut server); + let wrlen = client.write_tls(&mut pipe).unwrap(); + assert_eq!(84, wrlen); + assert_eq!(pipe.writevs, vec![vec![42, 42]]); + } + check_read( + &mut server.reader(), + b"0123456789012345678901234567890123456789", + ); +} + +#[test] +fn vectored_write_for_server_handshake_with_half_rtt_data() { + let mut server_config = make_server_config(KeyType::Rsa); + server_config.send_half_rtt_data = true; + let (mut 
client, mut server) = + make_pair_for_configs(make_client_config_with_auth(KeyType::Rsa), server_config); + + server + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + server + .writer() + .write_all(b"0123456789") + .unwrap(); + + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + { + let mut pipe = OtherSession::new(&mut client); + let wrlen = server.write_tls(&mut pipe).unwrap(); + // don't assert exact sizes here, to avoid a brittle test + assert!(wrlen > 4000); // its pretty big (contains cert chain) + assert_eq!(pipe.writevs.len(), 1); // only one writev + assert_eq!(pipe.writevs[0].len(), 8); // at least a server hello/ccs/cert/serverkx/0.5rtt data + } + + client.process_new_packets().unwrap(); + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + { + let mut pipe = OtherSession::new(&mut client); + let wrlen = server.write_tls(&mut pipe).unwrap(); + assert_eq!(wrlen, 103); + assert_eq!(pipe.writevs, vec![vec![103]]); + } + + assert!(!server.is_handshaking()); + assert!(!client.is_handshaking()); + check_read(&mut client.reader(), b"012345678901234567890123456789"); +} + +fn check_half_rtt_does_not_work(server_config: ServerConfig) { + let (mut client, mut server) = + make_pair_for_configs(make_client_config_with_auth(KeyType::Rsa), server_config); + + server + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + server + .writer() + .write_all(b"0123456789") + .unwrap(); + + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + { + let mut pipe = OtherSession::new(&mut client); + let wrlen = server.write_tls(&mut pipe).unwrap(); + // don't assert exact sizes here, to avoid a brittle test + assert!(wrlen > 4000); // its pretty big (contains cert chain) + assert_eq!(pipe.writevs.len(), 1); // only one writev + assert!(pipe.writevs[0].len() >= 6); // at least a server hello/ccs/cert/serverkx data + } + + // client second flight + 
client.process_new_packets().unwrap(); + transfer(&mut client, &mut server); + + // when client auth is enabled, we don't sent 0.5-rtt data, as we'd be sending + // it to an unauthenticated peer. so it happens here, in the server's second + // flight (42 and 32 are lengths of appdata sent above). + server.process_new_packets().unwrap(); + { + let mut pipe = OtherSession::new(&mut client); + let wrlen = server.write_tls(&mut pipe).unwrap(); + assert_eq!(wrlen, 177); + assert_eq!(pipe.writevs, vec![vec![103, 42, 32]]); + } + + assert!(!server.is_handshaking()); + assert!(!client.is_handshaking()); + check_read(&mut client.reader(), b"012345678901234567890123456789"); +} + +#[test] +fn vectored_write_for_server_handshake_no_half_rtt_with_client_auth() { + let mut server_config = make_server_config_with_mandatory_client_auth(KeyType::Rsa); + server_config.send_half_rtt_data = true; // ask even though it will be ignored + check_half_rtt_does_not_work(server_config); +} + +#[test] +fn vectored_write_for_server_handshake_no_half_rtt_by_default() { + let server_config = make_server_config(KeyType::Rsa); + assert!(!server_config.send_half_rtt_data); + check_half_rtt_does_not_work(server_config); +} + +#[test] +fn vectored_write_for_client_handshake() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + + client + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + client + .writer() + .write_all(b"0123456789") + .unwrap(); + { + let mut pipe = OtherSession::new(&mut server); + let wrlen = client.write_tls(&mut pipe).unwrap(); + // don't assert exact sizes here, to avoid a brittle test + assert!(wrlen > 200); // just the client hello + assert_eq!(pipe.writevs.len(), 1); // only one writev + assert!(pipe.writevs[0].len() == 1); // only a client hello + } + + transfer(&mut server, &mut client); + client.process_new_packets().unwrap(); + + { + let mut pipe = OtherSession::new(&mut server); + let wrlen = client.write_tls(&mut pipe).unwrap(); + 
assert_eq!(wrlen, 154); + // CCS, finished, then two application datas + assert_eq!(pipe.writevs, vec![vec![6, 74, 42, 32]]); + } + + assert!(!server.is_handshaking()); + assert!(!client.is_handshaking()); + check_read(&mut server.reader(), b"012345678901234567890123456789"); +} + +#[test] +fn vectored_write_with_slow_client() { + let (mut client, mut server) = make_pair(KeyType::Rsa); + + client.set_buffer_limit(Some(32)); + + do_handshake(&mut client, &mut server); + server + .writer() + .write_all(b"01234567890123456789") + .unwrap(); + + { + let mut pipe = OtherSession::new(&mut client); + pipe.short_writes = true; + let wrlen = server.write_tls(&mut pipe).unwrap() + + server.write_tls(&mut pipe).unwrap() + + server.write_tls(&mut pipe).unwrap() + + server.write_tls(&mut pipe).unwrap() + + server.write_tls(&mut pipe).unwrap() + + server.write_tls(&mut pipe).unwrap(); + assert_eq!(42, wrlen); + assert_eq!( + pipe.writevs, + vec![vec![21], vec![10], vec![5], vec![3], vec![3]] + ); + } + check_read(&mut client.reader(), b"01234567890123456789"); +} + +struct ServerStorage { + storage: Arc, + put_count: AtomicUsize, + get_count: AtomicUsize, + take_count: AtomicUsize, +} + +impl ServerStorage { + fn new() -> Self { + ServerStorage { + storage: rustls::server::ServerSessionMemoryCache::new(1024), + put_count: AtomicUsize::new(0), + get_count: AtomicUsize::new(0), + take_count: AtomicUsize::new(0), + } + } + + fn puts(&self) -> usize { + self.put_count.load(Ordering::SeqCst) + } + fn gets(&self) -> usize { + self.get_count.load(Ordering::SeqCst) + } + fn takes(&self) -> usize { + self.take_count.load(Ordering::SeqCst) + } +} + +impl fmt::Debug for ServerStorage { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "(put: {:?}, get: {:?}, take: {:?})", + self.put_count, self.get_count, self.take_count + ) + } +} + +impl rustls::server::StoresServerSessions for ServerStorage { + fn put(&self, key: Vec, value: Vec) -> bool { + self.put_count + 
.fetch_add(1, Ordering::SeqCst); + self.storage.put(key, value) + } + + fn get(&self, key: &[u8]) -> Option> { + self.get_count + .fetch_add(1, Ordering::SeqCst); + self.storage.get(key) + } + + fn take(&self, key: &[u8]) -> Option> { + self.take_count + .fetch_add(1, Ordering::SeqCst); + self.storage.take(key) + } + + fn can_cache(&self) -> bool { + true + } +} + +struct ClientStorage { + storage: Arc, + put_count: AtomicUsize, + get_count: AtomicUsize, + last_put_key: Mutex>>, +} + +impl ClientStorage { + fn new() -> Self { + ClientStorage { + storage: rustls::client::ClientSessionMemoryCache::new(1024), + put_count: AtomicUsize::new(0), + get_count: AtomicUsize::new(0), + last_put_key: Mutex::new(None), + } + } + + fn puts(&self) -> usize { + self.put_count.load(Ordering::SeqCst) + } + fn gets(&self) -> usize { + self.get_count.load(Ordering::SeqCst) + } +} + +impl fmt::Debug for ClientStorage { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "(puts: {:?}, gets: {:?} )", + self.put_count, self.get_count + ) + } +} + +impl rustls::client::StoresClientSessions for ClientStorage { + fn put(&self, key: Vec, value: Vec) -> bool { + self.put_count + .fetch_add(1, Ordering::SeqCst); + *self.last_put_key.lock().unwrap() = Some(key.clone()); + self.storage.put(key, value) + } + + fn get(&self, key: &[u8]) -> Option> { + self.get_count + .fetch_add(1, Ordering::SeqCst); + self.storage.get(key) + } +} + +#[test] +fn tls13_stateful_resumption() { + let kt = KeyType::Rsa; + let client_config = make_client_config_with_versions(kt, &[&rustls::version::TLS13]); + let client_config = Arc::new(client_config); + + let mut server_config = make_server_config(kt); + let storage = Arc::new(ServerStorage::new()); + server_config.session_storage = storage.clone(); + let server_config = Arc::new(server_config); + + // full handshake + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + let (full_c2s, full_s2c) = 
do_handshake(&mut client, &mut server); + assert_eq!(storage.puts(), 1); + assert_eq!(storage.gets(), 0); + assert_eq!(storage.takes(), 0); + assert_eq!( + client + .peer_certificates() + .map(|certs| certs.len()), + Some(3) + ); + + // resumed + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + let (resume_c2s, resume_s2c) = do_handshake(&mut client, &mut server); + assert!(resume_c2s > full_c2s); + assert!(resume_s2c < full_s2c); + assert_eq!(storage.puts(), 2); + assert_eq!(storage.gets(), 0); + assert_eq!(storage.takes(), 1); + assert_eq!( + client + .peer_certificates() + .map(|certs| certs.len()), + Some(3) + ); + + // resumed again + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + let (resume2_c2s, resume2_s2c) = do_handshake(&mut client, &mut server); + assert_eq!(resume_s2c, resume2_s2c); + assert_eq!(resume_c2s, resume2_c2s); + assert_eq!(storage.puts(), 3); + assert_eq!(storage.gets(), 0); + assert_eq!(storage.takes(), 2); + assert_eq!( + client + .peer_certificates() + .map(|certs| certs.len()), + Some(3) + ); +} + +#[test] +fn tls13_stateless_resumption() { + let kt = KeyType::Rsa; + let client_config = make_client_config_with_versions(kt, &[&rustls::version::TLS13]); + let client_config = Arc::new(client_config); + + let mut server_config = make_server_config(kt); + server_config.ticketer = rustls::Ticketer::new().unwrap(); + let storage = Arc::new(ServerStorage::new()); + server_config.session_storage = storage.clone(); + let server_config = Arc::new(server_config); + + // full handshake + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + let (full_c2s, full_s2c) = do_handshake(&mut client, &mut server); + assert_eq!(storage.puts(), 0); + assert_eq!(storage.gets(), 0); + assert_eq!(storage.takes(), 0); + assert_eq!( + client + .peer_certificates() + .map(|certs| certs.len()), + Some(3) + ); + + // resumed + let (mut client, 
mut server) = make_pair_for_arc_configs(&client_config, &server_config); + let (resume_c2s, resume_s2c) = do_handshake(&mut client, &mut server); + assert!(resume_c2s > full_c2s); + assert!(resume_s2c < full_s2c); + assert_eq!(storage.puts(), 0); + assert_eq!(storage.gets(), 0); + assert_eq!(storage.takes(), 0); + assert_eq!( + client + .peer_certificates() + .map(|certs| certs.len()), + Some(3) + ); + + // resumed again + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + let (resume2_c2s, resume2_s2c) = do_handshake(&mut client, &mut server); + assert_eq!(resume_s2c, resume2_s2c); + assert_eq!(resume_c2s, resume2_c2s); + assert_eq!(storage.puts(), 0); + assert_eq!(storage.gets(), 0); + assert_eq!(storage.takes(), 0); + assert_eq!( + client + .peer_certificates() + .map(|certs| certs.len()), + Some(3) + ); +} + +#[test] +fn early_data_not_available() { + let (mut client, _) = make_pair(KeyType::Rsa); + assert!(client.early_data().is_none()); +} + +fn early_data_configs() -> (Arc, Arc) { + let kt = KeyType::Rsa; + let mut client_config = make_client_config(kt); + client_config.enable_early_data = true; + client_config.session_storage = Arc::new(ClientStorage::new()); + + let mut server_config = make_server_config(kt); + server_config.max_early_data_size = 1234; + (Arc::new(client_config), Arc::new(server_config)) +} + +#[test] +fn early_data_is_available_on_resumption() { + let (client_config, server_config) = early_data_configs(); + + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + do_handshake(&mut client, &mut server); + + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + assert!(client.early_data().is_some()); + assert_eq!( + client + .early_data() + .unwrap() + .bytes_left(), + 1234 + ); + client + .early_data() + .unwrap() + .flush() + .unwrap(); + assert_eq!( + client + .early_data() + .unwrap() + .write(b"hello") + .unwrap(), + 5 + 
); + do_handshake(&mut client, &mut server); + + let mut received_early_data = [0u8; 5]; + assert_eq!( + server + .early_data() + .expect("early_data didn't happen") + .read(&mut received_early_data) + .expect("early_data failed unexpectedly"), + 5 + ); + assert_eq!(&received_early_data[..], b"hello"); +} + +#[test] +fn early_data_not_available_on_server_before_client_hello() { + let mut server = ServerConnection::new(Arc::new(make_server_config(KeyType::Rsa))).unwrap(); + assert!(server.early_data().is_none()); +} + +#[test] +fn early_data_can_be_rejected_by_server() { + let (client_config, server_config) = early_data_configs(); + + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + do_handshake(&mut client, &mut server); + + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + assert!(client.early_data().is_some()); + assert_eq!( + client + .early_data() + .unwrap() + .bytes_left(), + 1234 + ); + client + .early_data() + .unwrap() + .flush() + .unwrap(); + assert_eq!( + client + .early_data() + .unwrap() + .write(b"hello") + .unwrap(), + 5 + ); + server.reject_early_data(); + do_handshake(&mut client, &mut server); + + assert!(!client.is_early_data_accepted()); +} + +#[cfg(feature = "quic")] +mod test_quic { + use super::*; + use rustls::Connection; + + // Returns the sender's next secrets to use, or the receiver's error. + fn step( + send: &mut dyn QuicExt, + recv: &mut dyn QuicExt, + ) -> Result, Error> { + let mut buf = Vec::new(); + let change = loop { + let prev = buf.len(); + if let Some(x) = send.write_hs(&mut buf) { + break Some(x); + } + if prev == buf.len() { + break None; + } + }; + if let Err(e) = recv.read_hs(&buf) { + return Err(e); + } else { + assert_eq!(recv.alert(), None); + } + + Ok(change) + } + + #[test] + fn test_quic_handshake() { + fn equal_packet_keys(x: &quic::PacketKey, y: &quic::PacketKey) -> bool { + // Check that these two sets of keys are equal. 
+ let mut buf = vec![0; 32]; + let (header, payload_tag) = buf.split_at_mut(8); + let (payload, tag_buf) = payload_tag.split_at_mut(8); + let tag = x + .encrypt_in_place(42, &*header, payload) + .unwrap(); + tag_buf.copy_from_slice(tag.as_ref()); + + let result = y.decrypt_in_place(42, &*header, payload_tag); + match result { + Ok(payload) => payload == &[0; 8], + Err(_) => false, + } + } + + fn compatible_keys(x: &quic::KeyChange, y: &quic::KeyChange) -> bool { + fn keys(kc: &quic::KeyChange) -> &quic::Keys { + match kc { + quic::KeyChange::Handshake { keys } => keys, + quic::KeyChange::OneRtt { keys, .. } => keys, + } + } + + let (x, y) = (keys(x), keys(y)); + equal_packet_keys(&x.local.packet, &y.remote.packet) + && equal_packet_keys(&x.remote.packet, &y.local.packet) + } + + let kt = KeyType::Rsa; + let mut client_config = make_client_config_with_versions(kt, &[&rustls::version::TLS13]); + client_config.enable_early_data = true; + let client_config = Arc::new(client_config); + let mut server_config = make_server_config_with_versions(kt, &[&rustls::version::TLS13]); + server_config.max_early_data_size = 0xffffffff; + let server_config = Arc::new(server_config); + let client_params = &b"client params"[..]; + let server_params = &b"server params"[..]; + + // full handshake + let mut client = Connection::from( + ClientConnection::new_quic( + Arc::clone(&client_config), + quic::Version::V1, + dns_name("localhost"), + client_params.into(), + ) + .unwrap(), + ); + + let mut server = Connection::from( + ServerConnection::new_quic( + Arc::clone(&server_config), + quic::Version::V1, + server_params.into(), + ) + .unwrap(), + ); + + let client_initial = step(&mut client, &mut server).unwrap(); + assert!(client_initial.is_none()); + assert!(client.zero_rtt_keys().is_none()); + assert_eq!(server.quic_transport_parameters(), Some(client_params)); + let server_hs = step(&mut server, &mut client) + .unwrap() + .unwrap(); + assert!(server.zero_rtt_keys().is_none()); + let 
client_hs = step(&mut client, &mut server) + .unwrap() + .unwrap(); + assert!(compatible_keys(&server_hs, &client_hs)); + assert!(client.is_handshaking()); + let server_1rtt = step(&mut server, &mut client) + .unwrap() + .unwrap(); + assert!(!client.is_handshaking()); + assert_eq!(client.quic_transport_parameters(), Some(server_params)); + assert!(server.is_handshaking()); + let client_1rtt = step(&mut client, &mut server) + .unwrap() + .unwrap(); + assert!(!server.is_handshaking()); + assert!(compatible_keys(&server_1rtt, &client_1rtt)); + assert!(!compatible_keys(&server_hs, &server_1rtt)); + assert!(step(&mut client, &mut server) + .unwrap() + .is_none()); + assert!(step(&mut server, &mut client) + .unwrap() + .is_none()); + + // 0-RTT handshake + let mut client = ClientConnection::new_quic( + Arc::clone(&client_config), + quic::Version::V1, + dns_name("localhost"), + client_params.into(), + ) + .unwrap(); + assert!(client + .negotiated_cipher_suite() + .is_some()); + + let mut server = ServerConnection::new_quic( + Arc::clone(&server_config), + quic::Version::V1, + server_params.into(), + ) + .unwrap(); + + step(&mut client, &mut server).unwrap(); + assert_eq!(client.quic_transport_parameters(), Some(server_params)); + { + let client_early = client.zero_rtt_keys().unwrap(); + let server_early = server.zero_rtt_keys().unwrap(); + assert!(equal_packet_keys( + &client_early.packet, + &server_early.packet + )); + } + step(&mut server, &mut client) + .unwrap() + .unwrap(); + step(&mut client, &mut server) + .unwrap() + .unwrap(); + step(&mut server, &mut client) + .unwrap() + .unwrap(); + assert!(client.is_early_data_accepted()); + + // 0-RTT rejection + { + let client_config = (*client_config).clone(); + let mut client = ClientConnection::new_quic( + Arc::new(client_config), + quic::Version::V1, + dns_name("localhost"), + client_params.into(), + ) + .unwrap(); + + let mut server = ServerConnection::new_quic( + Arc::clone(&server_config), + quic::Version::V1, + 
server_params.into(), + ) + .unwrap(); + + step(&mut client, &mut server).unwrap(); + assert_eq!(client.quic_transport_parameters(), Some(server_params)); + assert!(client.zero_rtt_keys().is_some()); + assert!(server.zero_rtt_keys().is_none()); + step(&mut server, &mut client) + .unwrap() + .unwrap(); + step(&mut client, &mut server) + .unwrap() + .unwrap(); + step(&mut server, &mut client) + .unwrap() + .unwrap(); + assert!(!client.is_early_data_accepted()); + } + + // failed handshake + let mut client = ClientConnection::new_quic( + client_config, + quic::Version::V1, + dns_name("example.com"), + client_params.into(), + ) + .unwrap(); + + let mut server = + ServerConnection::new_quic(server_config, quic::Version::V1, server_params.into()) + .unwrap(); + + step(&mut client, &mut server).unwrap(); + step(&mut server, &mut client) + .unwrap() + .unwrap(); + assert!(step(&mut server, &mut client).is_err()); + assert_eq!( + client.alert(), + Some(rustls::AlertDescription::BadCertificate) + ); + + // Key updates + + let (mut client_secrets, mut server_secrets) = match (client_1rtt, server_1rtt) { + (quic::KeyChange::OneRtt { next: c, .. }, quic::KeyChange::OneRtt { next: s, .. 
}) => { + (c, s) + } + _ => unreachable!(), + }; + + let mut client_next = client_secrets.next_packet_keys(); + let mut server_next = server_secrets.next_packet_keys(); + assert!(equal_packet_keys(&client_next.local, &server_next.remote)); + assert!(equal_packet_keys(&server_next.local, &client_next.remote)); + + client_next = client_secrets.next_packet_keys(); + server_next = server_secrets.next_packet_keys(); + assert!(equal_packet_keys(&client_next.local, &server_next.remote)); + assert!(equal_packet_keys(&server_next.local, &client_next.remote)); + } + + #[test] + fn test_quic_rejects_missing_alpn() { + let client_params = &b"client params"[..]; + let server_params = &b"server params"[..]; + + for &kt in ALL_KEY_TYPES.iter() { + let client_config = make_client_config_with_versions(kt, &[&rustls::version::TLS13]); + let client_config = Arc::new(client_config); + + let mut server_config = + make_server_config_with_versions(kt, &[&rustls::version::TLS13]); + server_config.alpn_protocols = vec!["foo".into()]; + let server_config = Arc::new(server_config); + + let mut client = ClientConnection::new_quic( + client_config, + quic::Version::V1, + dns_name("localhost"), + client_params.into(), + ) + .unwrap(); + let mut server = + ServerConnection::new_quic(server_config, quic::Version::V1, server_params.into()) + .unwrap(); + + assert_eq!( + step(&mut client, &mut server) + .err() + .unwrap(), + Error::NoApplicationProtocol + ); + + assert_eq!( + server.alert(), + Some(rustls::AlertDescription::NoApplicationProtocol) + ); + } + } + + #[cfg(feature = "tls12")] + #[test] + fn test_quic_no_tls13_error() { + let mut client_config = + make_client_config_with_versions(KeyType::Ed25519, &[&rustls::version::TLS12]); + client_config.alpn_protocols = vec!["foo".into()]; + let client_config = Arc::new(client_config); + + assert!(ClientConnection::new_quic( + client_config, + quic::Version::V1, + dns_name("localhost"), + b"client params".to_vec(), + ) + .is_err()); + + let mut 
server_config = + make_server_config_with_versions(KeyType::Ed25519, &[&rustls::version::TLS12]); + server_config.alpn_protocols = vec!["foo".into()]; + let server_config = Arc::new(server_config); + + assert!(ServerConnection::new_quic( + server_config, + quic::Version::V1, + b"server params".to_vec(), + ) + .is_err()); + } + + #[test] + fn test_quic_invalid_early_data_size() { + let mut server_config = + make_server_config_with_versions(KeyType::Ed25519, &[&rustls::version::TLS13]); + server_config.alpn_protocols = vec!["foo".into()]; + + let cases = [ + (None, true), + (Some(0u32), true), + (Some(5), false), + (Some(0xffff_ffff), true), + ]; + + for &(size, ok) in cases.iter() { + println!("early data size case: {:?}", size); + if let Some(new) = size { + server_config.max_early_data_size = new; + } + + let wrapped = Arc::new(server_config.clone()); + assert_eq!( + ServerConnection::new_quic(wrapped, quic::Version::V1, b"server params".to_vec(),) + .is_ok(), + ok + ); + } + } + + #[test] + fn test_quic_server_no_params_received() { + let server_config = + make_server_config_with_versions(KeyType::Ed25519, &[&rustls::version::TLS13]); + let server_config = Arc::new(server_config); + + let mut server = + ServerConnection::new_quic(server_config, quic::Version::V1, b"server params".to_vec()) + .unwrap(); + + use ring::rand::SecureRandom; + use rustls::internal::msgs::base::PayloadU16; + use rustls::internal::msgs::enums::{Compression, HandshakeType, NamedGroup}; + use rustls::internal::msgs::handshake::{ + ClientHelloPayload, HandshakeMessagePayload, KeyShareEntry, Random, SessionID, + }; + use rustls::internal::msgs::message::PlainMessage; + use rustls::{CipherSuite, SignatureScheme}; + + let rng = ring::rand::SystemRandom::new(); + let mut random = [0; 32]; + rng.fill(&mut random).unwrap(); + let random = Random::from(random); + + let kx = ring::agreement::EphemeralPrivateKey::generate(&ring::agreement::X25519, &rng) + .unwrap() + .compute_public_key() + 
.unwrap(); + + let client_hello = Message { + version: ProtocolVersion::TLSv1_3, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::ClientHello, + payload: HandshakePayload::ClientHello(ClientHelloPayload { + client_version: ProtocolVersion::TLSv1_3, + random, + session_id: SessionID::random().unwrap(), + cipher_suites: vec![CipherSuite::TLS13_AES_128_GCM_SHA256], + compression_methods: vec![Compression::Null], + extensions: vec![ + ClientExtension::SupportedVersions(vec![ProtocolVersion::TLSv1_3]), + ClientExtension::NamedGroups(vec![NamedGroup::X25519]), + ClientExtension::SignatureAlgorithms(vec![SignatureScheme::ED25519]), + ClientExtension::KeyShare(vec![KeyShareEntry { + group: NamedGroup::X25519, + payload: PayloadU16::new(kx.as_ref().to_vec()), + }]), + ], + }), + }), + }; + + let buf = PlainMessage::from(client_hello) + .into_unencrypted_opaque() + .encode(); + server + .read_tls(&mut buf.as_slice()) + .unwrap(); + assert_eq!( + server.process_new_packets().err(), + Some(Error::PeerMisbehavedError( + "QUIC transport parameters not found".into(), + )), + ); + } + + #[test] + fn test_quic_server_no_tls12() { + let mut server_config = + make_server_config_with_versions(KeyType::Ed25519, &[&rustls::version::TLS13]); + server_config.alpn_protocols = vec!["foo".into()]; + let server_config = Arc::new(server_config); + + use ring::rand::SecureRandom; + use rustls::internal::msgs::base::PayloadU16; + use rustls::internal::msgs::enums::{Compression, HandshakeType, NamedGroup}; + use rustls::internal::msgs::handshake::{ + ClientHelloPayload, HandshakeMessagePayload, KeyShareEntry, Random, SessionID, + }; + use rustls::internal::msgs::message::PlainMessage; + use rustls::{CipherSuite, SignatureScheme}; + + let rng = ring::rand::SystemRandom::new(); + let mut random = [0; 32]; + rng.fill(&mut random).unwrap(); + let random = Random::from(random); + + let kx = ring::agreement::EphemeralPrivateKey::generate(&ring::agreement::X25519, 
&rng) + .unwrap() + .compute_public_key() + .unwrap(); + + let mut server = + ServerConnection::new_quic(server_config, quic::Version::V1, b"server params".to_vec()) + .unwrap(); + + let client_hello = Message { + version: ProtocolVersion::TLSv1_2, + payload: MessagePayload::handshake(HandshakeMessagePayload { + typ: HandshakeType::ClientHello, + payload: HandshakePayload::ClientHello(ClientHelloPayload { + client_version: ProtocolVersion::TLSv1_2, + random: random.clone(), + session_id: SessionID::random().unwrap(), + cipher_suites: vec![CipherSuite::TLS13_AES_128_GCM_SHA256], + compression_methods: vec![Compression::Null], + extensions: vec![ + ClientExtension::NamedGroups(vec![NamedGroup::X25519]), + ClientExtension::SignatureAlgorithms(vec![SignatureScheme::ED25519]), + ClientExtension::KeyShare(vec![KeyShareEntry { + group: NamedGroup::X25519, + payload: PayloadU16::new(kx.as_ref().to_vec()), + }]), + ], + }), + }), + }; + + let buf = PlainMessage::from(client_hello) + .into_unencrypted_opaque() + .encode(); + server + .read_tls(&mut buf.as_slice()) + .unwrap(); + assert_eq!( + server.process_new_packets().err(), + Some(Error::PeerIncompatibleError( + "Server requires TLS1.3, but client omitted versions ext".into(), + )), + ); + } + + #[test] + fn packet_key_api() { + use rustls::quic::{Keys, Version}; + + // Test vectors: https://www.rfc-editor.org/rfc/rfc9001.html#name-client-initial + const CONNECTION_ID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08]; + const PACKET_NUMBER: u64 = 2; + const PLAIN_HEADER: &[u8] = &[ + 0xc3, 0x00, 0x00, 0x00, 0x01, 0x08, 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, + 0x00, 0x00, 0x44, 0x9e, 0x00, 0x00, 0x00, 0x02, + ]; + + const PAYLOAD: &[u8] = &[ + 0x06, 0x00, 0x40, 0xf1, 0x01, 0x00, 0x00, 0xed, 0x03, 0x03, 0xeb, 0xf8, 0xfa, 0x56, + 0xf1, 0x29, 0x39, 0xb9, 0x58, 0x4a, 0x38, 0x96, 0x47, 0x2e, 0xc4, 0x0b, 0xb8, 0x63, + 0xcf, 0xd3, 0xe8, 0x68, 0x04, 0xfe, 0x3a, 0x47, 0xf0, 0x6a, 0x2b, 0x69, 0x48, 0x4c, + 0x00, 
0x00, 0x04, 0x13, 0x01, 0x13, 0x02, 0x01, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x0e, 0x00, 0x00, 0x0b, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2e, + 0x63, 0x6f, 0x6d, 0xff, 0x01, 0x00, 0x01, 0x00, 0x00, 0x0a, 0x00, 0x08, 0x00, 0x06, + 0x00, 0x1d, 0x00, 0x17, 0x00, 0x18, 0x00, 0x10, 0x00, 0x07, 0x00, 0x05, 0x04, 0x61, + 0x6c, 0x70, 0x6e, 0x00, 0x05, 0x00, 0x05, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, + 0x00, 0x26, 0x00, 0x24, 0x00, 0x1d, 0x00, 0x20, 0x93, 0x70, 0xb2, 0xc9, 0xca, 0xa4, + 0x7f, 0xba, 0xba, 0xf4, 0x55, 0x9f, 0xed, 0xba, 0x75, 0x3d, 0xe1, 0x71, 0xfa, 0x71, + 0xf5, 0x0f, 0x1c, 0xe1, 0x5d, 0x43, 0xe9, 0x94, 0xec, 0x74, 0xd7, 0x48, 0x00, 0x2b, + 0x00, 0x03, 0x02, 0x03, 0x04, 0x00, 0x0d, 0x00, 0x10, 0x00, 0x0e, 0x04, 0x03, 0x05, + 0x03, 0x06, 0x03, 0x02, 0x03, 0x08, 0x04, 0x08, 0x05, 0x08, 0x06, 0x00, 0x2d, 0x00, + 0x02, 0x01, 0x01, 0x00, 0x1c, 0x00, 0x02, 0x40, 0x01, 0x00, 0x39, 0x00, 0x32, 0x04, + 0x08, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x05, 0x04, 0x80, 0x00, 0xff, + 0xff, 0x07, 0x04, 0x80, 0x00, 0xff, 0xff, 0x08, 0x01, 0x10, 0x01, 0x04, 0x80, 0x00, + 0x75, 0x30, 0x09, 0x01, 0x10, 0x0f, 0x08, 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, + 0x08, 0x06, 0x04, 0x80, 0x00, 0xff, 0xff, + ]; + + let client_keys = Keys::initial(Version::V1, &CONNECTION_ID, true); + assert_eq!( + client_keys + .local + .packet + .confidentiality_limit(), + 2u64.pow(23) + ); + assert_eq!( + client_keys + .local + .packet + .integrity_limit(), + 2u64.pow(52) + ); + assert_eq!(client_keys.local.packet.tag_len(), 16); + + let mut buf = Vec::new(); + buf.extend(PLAIN_HEADER); + buf.extend(PAYLOAD); + let header_len = PLAIN_HEADER.len(); + let tag_len = client_keys.local.packet.tag_len(); + let padding_len = 1200 - header_len - PAYLOAD.len() - tag_len; + buf.extend(std::iter::repeat(0).take(padding_len)); + let (header, payload) = buf.split_at_mut(header_len); + let tag = client_keys + .local + .packet + .encrypt_in_place(PACKET_NUMBER, &*header, payload) 
+ .unwrap(); + + let sample_len = client_keys.local.header.sample_len(); + let sample = &payload[..sample_len]; + let (first, rest) = header.split_at_mut(1); + client_keys + .local + .header + .encrypt_in_place(sample, &mut first[0], &mut rest[17..21]) + .unwrap(); + buf.extend_from_slice(tag.as_ref()); + + const PROTECTED: &[u8] = &[ + 0xc0, 0x00, 0x00, 0x00, 0x01, 0x08, 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, + 0x00, 0x00, 0x44, 0x9e, 0x7b, 0x9a, 0xec, 0x34, 0xd1, 0xb1, 0xc9, 0x8d, 0xd7, 0x68, + 0x9f, 0xb8, 0xec, 0x11, 0xd2, 0x42, 0xb1, 0x23, 0xdc, 0x9b, 0xd8, 0xba, 0xb9, 0x36, + 0xb4, 0x7d, 0x92, 0xec, 0x35, 0x6c, 0x0b, 0xab, 0x7d, 0xf5, 0x97, 0x6d, 0x27, 0xcd, + 0x44, 0x9f, 0x63, 0x30, 0x00, 0x99, 0xf3, 0x99, 0x1c, 0x26, 0x0e, 0xc4, 0xc6, 0x0d, + 0x17, 0xb3, 0x1f, 0x84, 0x29, 0x15, 0x7b, 0xb3, 0x5a, 0x12, 0x82, 0xa6, 0x43, 0xa8, + 0xd2, 0x26, 0x2c, 0xad, 0x67, 0x50, 0x0c, 0xad, 0xb8, 0xe7, 0x37, 0x8c, 0x8e, 0xb7, + 0x53, 0x9e, 0xc4, 0xd4, 0x90, 0x5f, 0xed, 0x1b, 0xee, 0x1f, 0xc8, 0xaa, 0xfb, 0xa1, + 0x7c, 0x75, 0x0e, 0x2c, 0x7a, 0xce, 0x01, 0xe6, 0x00, 0x5f, 0x80, 0xfc, 0xb7, 0xdf, + 0x62, 0x12, 0x30, 0xc8, 0x37, 0x11, 0xb3, 0x93, 0x43, 0xfa, 0x02, 0x8c, 0xea, 0x7f, + 0x7f, 0xb5, 0xff, 0x89, 0xea, 0xc2, 0x30, 0x82, 0x49, 0xa0, 0x22, 0x52, 0x15, 0x5e, + 0x23, 0x47, 0xb6, 0x3d, 0x58, 0xc5, 0x45, 0x7a, 0xfd, 0x84, 0xd0, 0x5d, 0xff, 0xfd, + 0xb2, 0x03, 0x92, 0x84, 0x4a, 0xe8, 0x12, 0x15, 0x46, 0x82, 0xe9, 0xcf, 0x01, 0x2f, + 0x90, 0x21, 0xa6, 0xf0, 0xbe, 0x17, 0xdd, 0xd0, 0xc2, 0x08, 0x4d, 0xce, 0x25, 0xff, + 0x9b, 0x06, 0xcd, 0xe5, 0x35, 0xd0, 0xf9, 0x20, 0xa2, 0xdb, 0x1b, 0xf3, 0x62, 0xc2, + 0x3e, 0x59, 0x6d, 0x11, 0xa4, 0xf5, 0xa6, 0xcf, 0x39, 0x48, 0x83, 0x8a, 0x3a, 0xec, + 0x4e, 0x15, 0xda, 0xf8, 0x50, 0x0a, 0x6e, 0xf6, 0x9e, 0xc4, 0xe3, 0xfe, 0xb6, 0xb1, + 0xd9, 0x8e, 0x61, 0x0a, 0xc8, 0xb7, 0xec, 0x3f, 0xaf, 0x6a, 0xd7, 0x60, 0xb7, 0xba, + 0xd1, 0xdb, 0x4b, 0xa3, 0x48, 0x5e, 0x8a, 0x94, 0xdc, 0x25, 0x0a, 0xe3, 0xfd, 0xb4, + 0x1e, 0xd1, 0x5f, 0xb6, 
0xa8, 0xe5, 0xeb, 0xa0, 0xfc, 0x3d, 0xd6, 0x0b, 0xc8, 0xe3, + 0x0c, 0x5c, 0x42, 0x87, 0xe5, 0x38, 0x05, 0xdb, 0x05, 0x9a, 0xe0, 0x64, 0x8d, 0xb2, + 0xf6, 0x42, 0x64, 0xed, 0x5e, 0x39, 0xbe, 0x2e, 0x20, 0xd8, 0x2d, 0xf5, 0x66, 0xda, + 0x8d, 0xd5, 0x99, 0x8c, 0xca, 0xbd, 0xae, 0x05, 0x30, 0x60, 0xae, 0x6c, 0x7b, 0x43, + 0x78, 0xe8, 0x46, 0xd2, 0x9f, 0x37, 0xed, 0x7b, 0x4e, 0xa9, 0xec, 0x5d, 0x82, 0xe7, + 0x96, 0x1b, 0x7f, 0x25, 0xa9, 0x32, 0x38, 0x51, 0xf6, 0x81, 0xd5, 0x82, 0x36, 0x3a, + 0xa5, 0xf8, 0x99, 0x37, 0xf5, 0xa6, 0x72, 0x58, 0xbf, 0x63, 0xad, 0x6f, 0x1a, 0x0b, + 0x1d, 0x96, 0xdb, 0xd4, 0xfa, 0xdd, 0xfc, 0xef, 0xc5, 0x26, 0x6b, 0xa6, 0x61, 0x17, + 0x22, 0x39, 0x5c, 0x90, 0x65, 0x56, 0xbe, 0x52, 0xaf, 0xe3, 0xf5, 0x65, 0x63, 0x6a, + 0xd1, 0xb1, 0x7d, 0x50, 0x8b, 0x73, 0xd8, 0x74, 0x3e, 0xeb, 0x52, 0x4b, 0xe2, 0x2b, + 0x3d, 0xcb, 0xc2, 0xc7, 0x46, 0x8d, 0x54, 0x11, 0x9c, 0x74, 0x68, 0x44, 0x9a, 0x13, + 0xd8, 0xe3, 0xb9, 0x58, 0x11, 0xa1, 0x98, 0xf3, 0x49, 0x1d, 0xe3, 0xe7, 0xfe, 0x94, + 0x2b, 0x33, 0x04, 0x07, 0xab, 0xf8, 0x2a, 0x4e, 0xd7, 0xc1, 0xb3, 0x11, 0x66, 0x3a, + 0xc6, 0x98, 0x90, 0xf4, 0x15, 0x70, 0x15, 0x85, 0x3d, 0x91, 0xe9, 0x23, 0x03, 0x7c, + 0x22, 0x7a, 0x33, 0xcd, 0xd5, 0xec, 0x28, 0x1c, 0xa3, 0xf7, 0x9c, 0x44, 0x54, 0x6b, + 0x9d, 0x90, 0xca, 0x00, 0xf0, 0x64, 0xc9, 0x9e, 0x3d, 0xd9, 0x79, 0x11, 0xd3, 0x9f, + 0xe9, 0xc5, 0xd0, 0xb2, 0x3a, 0x22, 0x9a, 0x23, 0x4c, 0xb3, 0x61, 0x86, 0xc4, 0x81, + 0x9e, 0x8b, 0x9c, 0x59, 0x27, 0x72, 0x66, 0x32, 0x29, 0x1d, 0x6a, 0x41, 0x82, 0x11, + 0xcc, 0x29, 0x62, 0xe2, 0x0f, 0xe4, 0x7f, 0xeb, 0x3e, 0xdf, 0x33, 0x0f, 0x2c, 0x60, + 0x3a, 0x9d, 0x48, 0xc0, 0xfc, 0xb5, 0x69, 0x9d, 0xbf, 0xe5, 0x89, 0x64, 0x25, 0xc5, + 0xba, 0xc4, 0xae, 0xe8, 0x2e, 0x57, 0xa8, 0x5a, 0xaf, 0x4e, 0x25, 0x13, 0xe4, 0xf0, + 0x57, 0x96, 0xb0, 0x7b, 0xa2, 0xee, 0x47, 0xd8, 0x05, 0x06, 0xf8, 0xd2, 0xc2, 0x5e, + 0x50, 0xfd, 0x14, 0xde, 0x71, 0xe6, 0xc4, 0x18, 0x55, 0x93, 0x02, 0xf9, 0x39, 0xb0, + 0xe1, 0xab, 0xd5, 0x76, 0xf2, 0x79, 0xc4, 
0xb2, 0xe0, 0xfe, 0xb8, 0x5c, 0x1f, 0x28, + 0xff, 0x18, 0xf5, 0x88, 0x91, 0xff, 0xef, 0x13, 0x2e, 0xef, 0x2f, 0xa0, 0x93, 0x46, + 0xae, 0xe3, 0x3c, 0x28, 0xeb, 0x13, 0x0f, 0xf2, 0x8f, 0x5b, 0x76, 0x69, 0x53, 0x33, + 0x41, 0x13, 0x21, 0x19, 0x96, 0xd2, 0x00, 0x11, 0xa1, 0x98, 0xe3, 0xfc, 0x43, 0x3f, + 0x9f, 0x25, 0x41, 0x01, 0x0a, 0xe1, 0x7c, 0x1b, 0xf2, 0x02, 0x58, 0x0f, 0x60, 0x47, + 0x47, 0x2f, 0xb3, 0x68, 0x57, 0xfe, 0x84, 0x3b, 0x19, 0xf5, 0x98, 0x40, 0x09, 0xdd, + 0xc3, 0x24, 0x04, 0x4e, 0x84, 0x7a, 0x4f, 0x4a, 0x0a, 0xb3, 0x4f, 0x71, 0x95, 0x95, + 0xde, 0x37, 0x25, 0x2d, 0x62, 0x35, 0x36, 0x5e, 0x9b, 0x84, 0x39, 0x2b, 0x06, 0x10, + 0x85, 0x34, 0x9d, 0x73, 0x20, 0x3a, 0x4a, 0x13, 0xe9, 0x6f, 0x54, 0x32, 0xec, 0x0f, + 0xd4, 0xa1, 0xee, 0x65, 0xac, 0xcd, 0xd5, 0xe3, 0x90, 0x4d, 0xf5, 0x4c, 0x1d, 0xa5, + 0x10, 0xb0, 0xff, 0x20, 0xdc, 0xc0, 0xc7, 0x7f, 0xcb, 0x2c, 0x0e, 0x0e, 0xb6, 0x05, + 0xcb, 0x05, 0x04, 0xdb, 0x87, 0x63, 0x2c, 0xf3, 0xd8, 0xb4, 0xda, 0xe6, 0xe7, 0x05, + 0x76, 0x9d, 0x1d, 0xe3, 0x54, 0x27, 0x01, 0x23, 0xcb, 0x11, 0x45, 0x0e, 0xfc, 0x60, + 0xac, 0x47, 0x68, 0x3d, 0x7b, 0x8d, 0x0f, 0x81, 0x13, 0x65, 0x56, 0x5f, 0xd9, 0x8c, + 0x4c, 0x8e, 0xb9, 0x36, 0xbc, 0xab, 0x8d, 0x06, 0x9f, 0xc3, 0x3b, 0xd8, 0x01, 0xb0, + 0x3a, 0xde, 0xa2, 0xe1, 0xfb, 0xc5, 0xaa, 0x46, 0x3d, 0x08, 0xca, 0x19, 0x89, 0x6d, + 0x2b, 0xf5, 0x9a, 0x07, 0x1b, 0x85, 0x1e, 0x6c, 0x23, 0x90, 0x52, 0x17, 0x2f, 0x29, + 0x6b, 0xfb, 0x5e, 0x72, 0x40, 0x47, 0x90, 0xa2, 0x18, 0x10, 0x14, 0xf3, 0xb9, 0x4a, + 0x4e, 0x97, 0xd1, 0x17, 0xb4, 0x38, 0x13, 0x03, 0x68, 0xcc, 0x39, 0xdb, 0xb2, 0xd1, + 0x98, 0x06, 0x5a, 0xe3, 0x98, 0x65, 0x47, 0x92, 0x6c, 0xd2, 0x16, 0x2f, 0x40, 0xa2, + 0x9f, 0x0c, 0x3c, 0x87, 0x45, 0xc0, 0xf5, 0x0f, 0xba, 0x38, 0x52, 0xe5, 0x66, 0xd4, + 0x45, 0x75, 0xc2, 0x9d, 0x39, 0xa0, 0x3f, 0x0c, 0xda, 0x72, 0x19, 0x84, 0xb6, 0xf4, + 0x40, 0x59, 0x1f, 0x35, 0x5e, 0x12, 0xd4, 0x39, 0xff, 0x15, 0x0a, 0xab, 0x76, 0x13, + 0x49, 0x9d, 0xbd, 0x49, 0xad, 0xab, 0xc8, 0x67, 0x6e, 0xef, 
0x02, 0x3b, 0x15, 0xb6, + 0x5b, 0xfc, 0x5c, 0xa0, 0x69, 0x48, 0x10, 0x9f, 0x23, 0xf3, 0x50, 0xdb, 0x82, 0x12, + 0x35, 0x35, 0xeb, 0x8a, 0x74, 0x33, 0xbd, 0xab, 0xcb, 0x90, 0x92, 0x71, 0xa6, 0xec, + 0xbc, 0xb5, 0x8b, 0x93, 0x6a, 0x88, 0xcd, 0x4e, 0x8f, 0x2e, 0x6f, 0xf5, 0x80, 0x01, + 0x75, 0xf1, 0x13, 0x25, 0x3d, 0x8f, 0xa9, 0xca, 0x88, 0x85, 0xc2, 0xf5, 0x52, 0xe6, + 0x57, 0xdc, 0x60, 0x3f, 0x25, 0x2e, 0x1a, 0x8e, 0x30, 0x8f, 0x76, 0xf0, 0xbe, 0x79, + 0xe2, 0xfb, 0x8f, 0x5d, 0x5f, 0xbb, 0xe2, 0xe3, 0x0e, 0xca, 0xdd, 0x22, 0x07, 0x23, + 0xc8, 0xc0, 0xae, 0xa8, 0x07, 0x8c, 0xdf, 0xcb, 0x38, 0x68, 0x26, 0x3f, 0xf8, 0xf0, + 0x94, 0x00, 0x54, 0xda, 0x48, 0x78, 0x18, 0x93, 0xa7, 0xe4, 0x9a, 0xd5, 0xaf, 0xf4, + 0xaf, 0x30, 0x0c, 0xd8, 0x04, 0xa6, 0xb6, 0x27, 0x9a, 0xb3, 0xff, 0x3a, 0xfb, 0x64, + 0x49, 0x1c, 0x85, 0x19, 0x4a, 0xab, 0x76, 0x0d, 0x58, 0xa6, 0x06, 0x65, 0x4f, 0x9f, + 0x44, 0x00, 0xe8, 0xb3, 0x85, 0x91, 0x35, 0x6f, 0xbf, 0x64, 0x25, 0xac, 0xa2, 0x6d, + 0xc8, 0x52, 0x44, 0x25, 0x9f, 0xf2, 0xb1, 0x9c, 0x41, 0xb9, 0xf9, 0x6f, 0x3c, 0xa9, + 0xec, 0x1d, 0xde, 0x43, 0x4d, 0xa7, 0xd2, 0xd3, 0x92, 0xb9, 0x05, 0xdd, 0xf3, 0xd1, + 0xf9, 0xaf, 0x93, 0xd1, 0xaf, 0x59, 0x50, 0xbd, 0x49, 0x3f, 0x5a, 0xa7, 0x31, 0xb4, + 0x05, 0x6d, 0xf3, 0x1b, 0xd2, 0x67, 0xb6, 0xb9, 0x0a, 0x07, 0x98, 0x31, 0xaa, 0xf5, + 0x79, 0xbe, 0x0a, 0x39, 0x01, 0x31, 0x37, 0xaa, 0xc6, 0xd4, 0x04, 0xf5, 0x18, 0xcf, + 0xd4, 0x68, 0x40, 0x64, 0x7e, 0x78, 0xbf, 0xe7, 0x06, 0xca, 0x4c, 0xf5, 0xe9, 0xc5, + 0x45, 0x3e, 0x9f, 0x7c, 0xfd, 0x2b, 0x8b, 0x4c, 0x8d, 0x16, 0x9a, 0x44, 0xe5, 0x5c, + 0x88, 0xd4, 0xa9, 0xa7, 0xf9, 0x47, 0x42, 0x41, 0xe2, 0x21, 0xaf, 0x44, 0x86, 0x00, + 0x18, 0xab, 0x08, 0x56, 0x97, 0x2e, 0x19, 0x4c, 0xd9, 0x34, + ]; + + assert_eq!(&buf, PROTECTED); + + let (header, payload) = buf.split_at_mut(header_len); + let (first, rest) = header.split_at_mut(1); + let sample = &payload[..sample_len]; + + let server_keys = Keys::initial(Version::V1, &CONNECTION_ID, false); + server_keys + .remote + 
.header + .decrypt_in_place(sample, &mut first[0], &mut rest[17..21]) + .unwrap(); + let payload = server_keys + .remote + .packet + .decrypt_in_place(PACKET_NUMBER, &*header, payload) + .unwrap(); + + assert_eq!(&payload[..PAYLOAD.len()], PAYLOAD); + assert_eq!(payload.len(), buf.len() - header_len - tag_len); + } + + #[test] + fn test_quic_exporter() { + for &kt in ALL_KEY_TYPES.iter() { + let client_config = make_client_config_with_versions(kt, &[&rustls::version::TLS13]); + let server_config = make_server_config_with_versions(kt, &[&rustls::version::TLS13]); + + do_exporter_test(client_config, server_config); + } + } +} // mod test_quic + +#[test] +fn test_client_does_not_offer_sha1() { + use rustls::internal::msgs::{ + codec::Reader, enums::HandshakeType, handshake::HandshakePayload, message::MessagePayload, + message::OpaqueMessage, + }; + + for kt in ALL_KEY_TYPES.iter() { + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(*kt, &[version]); + let (mut client, _) = make_pair_for_configs(client_config, make_server_config(*kt)); + + assert!(client.wants_write()); + let mut buf = [0u8; 262144]; + let sz = client + .write_tls(&mut buf.as_mut()) + .unwrap(); + let msg = OpaqueMessage::read(&mut Reader::init(&buf[..sz])).unwrap(); + let msg = Message::try_from(msg.into_plain_message()).unwrap(); + assert!(msg.is_handshake_type(HandshakeType::ClientHello)); + + let client_hello = match msg.payload { + MessagePayload::Handshake { parsed, .. 
} => match parsed.payload { + HandshakePayload::ClientHello(ch) => ch, + _ => unreachable!(), + }, + _ => unreachable!(), + }; + + let sigalgs = client_hello + .get_sigalgs_extension() + .unwrap(); + assert!( + !sigalgs.contains(&SignatureScheme::RSA_PKCS1_SHA1), + "sha1 unexpectedly offered" + ); + } + } +} + +#[test] +fn test_client_config_keyshare() { + let client_config = + make_client_config_with_kx_groups(KeyType::Rsa, &[&rustls::kx_group::SECP384R1]); + let server_config = + make_server_config_with_kx_groups(KeyType::Rsa, &[&rustls::kx_group::SECP384R1]); + let (mut client, mut server) = make_pair_for_configs(client_config, server_config); + do_handshake_until_error(&mut client, &mut server).unwrap(); +} + +#[test] +fn test_client_config_keyshare_mismatch() { + let client_config = + make_client_config_with_kx_groups(KeyType::Rsa, &[&rustls::kx_group::SECP384R1]); + let server_config = + make_server_config_with_kx_groups(KeyType::Rsa, &[&rustls::kx_group::X25519]); + let (mut client, mut server) = make_pair_for_configs(client_config, server_config); + assert!(do_handshake_until_error(&mut client, &mut server).is_err()); +} + +#[test] +fn test_client_sends_helloretryrequest() { + // client sends a secp384r1 key share + let mut client_config = make_client_config_with_kx_groups( + KeyType::Rsa, + &[&rustls::kx_group::SECP384R1, &rustls::kx_group::X25519], + ); + + let storage = Arc::new(ClientStorage::new()); + client_config.session_storage = storage.clone(); + + // but server only accepts x25519, so a HRR is required + let server_config = + make_server_config_with_kx_groups(KeyType::Rsa, &[&rustls::kx_group::X25519]); + + let (mut client, mut server) = make_pair_for_configs(client_config, server_config); + + // client sends hello + { + let mut pipe = OtherSession::new(&mut server); + let wrlen = client.write_tls(&mut pipe).unwrap(); + assert!(wrlen > 200); + assert_eq!(pipe.writevs.len(), 1); + assert!(pipe.writevs[0].len() == 1); + } + + // server sends HRR + 
{ + let mut pipe = OtherSession::new(&mut client); + let wrlen = server.write_tls(&mut pipe).unwrap(); + assert!(wrlen < 100); // just the hello retry request + assert_eq!(pipe.writevs.len(), 1); // only one writev + assert!(pipe.writevs[0].len() == 2); // hello retry request and CCS + } + + // client sends fixed hello + { + let mut pipe = OtherSession::new(&mut server); + let wrlen = client.write_tls(&mut pipe).unwrap(); + assert!(wrlen > 200); // just the client hello retry + assert_eq!(pipe.writevs.len(), 1); // only one writev + assert!(pipe.writevs[0].len() == 2); // only a CCS & client hello retry + } + + // server completes handshake + { + let mut pipe = OtherSession::new(&mut client); + let wrlen = server.write_tls(&mut pipe).unwrap(); + assert!(wrlen > 200); + assert_eq!(pipe.writevs.len(), 1); + assert!(pipe.writevs[0].len() == 5); // server hello / encrypted exts / cert / cert-verify / finished + } + + do_handshake_until_error(&mut client, &mut server).unwrap(); + + // client only did two storage queries: one for a session, another for a kx type + assert_eq!(storage.gets(), 2); + assert_eq!(storage.puts(), 2); +} + +#[test] +fn test_client_rejects_hrr_with_varied_session_id() { + let different_session_id = SessionID::random().unwrap(); + + let assert_client_sends_hello_with_secp384 = |msg: &mut Message| -> Altered { + if let MessagePayload::Handshake { parsed, encoded } = &mut msg.payload { + if let HandshakePayload::ClientHello(ch) = &mut parsed.payload { + let keyshares = ch + .get_keyshare_extension() + .expect("missing key share extension"); + assert_eq!(keyshares.len(), 1); + assert_eq!(keyshares[0].group, rustls::NamedGroup::secp384r1); + + ch.session_id = different_session_id; + *encoded = Payload::new(parsed.get_encoding()); + } + } + Altered::InPlace + }; + + let assert_server_requests_retry_and_echoes_session_id = |msg: &mut Message| -> Altered { + if let MessagePayload::Handshake { parsed, .. 
} = &mut msg.payload { + if let HandshakePayload::HelloRetryRequest(hrr) = &mut parsed.payload { + let group = hrr.get_requested_key_share_group(); + assert_eq!(group, Some(rustls::NamedGroup::X25519)); + + assert_eq!(hrr.session_id, different_session_id); + } + } + Altered::InPlace + }; + + // client prefers a secp384r1 key share, server only accepts x25519 + let client_config = make_client_config_with_kx_groups( + KeyType::Rsa, + &[&rustls::kx_group::SECP384R1, &rustls::kx_group::X25519], + ); + + let server_config = + make_server_config_with_kx_groups(KeyType::Rsa, &[&rustls::kx_group::X25519]); + + let (client, server) = make_pair_for_configs(client_config, server_config); + let (mut client, mut server) = (client.into(), server.into()); + transfer_altered( + &mut client, + assert_client_sends_hello_with_secp384, + &mut server, + ); + server.process_new_packets().unwrap(); + transfer_altered( + &mut server, + assert_server_requests_retry_and_echoes_session_id, + &mut client, + ); + assert_eq!( + client.process_new_packets(), + Err(Error::PeerMisbehavedError( + "server did not echo the session_id from client hello".to_string() + )) + ); +} + +#[test] +fn test_client_attempts_to_use_unsupported_kx_group() { + // common to both client configs + let shared_storage = Arc::new(ClientStorage::new()); + + // first, client sends a x25519 and server agrees. x25519 is inserted + // into kx group cache. + let mut client_config_1 = + make_client_config_with_kx_groups(KeyType::Rsa, &[&rustls::kx_group::X25519]); + client_config_1.session_storage = shared_storage.clone(); + + // second, client only supports secp-384 and so kx group cache + // contains an unusable value. 
+ let mut client_config_2 = + make_client_config_with_kx_groups(KeyType::Rsa, &[&rustls::kx_group::SECP384R1]); + client_config_2.session_storage = shared_storage; + + let server_config = make_server_config(KeyType::Rsa); + + // first handshake + let (mut client_1, mut server) = make_pair_for_configs(client_config_1, server_config.clone()); + do_handshake_until_error(&mut client_1, &mut server).unwrap(); + + // second handshake + let (mut client_2, mut server) = make_pair_for_configs(client_config_2, server_config); + do_handshake_until_error(&mut client_2, &mut server).unwrap(); +} + +#[test] +fn test_client_mtu_reduction() { + struct CollectWrites { + writevs: Vec>, + } + + impl io::Write for CollectWrites { + fn write(&mut self, _: &[u8]) -> io::Result { + panic!() + } + fn flush(&mut self) -> io::Result<()> { + panic!() + } + fn write_vectored<'b>(&mut self, b: &[io::IoSlice<'b>]) -> io::Result { + let writes = b + .iter() + .map(|slice| slice.len()) + .collect::>(); + let len = writes.iter().sum(); + self.writevs.push(writes); + Ok(len) + } + } + + fn collect_write_lengths(client: &mut ClientConnection) -> Vec { + let mut collector = CollectWrites { writevs: vec![] }; + + client + .write_tls(&mut collector) + .unwrap(); + assert_eq!(collector.writevs.len(), 1); + collector.writevs[0].clone() + } + + for kt in ALL_KEY_TYPES.iter() { + let mut client_config = make_client_config(*kt); + client_config.max_fragment_size = Some(64); + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("localhost")).unwrap(); + let writes = collect_write_lengths(&mut client); + println!("writes at mtu=64: {:?}", writes); + assert!(writes.iter().all(|x| *x <= 64)); + assert!(writes.len() > 1); + } +} + +#[test] +fn test_server_mtu_reduction() { + let mut server_config = make_server_config(KeyType::Rsa); + server_config.max_fragment_size = Some(64); + server_config.send_half_rtt_data = true; + let (mut client, mut server) = + 
make_pair_for_configs(make_client_config(KeyType::Rsa), server_config); + + let big_data = [0u8; 2048]; + server + .writer() + .write_all(&big_data) + .unwrap(); + + let encryption_overhead = 20; // FIXME: see issue #991 + + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + { + let mut pipe = OtherSession::new(&mut client); + server.write_tls(&mut pipe).unwrap(); + + assert_eq!(pipe.writevs.len(), 1); + assert!(pipe.writevs[0] + .iter() + .all(|x| *x <= 64 + encryption_overhead)); + } + + client.process_new_packets().unwrap(); + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + { + let mut pipe = OtherSession::new(&mut client); + server.write_tls(&mut pipe).unwrap(); + assert_eq!(pipe.writevs.len(), 1); + assert!(pipe.writevs[0] + .iter() + .all(|x| *x <= 64 + encryption_overhead)); + } + + client.process_new_packets().unwrap(); + check_read(&mut client.reader(), &big_data); +} + +fn check_client_max_fragment_size(size: usize) -> Option { + let mut client_config = make_client_config(KeyType::Ed25519); + client_config.max_fragment_size = Some(size); + ClientConnection::new(Arc::new(client_config), dns_name("localhost")).err() +} + +#[test] +fn bad_client_max_fragment_sizes() { + assert_eq!( + check_client_max_fragment_size(31), + Some(Error::BadMaxFragmentSize) + ); + assert_eq!(check_client_max_fragment_size(32), None); + assert_eq!(check_client_max_fragment_size(64), None); + assert_eq!(check_client_max_fragment_size(1460), None); + assert_eq!(check_client_max_fragment_size(0x4000), None); + assert_eq!(check_client_max_fragment_size(0x4005), None); + assert_eq!( + check_client_max_fragment_size(0x4006), + Some(Error::BadMaxFragmentSize) + ); + assert_eq!( + check_client_max_fragment_size(0xffff), + Some(Error::BadMaxFragmentSize) + ); +} + +fn assert_lt(left: usize, right: usize) { + if left >= right { + panic!("expected {} < {}", left, right); + } +} + +#[test] +fn connection_types_are_not_huge() { + // 
Arbitrary sizes + assert_lt(mem::size_of::(), 1600); + assert_lt(mem::size_of::(), 1600); +} + +use rustls::internal::msgs::handshake::SessionID; +use rustls::internal::msgs::{ + handshake::ClientExtension, handshake::HandshakePayload, message::Message, + message::MessagePayload, +}; + +#[test] +fn test_server_rejects_duplicate_sni_names() { + fn duplicate_sni_payload(msg: &mut Message) -> Altered { + if let MessagePayload::Handshake { parsed, encoded } = &mut msg.payload { + if let HandshakePayload::ClientHello(ch) = &mut parsed.payload { + for mut ext in ch.extensions.iter_mut() { + if let ClientExtension::ServerName(snr) = &mut ext { + snr.push(snr[0].clone()); + } + } + } + + *encoded = Payload::new(parsed.get_encoding()); + } + Altered::InPlace + } + + let (client, server) = make_pair(KeyType::Rsa); + let (mut client, mut server) = (client.into(), server.into()); + transfer_altered(&mut client, duplicate_sni_payload, &mut server); + assert_eq!( + server.process_new_packets(), + Err(Error::PeerMisbehavedError( + "ClientHello SNI contains duplicate name types".into() + )) + ); +} + +#[test] +fn test_server_rejects_empty_sni_extension() { + fn empty_sni_payload(msg: &mut Message) -> Altered { + if let MessagePayload::Handshake { parsed, encoded } = &mut msg.payload { + if let HandshakePayload::ClientHello(ch) = &mut parsed.payload { + for mut ext in ch.extensions.iter_mut() { + if let ClientExtension::ServerName(snr) = &mut ext { + snr.clear(); + } + } + } + + *encoded = Payload::new(parsed.get_encoding()); + } + + Altered::InPlace + } + + let (client, server) = make_pair(KeyType::Rsa); + let (mut client, mut server) = (client.into(), server.into()); + transfer_altered(&mut client, empty_sni_payload, &mut server); + assert_eq!( + server.process_new_packets(), + Err(Error::PeerMisbehavedError( + "ClientHello SNI did not contain a hostname".into() + )) + ); +} + +#[test] +fn test_server_rejects_clients_without_any_kx_group_overlap() { + fn different_kx_group(msg: 
&mut Message) -> Altered { + if let MessagePayload::Handshake { parsed, encoded } = &mut msg.payload { + if let HandshakePayload::ClientHello(ch) = &mut parsed.payload { + for mut ext in ch.extensions.iter_mut() { + if let ClientExtension::NamedGroups(ngs) = &mut ext { + ngs.clear(); + } + if let ClientExtension::KeyShare(ks) = &mut ext { + ks.clear(); + } + } + } + + *encoded = Payload::new(parsed.get_encoding()); + } + Altered::InPlace + } + + let (client, server) = make_pair(KeyType::Rsa); + let (mut client, mut server) = (client.into(), server.into()); + transfer_altered(&mut client, different_kx_group, &mut server); + assert_eq!( + server.process_new_packets(), + Err(Error::PeerIncompatibleError( + "no kx group overlap with client".into() + )) + ); +} + +#[test] +fn test_client_rejects_illegal_tls13_ccs() { + fn corrupt_ccs(msg: &mut Message) -> Altered { + if let MessagePayload::ChangeCipherSpec(_) = &mut msg.payload { + println!("seen CCS {:?}", msg); + return Altered::Raw(vec![0x14, 0x03, 0x03, 0x00, 0x02, 0x01, 0x02]); + } + Altered::InPlace + } + + let (mut client, mut server) = make_pair(KeyType::Rsa); + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + + let (mut server, mut client) = (server.into(), client.into()); + + transfer_altered(&mut server, corrupt_ccs, &mut client); + assert_eq!( + client.process_new_packets(), + Err(Error::PeerMisbehavedError( + "illegal middlebox CCS received".into() + )) + ); +} + +/// https://github.com/rustls/rustls/issues/797 +#[cfg(feature = "tls12")] +#[test] +fn test_client_tls12_no_resume_after_server_downgrade() { + let mut client_config = common::make_client_config(KeyType::Ed25519); + let client_storage = Arc::new(ClientStorage::new()); + client_config.session_storage = client_storage.clone(); + let client_config = Arc::new(client_config); + + let server_config_1 = Arc::new(common::finish_server_config( + KeyType::Ed25519, + ServerConfig::builder() + .with_safe_default_cipher_suites() 
+ .with_safe_default_kx_groups() + .with_protocol_versions(&[&rustls::version::TLS13]) + .unwrap(), + )); + + let mut server_config_2 = common::finish_server_config( + KeyType::Ed25519, + ServerConfig::builder() + .with_safe_default_cipher_suites() + .with_safe_default_kx_groups() + .with_protocol_versions(&[&rustls::version::TLS12]) + .unwrap(), + ); + server_config_2.session_storage = Arc::new(rustls::server::NoServerSessionStorage {}); + + dbg!("handshake 1"); + let mut client_1 = + ClientConnection::new(client_config.clone(), "localhost".try_into().unwrap()).unwrap(); + let mut server_1 = ServerConnection::new(server_config_1).unwrap(); + common::do_handshake(&mut client_1, &mut server_1); + assert_eq!(client_storage.puts(), 2); + + dbg!("handshake 2"); + let mut client_2 = + ClientConnection::new(client_config, "localhost".try_into().unwrap()).unwrap(); + let mut server_2 = ServerConnection::new(Arc::new(server_config_2)).unwrap(); + common::do_handshake(&mut client_2, &mut server_2); + assert_eq!(client_storage.puts(), 2); +} + +#[test] +fn test_acceptor() { + use rustls::server::Acceptor; + + let client_config = Arc::new(make_client_config(KeyType::Ed25519)); + let mut client = ClientConnection::new(client_config, dns_name("localhost")).unwrap(); + let mut buf = Vec::new(); + client.write_tls(&mut buf).unwrap(); + + let server_config = Arc::new(make_server_config(KeyType::Ed25519)); + let mut acceptor = Acceptor::default(); + acceptor + .read_tls(&mut buf.as_slice()) + .unwrap(); + let accepted = acceptor.accept().unwrap().unwrap(); + let ch = accepted.client_hello(); + assert_eq!(ch.server_name(), Some("localhost")); + + let server = accepted + .into_connection(server_config) + .unwrap(); + assert!(server.wants_write()); + + // Reusing an acceptor is not allowed + assert_eq!( + acceptor + .read_tls(&mut [0u8].as_ref()) + .err() + .unwrap() + .kind(), + io::ErrorKind::Other, + ); + assert_eq!( + acceptor.accept().err(), + Some(Error::General("Acceptor polled 
after completion".into())) + ); + + let mut acceptor = Acceptor::default(); + assert!(acceptor.accept().unwrap().is_none()); + acceptor + .read_tls(&mut &buf[..3]) + .unwrap(); // incomplete message + assert!(acceptor.accept().unwrap().is_none()); + acceptor + .read_tls(&mut [0x80, 0x00].as_ref()) + .unwrap(); // invalid message (len = 32k bytes) + assert!(acceptor.accept().is_err()); + + let mut acceptor = Acceptor::default(); + // Minimal valid 1-byte application data message is not a handshake message + acceptor + .read_tls(&mut [0x17, 0x03, 0x03, 0x00, 0x01, 0x00].as_ref()) + .unwrap(); + assert!(acceptor.accept().is_err()); + + let mut acceptor = Acceptor::default(); + // Minimal 1-byte ClientHello message is not a legal handshake message + acceptor + .read_tls(&mut [0x16, 0x03, 0x03, 0x00, 0x05, 0x01, 0x00, 0x00, 0x01, 0x00].as_ref()) + .unwrap(); + assert!(acceptor.accept().is_err()); +} + +#[derive(Default, Debug)] +struct LogCounts { + trace: usize, + debug: usize, + info: usize, + warn: usize, + error: usize, +} + +impl LogCounts { + fn new() -> Self { + Self { + ..Default::default() + } + } + + fn reset(&mut self) { + *self = Self::new(); + } + + fn add(&mut self, level: log::Level) { + match level { + log::Level::Trace => self.trace += 1, + log::Level::Debug => self.debug += 1, + log::Level::Info => self.info += 1, + log::Level::Warn => self.warn += 1, + log::Level::Error => self.error += 1, + } + } +} + +thread_local!(static COUNTS: RefCell = RefCell::new(LogCounts::new())); + +struct CountingLogger; + +static LOGGER: CountingLogger = CountingLogger; + +impl CountingLogger { + fn install() { + log::set_logger(&LOGGER).unwrap(); + log::set_max_level(log::LevelFilter::Trace); + } + + fn reset() { + COUNTS.with(|c| { + c.borrow_mut().reset(); + }); + } +} + +impl log::Log for CountingLogger { + fn enabled(&self, _metadata: &log::Metadata) -> bool { + true + } + + fn log(&self, record: &log::Record) { + println!("logging at {:?}: {:?}", record.level(), 
record.args()); + + COUNTS.with(|c| { + c.borrow_mut().add(record.level()); + }); + } + + fn flush(&self) {} +} + +#[test] +fn test_no_warning_logging_during_successful_sessions() { + CountingLogger::install(); + CountingLogger::reset(); + + for kt in ALL_KEY_TYPES.iter() { + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(*kt, &[version]); + let (mut client, mut server) = + make_pair_for_configs(client_config, make_server_config(*kt)); + do_handshake(&mut client, &mut server); + } + } + + if cfg!(feature = "logging") { + COUNTS.with(|c| { + println!("After tests: {:?}", c.borrow()); + assert_eq!(c.borrow().warn, 0); + assert_eq!(c.borrow().error, 0); + assert_eq!(c.borrow().info, 0); + assert!(c.borrow().trace > 0); + assert!(c.borrow().debug > 0); + }); + } else { + COUNTS.with(|c| { + println!("After tests: {:?}", c.borrow()); + assert_eq!(c.borrow().warn, 0); + assert_eq!(c.borrow().error, 0); + assert_eq!(c.borrow().info, 0); + assert_eq!(c.borrow().trace, 0); + assert_eq!(c.borrow().debug, 0); + }); + } +} + +/// Test that secrets can be extracted and used for encryption/decryption. +#[cfg(feature = "secret_extraction")] +#[test] +fn test_secret_extraction_enabled() { + // Normally, secret extraction would be used to configure kTLS (TLS offload + // to the kernel). We want this test to run on any platform, though, so + // instead we just compare secrets for equality. + + // TLS 1.2 and 1.3 have different mechanisms for key exchange and handshake, + // and secrets are stored/extracted differently, so we want to test them both. + // We support 3 different AEAD algorithms (AES-128-GCM mode, AES-256-GCM, and + // Chacha20Poly1305), so that's 2*3 = 6 combinations to test. 
+ let kt = KeyType::Rsa; + for suite in [ + rustls::cipher_suite::TLS13_AES_128_GCM_SHA256, + rustls::cipher_suite::TLS13_AES_256_GCM_SHA384, + rustls::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256, + rustls::cipher_suite::TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + rustls::cipher_suite::TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + rustls::cipher_suite::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + ] { + let version = suite.version(); + println!("Testing suite {:?}", suite.suite().as_str()); + + // Only offer the cipher suite (and protocol version) that we're testing + let mut server_config = ServerConfig::builder() + .with_cipher_suites(&[suite]) + .with_safe_default_kx_groups() + .with_protocol_versions(&[version]) + .unwrap() + .with_no_client_auth() + .with_single_cert(kt.get_chain(), kt.get_key()) + .unwrap(); + // Opt into secret extraction from both sides + server_config.enable_secret_extraction = true; + let server_config = Arc::new(server_config); + + let mut client_config = make_client_config(kt); + client_config.enable_secret_extraction = true; + + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + + do_handshake(&mut client, &mut server); + + // The handshake is finished, we're now able to extract traffic secrets + let client_secrets = client.extract_secrets().unwrap(); + let server_secrets = server.extract_secrets().unwrap(); + + // Comparing secrets for equality is something you should never have to + // do in production code, so ConnectionTrafficSecrets doesn't implement + // PartialEq/Eq on purpose. Instead, we have to get creative. 
+ fn explode_secrets(s: &ConnectionTrafficSecrets) -> (&[u8], &[u8], &[u8]) { + match s { + ConnectionTrafficSecrets::Aes128Gcm { key, salt, iv } => (key, salt, iv), + ConnectionTrafficSecrets::Aes256Gcm { key, salt, iv } => (key, salt, iv), + ConnectionTrafficSecrets::Chacha20Poly1305 { key, iv } => (key, &[], iv), + _ => panic!("unexpected secret type"), + } + } + + fn assert_secrets_equal( + (l_seq, l_sec): (u64, ConnectionTrafficSecrets), + (r_seq, r_sec): (u64, ConnectionTrafficSecrets), + ) { + assert_eq!(l_seq, r_seq); + assert_eq!(explode_secrets(&l_sec), explode_secrets(&r_sec)); + } + + assert_secrets_equal(client_secrets.tx, server_secrets.rx); + assert_secrets_equal(client_secrets.rx, server_secrets.tx); + } +} + +/// Test that secrets cannot be extracted unless explicitly enabled, and until +/// the handshake is done. +#[cfg(feature = "secret_extraction")] +#[test] +fn test_secret_extraction_disabled_or_too_early() { + let suite = rustls::cipher_suite::TLS13_AES_128_GCM_SHA256; + let kt = KeyType::Rsa; + + for (server_enable, client_enable) in [(true, false), (false, true)] { + let mut server_config = ServerConfig::builder() + .with_cipher_suites(&[suite]) + .with_safe_default_kx_groups() + .with_safe_default_protocol_versions() + .unwrap() + .with_no_client_auth() + .with_single_cert(kt.get_chain(), kt.get_key()) + .unwrap(); + server_config.enable_secret_extraction = server_enable; + let server_config = Arc::new(server_config); + + let mut client_config = make_client_config(kt); + client_config.enable_secret_extraction = client_enable; + + let client_config = Arc::new(client_config); + + let (client, server) = make_pair_for_arc_configs(&client_config, &server_config); + + assert!( + client.extract_secrets().is_err(), + "extraction should fail until handshake completes" + ); + assert!( + server.extract_secrets().is_err(), + "extraction should fail until handshake completes" + ); + + let (mut client, mut server) = 
make_pair_for_arc_configs(&client_config, &server_config); + + do_handshake(&mut client, &mut server); + + assert_eq!(server_enable, server.extract_secrets().is_ok()); + assert_eq!(client_enable, client.extract_secrets().is_ok()); + } +} + +#[test] +fn test_received_plaintext_backpressure() { + let suite = rustls::cipher_suite::TLS13_AES_128_GCM_SHA256; + let kt = KeyType::Rsa; + + let server_config = Arc::new( + ServerConfig::builder() + .with_cipher_suites(&[suite]) + .with_safe_default_kx_groups() + .with_safe_default_protocol_versions() + .unwrap() + .with_no_client_auth() + .with_single_cert(kt.get_chain(), kt.get_key()) + .unwrap(), + ); + + let client_config = Arc::new(make_client_config(kt)); + let (mut client, mut server) = make_pair_for_arc_configs(&client_config, &server_config); + do_handshake(&mut client, &mut server); + + // Fill the server's received plaintext buffer with 16k bytes + let client_buf = [0; 16_385]; + dbg!(client + .writer() + .write(&client_buf) + .unwrap()); + let mut network_buf = Vec::with_capacity(32_768); + let sent = dbg!(client + .write_tls(&mut network_buf) + .unwrap()); + assert_eq!( + sent, + dbg!(server + .read_tls(&mut &network_buf[..sent]) + .unwrap()) + ); + server.process_new_packets().unwrap(); + + // Send two more bytes from client to server + dbg!(client + .writer() + .write(&client_buf[..2]) + .unwrap()); + let sent = dbg!(client + .write_tls(&mut network_buf) + .unwrap()); + + // Get an error because the received plaintext buffer is full + assert!(server + .read_tls(&mut &network_buf[..sent]) + .is_err()); + + // Read out some of the plaintext + server + .reader() + .read_exact(&mut [0; 2]) + .unwrap(); + + // Now there's room again in the plaintext buffer + assert_eq!( + server + .read_tls(&mut &network_buf[..sent]) + .unwrap(), + 24 + ); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/bogo.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/bogo.rs 
new file mode 100644 index 0000000000000000000000000000000000000000..e96073d1098e3cc6d6dc73f1afe31ef84828f522 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/bogo.rs @@ -0,0 +1,18 @@ +// Runs the bogo test suite, in the form of a rust test. +// Note that bogo requires a golang environment to build +// and run. + +#[test] +#[cfg(all(coverage, feature = "quic", feature = "dangerous_configuration"))] +fn run_bogo_tests() { + use std::process::Command; + + let rc = Command::new("./runme") + .current_dir("../bogo") + .spawn() + .expect("cannot run bogo/runme") + .wait() + .expect("cannot wait for bogo"); + + assert!(rc.success(), "bogo exited non-zero"); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/client_cert_verifier.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/client_cert_verifier.rs new file mode 100644 index 0000000000000000000000000000000000000000..5af9c2ac67e7a685999be0c7639490fdde56cc4a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/client_cert_verifier.rs @@ -0,0 +1,324 @@ +//! Tests for configuring and using a [`ClientCertVerifier`] for a server. + +#![cfg(feature = "dangerous_configuration")] + +mod common; + +use crate::common::{ + dns_name, do_handshake_until_both_error, do_handshake_until_error, get_client_root_store, + make_client_config_with_versions, make_client_config_with_versions_with_auth, + make_pair_for_arc_configs, ErrorFromPeer, KeyType, ALL_KEY_TYPES, +}; +use rustls::client::WebPkiVerifier; +use rustls::internal::msgs::base::PayloadU16; +use rustls::server::{ClientCertVerified, ClientCertVerifier}; +use rustls::AlertDescription; +use rustls::ContentType; +use rustls::{ + Certificate, ClientConnection, DistinguishedNames, Error, ServerConfig, ServerConnection, + SignatureScheme, +}; +use std::sync::Arc; + +// Client is authorized! 
+fn ver_ok() -> Result { + Ok(rustls::server::ClientCertVerified::assertion()) +} + +// Use when we shouldn't even attempt verification +fn ver_unreachable() -> Result { + unreachable!() +} + +// Verifier that returns an error that we can expect +fn ver_err() -> Result { + Err(Error::General("test err".to_string())) +} + +fn server_config_with_verifier( + kt: KeyType, + client_cert_verifier: MockClientVerifier, +) -> ServerConfig { + ServerConfig::builder() + .with_safe_defaults() + .with_client_cert_verifier(Arc::new(client_cert_verifier)) + .with_single_cert(kt.get_chain(), kt.get_key()) + .unwrap() +} + +#[test] +// Happy path, we resolve to a root, it is verified OK, should be able to connect +fn client_verifier_works() { + for kt in ALL_KEY_TYPES.iter() { + let client_verifier = MockClientVerifier { + verified: ver_ok, + subjects: Some( + get_client_root_store(*kt) + .roots + .iter() + .map(|r| PayloadU16(r.subject().to_vec())) + .collect(), + ), + mandatory: Some(true), + offered_schemes: None, + }; + + let server_config = server_config_with_verifier(*kt, client_verifier); + let server_config = Arc::new(server_config); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions_with_auth(*kt, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config.clone()), &server_config); + let err = do_handshake_until_error(&mut client, &mut server); + assert_eq!(err, Ok(())); + } + } +} + +// Server offers no verification schemes +#[test] +fn client_verifier_no_schemes() { + for kt in ALL_KEY_TYPES.iter() { + let client_verifier = MockClientVerifier { + verified: ver_ok, + subjects: Some( + get_client_root_store(*kt) + .roots + .iter() + .map(|r| PayloadU16(r.subject().to_vec())) + .collect(), + ), + mandatory: Some(true), + offered_schemes: Some(vec![]), + }; + + let server_config = server_config_with_verifier(*kt, client_verifier); + let server_config = Arc::new(server_config); + + for 
version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions_with_auth(*kt, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config.clone()), &server_config); + let err = do_handshake_until_error(&mut client, &mut server); + assert_eq!( + err, + Err(ErrorFromPeer::Client(Error::CorruptMessagePayload( + ContentType::Handshake + ))) + ); + } + } +} + +// Common case, we do not find a root store to resolve to +#[test] +fn client_verifier_no_root() { + for kt in ALL_KEY_TYPES.iter() { + let client_verifier = MockClientVerifier { + verified: ver_ok, + subjects: None, + mandatory: Some(true), + offered_schemes: None, + }; + + let server_config = server_config_with_verifier(*kt, client_verifier); + let server_config = Arc::new(server_config); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions_with_auth(*kt, &[version]); + let mut server = ServerConnection::new(Arc::clone(&server_config)).unwrap(); + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("notlocalhost")).unwrap(); + let errs = do_handshake_until_both_error(&mut client, &mut server); + assert_eq!( + errs, + Err(vec![ + ErrorFromPeer::Server(Error::General( + "client rejected by client_auth_root_subjects".into() + )), + ErrorFromPeer::Client(Error::AlertReceived(AlertDescription::AccessDenied)) + ]) + ); + } + } +} + +// If we cannot resolve a root, we cannot decide if auth is mandatory +#[test] +fn client_verifier_no_auth_no_root() { + for kt in ALL_KEY_TYPES.iter() { + let client_verifier = MockClientVerifier { + verified: ver_unreachable, + subjects: None, + mandatory: Some(true), + offered_schemes: None, + }; + + let server_config = server_config_with_verifier(*kt, client_verifier); + let server_config = Arc::new(server_config); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(*kt, &[version]); + let mut server = 
ServerConnection::new(Arc::clone(&server_config)).unwrap(); + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("notlocalhost")).unwrap(); + let errs = do_handshake_until_both_error(&mut client, &mut server); + assert_eq!( + errs, + Err(vec![ + ErrorFromPeer::Server(Error::General( + "client rejected by client_auth_root_subjects".into() + )), + ErrorFromPeer::Client(Error::AlertReceived(AlertDescription::AccessDenied)) + ]) + ); + } + } +} + +// If we do have a root, we must do auth +#[test] +fn client_verifier_no_auth_yes_root() { + for kt in ALL_KEY_TYPES.iter() { + let client_verifier = MockClientVerifier { + verified: ver_unreachable, + subjects: Some( + get_client_root_store(*kt) + .roots + .iter() + .map(|r| PayloadU16(r.subject().to_vec())) + .collect(), + ), + mandatory: Some(true), + offered_schemes: None, + }; + + let server_config = server_config_with_verifier(*kt, client_verifier); + let server_config = Arc::new(server_config); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(*kt, &[version]); + let mut server = ServerConnection::new(Arc::clone(&server_config)).unwrap(); + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("localhost")).unwrap(); + let errs = do_handshake_until_both_error(&mut client, &mut server); + assert_eq!( + errs, + Err(vec![ + ErrorFromPeer::Server(Error::NoCertificatesPresented), + ErrorFromPeer::Client(Error::AlertReceived( + AlertDescription::CertificateRequired + )) + ]) + ); + } + } +} + +#[test] +// Triple checks we propagate the rustls::Error through +fn client_verifier_fails_properly() { + for kt in ALL_KEY_TYPES.iter() { + let client_verifier = MockClientVerifier { + verified: ver_err, + subjects: Some( + get_client_root_store(*kt) + .roots + .iter() + .map(|r| PayloadU16(r.subject().to_vec())) + .collect(), + ), + mandatory: Some(true), + offered_schemes: None, + }; + + let server_config = server_config_with_verifier(*kt, 
client_verifier); + let server_config = Arc::new(server_config); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions_with_auth(*kt, &[version]); + let mut server = ServerConnection::new(Arc::clone(&server_config)).unwrap(); + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("localhost")).unwrap(); + let err = do_handshake_until_error(&mut client, &mut server); + assert_eq!( + err, + Err(ErrorFromPeer::Server(Error::General("test err".into()))) + ); + } + } +} + +#[test] +// If a verifier returns a None on Mandatory-ness, then we error out +fn client_verifier_must_determine_client_auth_requirement_to_continue() { + for kt in ALL_KEY_TYPES.iter() { + let client_verifier = MockClientVerifier { + verified: ver_ok, + subjects: Some( + get_client_root_store(*kt) + .roots + .iter() + .map(|r| PayloadU16(r.subject().to_vec())) + .collect(), + ), + mandatory: None, + offered_schemes: None, + }; + + let server_config = server_config_with_verifier(*kt, client_verifier); + let server_config = Arc::new(server_config); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions_with_auth(*kt, &[version]); + let mut server = ServerConnection::new(Arc::clone(&server_config)).unwrap(); + let mut client = + ClientConnection::new(Arc::new(client_config), dns_name("localhost")).unwrap(); + let errs = do_handshake_until_both_error(&mut client, &mut server); + assert_eq!( + errs, + Err(vec![ + ErrorFromPeer::Server(Error::General( + "client rejected by client_auth_mandatory".into() + )), + ErrorFromPeer::Client(Error::AlertReceived(AlertDescription::AccessDenied)) + ]) + ); + } + } +} + +pub struct MockClientVerifier { + pub verified: fn() -> Result, + pub subjects: Option, + pub mandatory: Option, + pub offered_schemes: Option>, +} + +impl ClientCertVerifier for MockClientVerifier { + fn client_auth_mandatory(&self) -> Option { + self.mandatory + } + + fn 
client_auth_root_subjects(&self) -> Option { + self.subjects.as_ref().cloned() + } + + fn verify_client_cert( + &self, + _end_entity: &Certificate, + _intermediates: &[Certificate], + _now: std::time::SystemTime, + ) -> Result { + (self.verified)() + } + + fn supported_verify_schemes(&self) -> Vec { + if let Some(schemes) = &self.offered_schemes { + schemes.clone() + } else { + WebPkiVerifier::verification_schemes() + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/common/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/common/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..d176ed87ee44d51855e0433ee3f737ac2014f2bc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/common/mod.rs @@ -0,0 +1,474 @@ +#![allow(dead_code)] + +use std::convert::{TryFrom, TryInto}; +use std::io; +use std::ops::{Deref, DerefMut}; +use std::sync::Arc; + +use rustls::internal::msgs::codec::Reader; +use rustls::internal::msgs::message::{Message, OpaqueMessage, PlainMessage}; +use rustls::server::AllowAnyAuthenticatedClient; +use rustls::Connection; +use rustls::Error; +use rustls::RootCertStore; +use rustls::{Certificate, PrivateKey}; +use rustls::{ClientConfig, ClientConnection}; +use rustls::{ConnectionCommon, ServerConfig, ServerConnection, SideData}; + +macro_rules! embed_files { + ( + $( + ($name:ident, $keytype:expr, $path:expr); + )+ + ) => { + $( + const $name: &'static [u8] = include_bytes!( + concat!("../../../test-ca/", $keytype, "/", $path)); + )+ + + pub fn bytes_for(keytype: &str, path: &str) -> &'static [u8] { + match (keytype, path) { + $( + ($keytype, $path) => $name, + )+ + _ => panic!("unknown keytype {} with path {}", keytype, path), + } + } + } +} + +embed_files! 
{ + (ECDSA_CA_CERT, "ecdsa", "ca.cert"); + (ECDSA_CA_DER, "ecdsa", "ca.der"); + (ECDSA_CA_KEY, "ecdsa", "ca.key"); + (ECDSA_CLIENT_CERT, "ecdsa", "client.cert"); + (ECDSA_CLIENT_CHAIN, "ecdsa", "client.chain"); + (ECDSA_CLIENT_FULLCHAIN, "ecdsa", "client.fullchain"); + (ECDSA_CLIENT_KEY, "ecdsa", "client.key"); + (ECDSA_CLIENT_REQ, "ecdsa", "client.req"); + (ECDSA_END_CERT, "ecdsa", "end.cert"); + (ECDSA_END_CHAIN, "ecdsa", "end.chain"); + (ECDSA_END_FULLCHAIN, "ecdsa", "end.fullchain"); + (ECDSA_END_KEY, "ecdsa", "end.key"); + (ECDSA_END_REQ, "ecdsa", "end.req"); + (ECDSA_INTER_CERT, "ecdsa", "inter.cert"); + (ECDSA_INTER_KEY, "ecdsa", "inter.key"); + (ECDSA_INTER_REQ, "ecdsa", "inter.req"); + (ECDSA_NISTP256_PEM, "ecdsa", "nistp256.pem"); + (ECDSA_NISTP384_PEM, "ecdsa", "nistp384.pem"); + + (EDDSA_CA_CERT, "eddsa", "ca.cert"); + (EDDSA_CA_DER, "eddsa", "ca.der"); + (EDDSA_CA_KEY, "eddsa", "ca.key"); + (EDDSA_CLIENT_CERT, "eddsa", "client.cert"); + (EDDSA_CLIENT_CHAIN, "eddsa", "client.chain"); + (EDDSA_CLIENT_FULLCHAIN, "eddsa", "client.fullchain"); + (EDDSA_CLIENT_KEY, "eddsa", "client.key"); + (EDDSA_CLIENT_REQ, "eddsa", "client.req"); + (EDDSA_END_CERT, "eddsa", "end.cert"); + (EDDSA_END_CHAIN, "eddsa", "end.chain"); + (EDDSA_END_FULLCHAIN, "eddsa", "end.fullchain"); + (EDDSA_END_KEY, "eddsa", "end.key"); + (EDDSA_END_REQ, "eddsa", "end.req"); + (EDDSA_INTER_CERT, "eddsa", "inter.cert"); + (EDDSA_INTER_KEY, "eddsa", "inter.key"); + (EDDSA_INTER_REQ, "eddsa", "inter.req"); + + (RSA_CA_CERT, "rsa", "ca.cert"); + (RSA_CA_DER, "rsa", "ca.der"); + (RSA_CA_KEY, "rsa", "ca.key"); + (RSA_CLIENT_CERT, "rsa", "client.cert"); + (RSA_CLIENT_CHAIN, "rsa", "client.chain"); + (RSA_CLIENT_FULLCHAIN, "rsa", "client.fullchain"); + (RSA_CLIENT_KEY, "rsa", "client.key"); + (RSA_CLIENT_REQ, "rsa", "client.req"); + (RSA_CLIENT_RSA, "rsa", "client.rsa"); + (RSA_END_CERT, "rsa", "end.cert"); + (RSA_END_CHAIN, "rsa", "end.chain"); + (RSA_END_FULLCHAIN, "rsa", "end.fullchain"); + 
(RSA_END_KEY, "rsa", "end.key"); + (RSA_END_REQ, "rsa", "end.req"); + (RSA_END_RSA, "rsa", "end.rsa"); + (RSA_INTER_CERT, "rsa", "inter.cert"); + (RSA_INTER_KEY, "rsa", "inter.key"); + (RSA_INTER_REQ, "rsa", "inter.req"); +} + +pub fn transfer( + left: &mut (impl DerefMut + Deref>), + right: &mut (impl DerefMut + Deref>), +) -> usize { + let mut buf = [0u8; 262144]; + let mut total = 0; + + while left.wants_write() { + let sz = { + let into_buf: &mut dyn io::Write = &mut &mut buf[..]; + left.write_tls(into_buf).unwrap() + }; + total += sz; + if sz == 0 { + return total; + } + + let mut offs = 0; + loop { + let from_buf: &mut dyn io::Read = &mut &buf[offs..sz]; + offs += right.read_tls(from_buf).unwrap(); + if sz == offs { + break; + } + } + } + + total +} + +pub fn transfer_eof(conn: &mut (impl DerefMut + Deref>)) { + let empty_buf = [0u8; 0]; + let empty_cursor: &mut dyn io::Read = &mut &empty_buf[..]; + let sz = conn.read_tls(empty_cursor).unwrap(); + assert_eq!(sz, 0); +} + +pub enum Altered { + /// message has been edited in-place (or is unchanged) + InPlace, + /// send these raw bytes instead of the message. 
+ Raw(Vec), +} + +pub fn transfer_altered(left: &mut Connection, filter: F, right: &mut Connection) -> usize +where + F: Fn(&mut Message) -> Altered, +{ + let mut buf = [0u8; 262144]; + let mut total = 0; + + while left.wants_write() { + let sz = { + let into_buf: &mut dyn io::Write = &mut &mut buf[..]; + left.write_tls(into_buf).unwrap() + }; + total += sz; + if sz == 0 { + return total; + } + + let mut reader = Reader::init(&buf[..sz]); + while reader.any_left() { + let message = OpaqueMessage::read(&mut reader).unwrap(); + let mut message = Message::try_from(message.into_plain_message()).unwrap(); + let message_enc = match filter(&mut message) { + Altered::InPlace => PlainMessage::from(message) + .into_unencrypted_opaque() + .encode(), + Altered::Raw(data) => data, + }; + + let message_enc_reader: &mut dyn io::Read = &mut &message_enc[..]; + let len = right + .read_tls(message_enc_reader) + .unwrap(); + assert_eq!(len, message_enc.len()); + } + } + + total +} + +#[derive(Clone, Copy, PartialEq)] +pub enum KeyType { + Rsa, + Ecdsa, + Ed25519, +} + +pub static ALL_KEY_TYPES: [KeyType; 3] = [KeyType::Rsa, KeyType::Ecdsa, KeyType::Ed25519]; + +impl KeyType { + fn bytes_for(&self, part: &str) -> &'static [u8] { + match self { + KeyType::Rsa => bytes_for("rsa", part), + KeyType::Ecdsa => bytes_for("ecdsa", part), + KeyType::Ed25519 => bytes_for("eddsa", part), + } + } + + pub fn get_chain(&self) -> Vec { + rustls_pemfile::certs(&mut io::BufReader::new(self.bytes_for("end.fullchain"))) + .unwrap() + .iter() + .map(|v| Certificate(v.clone())) + .collect() + } + + pub fn get_key(&self) -> PrivateKey { + PrivateKey( + rustls_pemfile::pkcs8_private_keys(&mut io::BufReader::new(self.bytes_for("end.key"))) + .unwrap()[0] + .clone(), + ) + } + + pub fn get_client_chain(&self) -> Vec { + rustls_pemfile::certs(&mut io::BufReader::new(self.bytes_for("client.fullchain"))) + .unwrap() + .iter() + .map(|v| Certificate(v.clone())) + .collect() + } + + fn get_client_key(&self) -> 
PrivateKey { + PrivateKey( + rustls_pemfile::pkcs8_private_keys(&mut io::BufReader::new( + self.bytes_for("client.key"), + )) + .unwrap()[0] + .clone(), + ) + } +} + +pub fn finish_server_config( + kt: KeyType, + conf: rustls::ConfigBuilder, +) -> ServerConfig { + conf.with_no_client_auth() + .with_single_cert(kt.get_chain(), kt.get_key()) + .unwrap() +} + +pub fn make_server_config(kt: KeyType) -> ServerConfig { + finish_server_config(kt, ServerConfig::builder().with_safe_defaults()) +} + +pub fn make_server_config_with_versions( + kt: KeyType, + versions: &[&'static rustls::SupportedProtocolVersion], +) -> ServerConfig { + finish_server_config( + kt, + ServerConfig::builder() + .with_safe_default_cipher_suites() + .with_safe_default_kx_groups() + .with_protocol_versions(versions) + .unwrap(), + ) +} + +pub fn make_server_config_with_kx_groups( + kt: KeyType, + kx_groups: &[&'static rustls::SupportedKxGroup], +) -> ServerConfig { + finish_server_config( + kt, + ServerConfig::builder() + .with_safe_default_cipher_suites() + .with_kx_groups(kx_groups) + .with_safe_default_protocol_versions() + .unwrap(), + ) +} + +pub fn get_client_root_store(kt: KeyType) -> RootCertStore { + let roots = kt.get_chain(); + let mut client_auth_roots = RootCertStore::empty(); + for root in roots { + client_auth_roots.add(&root).unwrap(); + } + client_auth_roots +} + +pub fn make_server_config_with_mandatory_client_auth(kt: KeyType) -> ServerConfig { + let client_auth_roots = get_client_root_store(kt); + + let client_auth = AllowAnyAuthenticatedClient::new(client_auth_roots); + + ServerConfig::builder() + .with_safe_defaults() + .with_client_cert_verifier(client_auth) + .with_single_cert(kt.get_chain(), kt.get_key()) + .unwrap() +} + +pub fn finish_client_config( + kt: KeyType, + config: rustls::ConfigBuilder, +) -> ClientConfig { + let mut root_store = RootCertStore::empty(); + let mut rootbuf = io::BufReader::new(kt.bytes_for("ca.cert")); + 
root_store.add_parsable_certificates(&rustls_pemfile::certs(&mut rootbuf).unwrap()); + + config + .with_root_certificates(root_store) + .with_no_client_auth() +} + +pub fn finish_client_config_with_creds( + kt: KeyType, + config: rustls::ConfigBuilder, +) -> ClientConfig { + let mut root_store = RootCertStore::empty(); + let mut rootbuf = io::BufReader::new(kt.bytes_for("ca.cert")); + root_store.add_parsable_certificates(&rustls_pemfile::certs(&mut rootbuf).unwrap()); + + config + .with_root_certificates(root_store) + .with_single_cert(kt.get_client_chain(), kt.get_client_key()) + .unwrap() +} + +pub fn make_client_config(kt: KeyType) -> ClientConfig { + finish_client_config(kt, ClientConfig::builder().with_safe_defaults()) +} + +pub fn make_client_config_with_kx_groups( + kt: KeyType, + kx_groups: &[&'static rustls::SupportedKxGroup], +) -> ClientConfig { + let builder = ClientConfig::builder() + .with_safe_default_cipher_suites() + .with_kx_groups(kx_groups) + .with_safe_default_protocol_versions() + .unwrap(); + finish_client_config(kt, builder) +} + +pub fn make_client_config_with_versions( + kt: KeyType, + versions: &[&'static rustls::SupportedProtocolVersion], +) -> ClientConfig { + let builder = ClientConfig::builder() + .with_safe_default_cipher_suites() + .with_safe_default_kx_groups() + .with_protocol_versions(versions) + .unwrap(); + finish_client_config(kt, builder) +} + +pub fn make_client_config_with_auth(kt: KeyType) -> ClientConfig { + finish_client_config_with_creds(kt, ClientConfig::builder().with_safe_defaults()) +} + +pub fn make_client_config_with_versions_with_auth( + kt: KeyType, + versions: &[&'static rustls::SupportedProtocolVersion], +) -> ClientConfig { + let builder = ClientConfig::builder() + .with_safe_default_cipher_suites() + .with_safe_default_kx_groups() + .with_protocol_versions(versions) + .unwrap(); + finish_client_config_with_creds(kt, builder) +} + +pub fn make_pair(kt: KeyType) -> (ClientConnection, ServerConnection) { + 
make_pair_for_configs(make_client_config(kt), make_server_config(kt)) +} + +pub fn make_pair_for_configs( + client_config: ClientConfig, + server_config: ServerConfig, +) -> (ClientConnection, ServerConnection) { + make_pair_for_arc_configs(&Arc::new(client_config), &Arc::new(server_config)) +} + +pub fn make_pair_for_arc_configs( + client_config: &Arc, + server_config: &Arc, +) -> (ClientConnection, ServerConnection) { + ( + ClientConnection::new(Arc::clone(client_config), dns_name("localhost")).unwrap(), + ServerConnection::new(Arc::clone(server_config)).unwrap(), + ) +} + +pub fn do_handshake( + client: &mut (impl DerefMut + Deref>), + server: &mut (impl DerefMut + Deref>), +) -> (usize, usize) { + let (mut to_client, mut to_server) = (0, 0); + while server.is_handshaking() || client.is_handshaking() { + to_server += transfer(client, server); + server.process_new_packets().unwrap(); + to_client += transfer(server, client); + client.process_new_packets().unwrap(); + } + (to_server, to_client) +} + +#[derive(PartialEq, Debug)] +pub enum ErrorFromPeer { + Client(Error), + Server(Error), +} + +pub fn do_handshake_until_error( + client: &mut ClientConnection, + server: &mut ServerConnection, +) -> Result<(), ErrorFromPeer> { + while server.is_handshaking() || client.is_handshaking() { + transfer(client, server); + server + .process_new_packets() + .map_err(ErrorFromPeer::Server)?; + transfer(server, client); + client + .process_new_packets() + .map_err(ErrorFromPeer::Client)?; + } + + Ok(()) +} + +pub fn do_handshake_until_both_error( + client: &mut ClientConnection, + server: &mut ServerConnection, +) -> Result<(), Vec> { + match do_handshake_until_error(client, server) { + Err(server_err @ ErrorFromPeer::Server(_)) => { + let mut errors = vec![server_err]; + transfer(server, client); + let client_err = client + .process_new_packets() + .map_err(ErrorFromPeer::Client) + .expect_err("client didn't produce error after server error"); + errors.push(client_err); + 
Err(errors) + } + + Err(client_err @ ErrorFromPeer::Client(_)) => { + let mut errors = vec![client_err]; + transfer(client, server); + let server_err = server + .process_new_packets() + .map_err(ErrorFromPeer::Server) + .expect_err("server didn't produce error after client error"); + errors.push(server_err); + Err(errors) + } + + Ok(()) => Ok(()), + } +} + +pub fn dns_name(name: &'static str) -> rustls::ServerName { + name.try_into().unwrap() +} + +pub struct FailsReads { + errkind: io::ErrorKind, +} + +impl FailsReads { + pub fn new(errkind: io::ErrorKind) -> Self { + FailsReads { errkind } + } +} + +impl io::Read for FailsReads { + fn read(&mut self, _b: &mut [u8]) -> io::Result { + Err(io::Error::from(self.errkind)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/key_log_file_env.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/key_log_file_env.rs new file mode 100644 index 0000000000000000000000000000000000000000..bc15f5331c8fa311798edbde3445c822d126cdae --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/key_log_file_env.rs @@ -0,0 +1,104 @@ +//! Tests of [`rustls::KeyLogFile`] that require us to set environment variables. +//! +//! vvvv +//! Every test you add to this file MUST execute through `serialized()`. +//! ^^^^ +//! +//! See https://github.com/rust-lang/rust/issues/90308; despite not being marked +//! `unsafe`, `env::var::set_var` is an unsafe function. These tests are separated +//! from the rest of the tests so that their use of `set_ver` is less likely to +//! affect them; as of the time these tests were moved to this file, Cargo will +//! compile each test suite file to a separate executable, so these will be run +//! in a completely separate process. This way, executing every test through +//! `serialized()` will cause them to be run one at a time. +//! +//! 
Note: If/when we add new constructors to `KeyLogFile` to allow constructing +//! one from a path directly (without using an environment variable), then those +//! tests SHOULD NOT go in this file. +//! +//! XXX: These tests don't actually test the functionality; they just ensure +//! the code coverage doesn't complain it isn't covered. TODO: Verify that the +//! file was created successfully, with the right permissions, etc., and that it +//! contains something like what we expect. + +#[allow(dead_code)] +mod common; + +use crate::common::{ + do_handshake, make_client_config_with_versions, make_pair_for_arc_configs, make_server_config, + transfer, KeyType, +}; +use std::{ + env, + io::Write, + sync::{Arc, Mutex, Once}, +}; + +/// Approximates `#[serial]` from the `serial_test` crate. +/// +/// No attempt is made to recover from a poisoned mutex, which will +/// happen when `f` panics. In other words, all the tests that use +/// `serialized` will start failing after one test panics. +fn serialized(f: impl FnOnce()) { + // Ensure every test is run serialized + // TODO: Use `std::sync::Lazy` once that is stable. + static mut MUTEX: Option> = None; + static ONCE: Once = Once::new(); + ONCE.call_once(|| unsafe { + MUTEX = Some(Mutex::new(())); + }); + let mutex = unsafe { MUTEX.as_mut() }; + + let _guard = mutex.unwrap().lock().unwrap(); + + // XXX: NOT thread safe. 
+ env::set_var("SSLKEYLOGFILE", "./sslkeylogfile.txt"); + + f() +} + +#[test] +fn exercise_key_log_file_for_client() { + serialized(|| { + let server_config = Arc::new(make_server_config(KeyType::Rsa)); + env::set_var("SSLKEYLOGFILE", "./sslkeylogfile.txt"); + + for version in rustls::ALL_VERSIONS { + let mut client_config = make_client_config_with_versions(KeyType::Rsa, &[version]); + client_config.key_log = Arc::new(rustls::KeyLogFile::new()); + + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + + assert_eq!(5, client.writer().write(b"hello").unwrap()); + + do_handshake(&mut client, &mut server); + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + } + }) +} + +#[test] +fn exercise_key_log_file_for_server() { + serialized(|| { + let mut server_config = make_server_config(KeyType::Rsa); + + env::set_var("SSLKEYLOGFILE", "./sslkeylogfile.txt"); + server_config.key_log = Arc::new(rustls::KeyLogFile::new()); + + let server_config = Arc::new(server_config); + + for version in rustls::ALL_VERSIONS { + let client_config = make_client_config_with_versions(KeyType::Rsa, &[version]); + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + + assert_eq!(5, client.writer().write(b"hello").unwrap()); + + do_handshake(&mut client, &mut server); + transfer(&mut client, &mut server); + server.process_new_packets().unwrap(); + } + }) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/server_cert_verifier.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/server_cert_verifier.rs new file mode 100644 index 0000000000000000000000000000000000000000..65d635cc60974544826e1720d4e38de7970d5b0b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rustls-0.20.9/tests/server_cert_verifier.rs @@ -0,0 +1,272 @@ +//! 
Tests for configuring and using a [`ServerCertVerifier`] for a client. + +#![cfg(feature = "dangerous_configuration")] + +mod common; +use crate::common::{ + do_handshake, do_handshake_until_both_error, make_client_config_with_versions, + make_pair_for_arc_configs, make_server_config, ErrorFromPeer, ALL_KEY_TYPES, +}; +use rustls::client::{ + HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier, WebPkiVerifier, +}; +use rustls::internal::msgs::handshake::DigitallySignedStruct; +use rustls::AlertDescription; +use rustls::{Certificate, Error, SignatureScheme}; +use std::sync::Arc; + +#[test] +fn client_can_override_certificate_verification() { + for kt in ALL_KEY_TYPES.iter() { + let verifier = Arc::new(MockServerVerifier::accepts_anything()); + + let server_config = Arc::new(make_server_config(*kt)); + + for version in rustls::ALL_VERSIONS { + let mut client_config = make_client_config_with_versions(*kt, &[version]); + client_config + .dangerous() + .set_certificate_verifier(verifier.clone()); + + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + do_handshake(&mut client, &mut server); + } + } +} + +#[test] +fn client_can_override_certificate_verification_and_reject_certificate() { + for kt in ALL_KEY_TYPES.iter() { + let verifier = Arc::new(MockServerVerifier::rejects_certificate( + Error::CorruptMessage, + )); + + let server_config = Arc::new(make_server_config(*kt)); + + for version in rustls::ALL_VERSIONS { + let mut client_config = make_client_config_with_versions(*kt, &[version]); + client_config + .dangerous() + .set_certificate_verifier(verifier.clone()); + + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + let errs = do_handshake_until_both_error(&mut client, &mut server); + assert_eq!( + errs, + Err(vec![ + ErrorFromPeer::Client(Error::CorruptMessage), + ErrorFromPeer::Server(Error::AlertReceived(AlertDescription::BadCertificate)) + ]) + 
); + } + } +} + +#[cfg(feature = "tls12")] +#[test] +fn client_can_override_certificate_verification_and_reject_tls12_signatures() { + for kt in ALL_KEY_TYPES.iter() { + let mut client_config = make_client_config_with_versions(*kt, &[&rustls::version::TLS12]); + let verifier = Arc::new(MockServerVerifier::rejects_tls12_signatures( + Error::CorruptMessage, + )); + + client_config + .dangerous() + .set_certificate_verifier(verifier); + + let server_config = Arc::new(make_server_config(*kt)); + + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + let errs = do_handshake_until_both_error(&mut client, &mut server); + assert_eq!( + errs, + Err(vec![ + ErrorFromPeer::Client(Error::CorruptMessage), + ErrorFromPeer::Server(Error::AlertReceived(AlertDescription::BadCertificate)) + ]) + ); + } +} + +#[test] +fn client_can_override_certificate_verification_and_reject_tls13_signatures() { + for kt in ALL_KEY_TYPES.iter() { + let mut client_config = make_client_config_with_versions(*kt, &[&rustls::version::TLS13]); + let verifier = Arc::new(MockServerVerifier::rejects_tls13_signatures( + Error::CorruptMessage, + )); + + client_config + .dangerous() + .set_certificate_verifier(verifier); + + let server_config = Arc::new(make_server_config(*kt)); + + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + let errs = do_handshake_until_both_error(&mut client, &mut server); + assert_eq!( + errs, + Err(vec![ + ErrorFromPeer::Client(Error::CorruptMessage), + ErrorFromPeer::Server(Error::AlertReceived(AlertDescription::BadCertificate)) + ]) + ); + } +} + +#[test] +fn client_can_override_certificate_verification_and_offer_no_signature_schemes() { + for kt in ALL_KEY_TYPES.iter() { + let verifier = Arc::new(MockServerVerifier::offers_no_signature_schemes()); + + let server_config = Arc::new(make_server_config(*kt)); + + for version in rustls::ALL_VERSIONS { + let mut client_config = 
make_client_config_with_versions(*kt, &[version]); + client_config + .dangerous() + .set_certificate_verifier(verifier.clone()); + + let (mut client, mut server) = + make_pair_for_arc_configs(&Arc::new(client_config), &server_config); + let errs = do_handshake_until_both_error(&mut client, &mut server); + assert_eq!( + errs, + Err(vec![ + ErrorFromPeer::Server(Error::PeerIncompatibleError( + "no overlapping sigschemes".into() + )), + ErrorFromPeer::Client(Error::AlertReceived(AlertDescription::HandshakeFailure)), + ]) + ); + } + } +} + +pub struct MockServerVerifier { + cert_rejection_error: Option, + tls12_signature_error: Option, + tls13_signature_error: Option, + wants_scts: bool, + signature_schemes: Vec, +} + +impl ServerCertVerifier for MockServerVerifier { + fn verify_server_cert( + &self, + end_entity: &rustls::Certificate, + intermediates: &[rustls::Certificate], + server_name: &rustls::ServerName, + scts: &mut dyn Iterator, + oscp_response: &[u8], + now: std::time::SystemTime, + ) -> Result { + let scts: Vec> = scts.map(|x| x.to_owned()).collect(); + println!( + "verify_server_cert({:?}, {:?}, {:?}, {:?}, {:?}, {:?})", + end_entity, intermediates, server_name, scts, oscp_response, now + ); + if let Some(error) = &self.cert_rejection_error { + Err(error.clone()) + } else { + Ok(ServerCertVerified::assertion()) + } + } + + fn verify_tls12_signature( + &self, + message: &[u8], + cert: &Certificate, + dss: &DigitallySignedStruct, + ) -> Result { + println!( + "verify_tls12_signature({:?}, {:?}, {:?})", + message, cert, dss + ); + if let Some(error) = &self.tls12_signature_error { + Err(error.clone()) + } else { + Ok(HandshakeSignatureValid::assertion()) + } + } + + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &Certificate, + dss: &DigitallySignedStruct, + ) -> Result { + println!( + "verify_tls13_signature({:?}, {:?}, {:?})", + message, cert, dss + ); + if let Some(error) = &self.tls13_signature_error { + Err(error.clone()) + } else { + 
Ok(HandshakeSignatureValid::assertion()) + } + } + + fn supported_verify_schemes(&self) -> Vec { + self.signature_schemes.clone() + } + + fn request_scts(&self) -> bool { + println!("request_scts? {:?}", self.wants_scts); + self.wants_scts + } +} + +impl MockServerVerifier { + pub fn accepts_anything() -> Self { + MockServerVerifier { + cert_rejection_error: None, + ..Default::default() + } + } + + pub fn rejects_certificate(err: Error) -> Self { + MockServerVerifier { + cert_rejection_error: Some(err), + ..Default::default() + } + } + + pub fn rejects_tls12_signatures(err: Error) -> Self { + MockServerVerifier { + tls12_signature_error: Some(err), + ..Default::default() + } + } + + pub fn rejects_tls13_signatures(err: Error) -> Self { + MockServerVerifier { + tls13_signature_error: Some(err), + ..Default::default() + } + } + + pub fn offers_no_signature_schemes() -> Self { + MockServerVerifier { + signature_schemes: vec![], + ..Default::default() + } + } +} + +impl Default for MockServerVerifier { + fn default() -> Self { + MockServerVerifier { + cert_rejection_error: None, + tls12_signature_error: None, + tls13_signature_error: None, + wants_scts: false, + signature_schemes: WebPkiVerifier::verification_schemes(), + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/.github/FUNDING.yml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/.github/FUNDING.yml new file mode 100644 index 0000000000000000000000000000000000000000..750707701cdae985156601cc906195021ba6a6e5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/.github/FUNDING.yml @@ -0,0 +1 @@ +github: dtolnay diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/.github/workflows/ci.yml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/.github/workflows/ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..d9c6c786102d8d94c4c6a9ff52a443bfa7cdf466 --- 
/dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/.github/workflows/ci.yml @@ -0,0 +1,140 @@ +name: CI + +on: + push: + pull_request: + workflow_dispatch: + schedule: [cron: "40 1 * * *"] + +permissions: + contents: read + +env: + RUSTFLAGS: -Dwarnings + +jobs: + pre_ci: + uses: dtolnay/.github/.github/workflows/pre_ci.yml@master + + test: + name: Rust ${{matrix.rust}} + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + rust: [nightly, beta, stable, 1.61.0] + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{matrix.rust}} + - name: Enable type layout randomization + run: echo RUSTFLAGS=${RUSTFLAGS}\ -Zrandomize-layout >> $GITHUB_ENV + if: matrix.rust == 'nightly' + - run: cargo test + - run: cargo check --no-default-features + - run: cargo check --features serde + - run: cargo check --no-default-features --features serde + - uses: actions/upload-artifact@v4 + if: matrix.rust == 'nightly' && always() + with: + name: Cargo.lock + path: Cargo.lock + continue-on-error: true + + node: + name: Node + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@stable + - run: npm install semver + - run: cargo test + env: + RUSTFLAGS: --cfg test_node_semver ${{env.RUSTFLAGS}} + + minimal: + name: Minimal versions + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + - run: cargo generate-lockfile -Z minimal-versions + - run: cargo check --locked --features serde + + doc: + name: Documentation + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + env: + RUSTDOCFLAGS: -Dwarnings + steps: + - uses: actions/checkout@v5 + - uses: 
dtolnay/rust-toolchain@nightly + - uses: dtolnay/install@cargo-docs-rs + - run: cargo docs-rs + + clippy: + name: Clippy + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@clippy + - run: cargo clippy --tests --benches -- -Dclippy::all -Dclippy::pedantic + + miri: + name: Miri + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + env: + MIRIFLAGS: -Zmiri-strict-provenance + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@miri + - name: Run cargo miri test (64-bit little endian) + run: cargo miri test --target x86_64-unknown-linux-gnu + - name: Run cargo miri test (64-bit big endian) + run: cargo miri test --target powerpc64-unknown-linux-gnu + - name: Run cargo miri test (32-bit little endian) + run: cargo miri test --target i686-unknown-linux-gnu + - name: Run cargo miri test (32-bit big endian) + run: cargo miri test --target mips-unknown-linux-gnu + + fuzz: + name: Fuzz + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + - uses: dtolnay/install@cargo-fuzz + - run: cargo fuzz check + + outdated: + name: Outdated + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@stable + - uses: dtolnay/install@cargo-outdated + - run: cargo outdated --workspace --exit-code 1 + - run: cargo outdated --manifest-path fuzz/Cargo.toml --exit-code 1 diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/benches/parse.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/benches/parse.rs new file mode 100644 index 0000000000000000000000000000000000000000..d6aded7802f07b03168a15d05bf73c251354f499 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/benches/parse.rs @@ -0,0 +1,24 @@ +#![feature(test)] + +extern crate test; + +use semver::{Prerelease, Version, VersionReq}; +use test::{black_box, Bencher}; + +#[bench] +fn parse_prerelease(b: &mut Bencher) { + let text = "x.7.z.92"; + b.iter(|| black_box(text).parse::().unwrap()); +} + +#[bench] +fn parse_version(b: &mut Bencher) { + let text = "1.0.2021-beta+exp.sha.5114f85"; + b.iter(|| black_box(text).parse::().unwrap()); +} + +#[bench] +fn parse_version_req(b: &mut Bencher) { + let text = ">=1.2.3, <2.0.0"; + b.iter(|| black_box(text).parse::().unwrap()); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/display.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/display.rs new file mode 100644 index 0000000000000000000000000000000000000000..cbf5f3022649651db53562b6c7e6b3dc55986365 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/display.rs @@ -0,0 +1,163 @@ +use crate::{BuildMetadata, Comparator, Op, Prerelease, Version, VersionReq}; +use core::fmt::{self, Alignment, Debug, Display, Write}; + +impl Display for Version { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let do_display = |formatter: &mut fmt::Formatter| -> fmt::Result { + write!(formatter, "{}.{}.{}", self.major, self.minor, self.patch)?; + if !self.pre.is_empty() { + write!(formatter, "-{}", self.pre)?; + } + if !self.build.is_empty() { + write!(formatter, "+{}", self.build)?; + } + Ok(()) + }; + + let do_len = || -> usize { + digits(self.major) + + 1 + + digits(self.minor) + + 1 + + digits(self.patch) + + !self.pre.is_empty() as usize + + self.pre.len() + + !self.build.is_empty() as usize + + self.build.len() + }; + + pad(formatter, do_display, do_len) + } +} + +impl Display for VersionReq { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + if self.comparators.is_empty() { + return 
formatter.write_str("*"); + } + for (i, comparator) in self.comparators.iter().enumerate() { + if i > 0 { + formatter.write_str(", ")?; + } + write!(formatter, "{}", comparator)?; + } + Ok(()) + } +} + +impl Display for Comparator { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let op = match self.op { + Op::Exact => "=", + Op::Greater => ">", + Op::GreaterEq => ">=", + Op::Less => "<", + Op::LessEq => "<=", + Op::Tilde => "~", + Op::Caret => "^", + Op::Wildcard => "", + }; + formatter.write_str(op)?; + write!(formatter, "{}", self.major)?; + if let Some(minor) = &self.minor { + write!(formatter, ".{}", minor)?; + if let Some(patch) = &self.patch { + write!(formatter, ".{}", patch)?; + if !self.pre.is_empty() { + write!(formatter, "-{}", self.pre)?; + } + } else if self.op == Op::Wildcard { + formatter.write_str(".*")?; + } + } else if self.op == Op::Wildcard { + formatter.write_str(".*")?; + } + Ok(()) + } +} + +impl Display for Prerelease { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(self.as_str()) + } +} + +impl Display for BuildMetadata { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(self.as_str()) + } +} + +impl Debug for Version { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut debug = formatter.debug_struct("Version"); + debug + .field("major", &self.major) + .field("minor", &self.minor) + .field("patch", &self.patch); + if !self.pre.is_empty() { + debug.field("pre", &self.pre); + } + if !self.build.is_empty() { + debug.field("build", &self.build); + } + debug.finish() + } +} + +impl Debug for Prerelease { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "Prerelease(\"{}\")", self) + } +} + +impl Debug for BuildMetadata { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "BuildMetadata(\"{}\")", self) + } +} + +fn pad( + formatter: &mut fmt::Formatter, + 
do_display: impl FnOnce(&mut fmt::Formatter) -> fmt::Result, + do_len: impl FnOnce() -> usize, +) -> fmt::Result { + let min_width = match formatter.width() { + Some(min_width) => min_width, + None => return do_display(formatter), + }; + + let len = do_len(); + if len >= min_width { + return do_display(formatter); + } + + let default_align = Alignment::Left; + let align = formatter.align().unwrap_or(default_align); + let padding = min_width - len; + let (pre_pad, post_pad) = match align { + Alignment::Left => (0, padding), + Alignment::Right => (padding, 0), + Alignment::Center => (padding / 2, (padding + 1) / 2), + }; + + let fill = formatter.fill(); + for _ in 0..pre_pad { + formatter.write_char(fill)?; + } + + do_display(formatter)?; + + for _ in 0..post_pad { + formatter.write_char(fill)?; + } + Ok(()) +} + +fn digits(val: u64) -> usize { + if val < 10 { + 1 + } else { + 1 + digits(val / 10) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/error.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..44c3b587177f69113bfc5e0f84ee7777d1f96abd --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/error.rs @@ -0,0 +1,126 @@ +use crate::parse::Error; +use core::fmt::{self, Debug, Display}; + +pub(crate) enum ErrorKind { + Empty, + UnexpectedEnd(Position), + UnexpectedChar(Position, char), + UnexpectedCharAfter(Position, char), + ExpectedCommaFound(Position, char), + LeadingZero(Position), + Overflow(Position), + EmptySegment(Position), + IllegalCharacter(Position), + WildcardNotTheOnlyComparator(char), + UnexpectedAfterWildcard, + ExcessiveComparators, +} + +#[derive(Copy, Clone, Eq, PartialEq)] +pub(crate) enum Position { + Major, + Minor, + Patch, + Pre, + Build, +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl std::error::Error for Error {} + +impl 
Display for Error { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match &self.kind { + ErrorKind::Empty => formatter.write_str("empty string, expected a semver version"), + ErrorKind::UnexpectedEnd(pos) => { + write!(formatter, "unexpected end of input while parsing {}", pos) + } + ErrorKind::UnexpectedChar(pos, ch) => { + write!( + formatter, + "unexpected character {} while parsing {}", + QuotedChar(*ch), + pos, + ) + } + ErrorKind::UnexpectedCharAfter(pos, ch) => { + write!( + formatter, + "unexpected character {} after {}", + QuotedChar(*ch), + pos, + ) + } + ErrorKind::ExpectedCommaFound(pos, ch) => { + write!( + formatter, + "expected comma after {}, found {}", + pos, + QuotedChar(*ch), + ) + } + ErrorKind::LeadingZero(pos) => { + write!(formatter, "invalid leading zero in {}", pos) + } + ErrorKind::Overflow(pos) => { + write!(formatter, "value of {} exceeds u64::MAX", pos) + } + ErrorKind::EmptySegment(pos) => { + write!(formatter, "empty identifier segment in {}", pos) + } + ErrorKind::IllegalCharacter(pos) => { + write!(formatter, "unexpected character in {}", pos) + } + ErrorKind::WildcardNotTheOnlyComparator(ch) => { + write!( + formatter, + "wildcard req ({}) must be the only comparator in the version req", + ch, + ) + } + ErrorKind::UnexpectedAfterWildcard => { + formatter.write_str("unexpected character after wildcard in version req") + } + ErrorKind::ExcessiveComparators => { + formatter.write_str("excessive number of version comparators") + } + } + } +} + +impl Display for Position { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(match self { + Position::Major => "major version number", + Position::Minor => "minor version number", + Position::Patch => "patch version number", + Position::Pre => "pre-release identifier", + Position::Build => "build metadata", + }) + } +} + +impl Debug for Error { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + 
formatter.write_str("Error(\"")?; + Display::fmt(self, formatter)?; + formatter.write_str("\")")?; + Ok(()) + } +} + +struct QuotedChar(char); + +impl Display for QuotedChar { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + // Standard library versions prior to https://github.com/rust-lang/rust/pull/95345 + // print character 0 as '\u{0}'. We prefer '\0' to keep error messages + // the same across all supported Rust versions. + if self.0 == '\0' { + formatter.write_str("'\\0'") + } else { + write!(formatter, "{:?}", self.0) + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/eval.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/eval.rs new file mode 100644 index 0000000000000000000000000000000000000000..270c693975f5c11e7ec3b14faf5040323d4721d9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/eval.rs @@ -0,0 +1,179 @@ +use crate::{Comparator, Op, Version, VersionReq}; + +pub(crate) fn matches_req(req: &VersionReq, ver: &Version) -> bool { + for cmp in &req.comparators { + if !matches_impl(cmp, ver) { + return false; + } + } + + if ver.pre.is_empty() { + return true; + } + + // If a version has a prerelease tag (for example, 1.2.3-alpha.3) then it + // will only be allowed to satisfy req if at least one comparator with the + // same major.minor.patch also has a prerelease tag. 
+ for cmp in &req.comparators { + if pre_is_compatible(cmp, ver) { + return true; + } + } + + false +} + +pub(crate) fn matches_comparator(cmp: &Comparator, ver: &Version) -> bool { + matches_impl(cmp, ver) && (ver.pre.is_empty() || pre_is_compatible(cmp, ver)) +} + +fn matches_impl(cmp: &Comparator, ver: &Version) -> bool { + match cmp.op { + Op::Exact | Op::Wildcard => matches_exact(cmp, ver), + Op::Greater => matches_greater(cmp, ver), + Op::GreaterEq => matches_exact(cmp, ver) || matches_greater(cmp, ver), + Op::Less => matches_less(cmp, ver), + Op::LessEq => matches_exact(cmp, ver) || matches_less(cmp, ver), + Op::Tilde => matches_tilde(cmp, ver), + Op::Caret => matches_caret(cmp, ver), + } +} + +fn matches_exact(cmp: &Comparator, ver: &Version) -> bool { + if ver.major != cmp.major { + return false; + } + + if let Some(minor) = cmp.minor { + if ver.minor != minor { + return false; + } + } + + if let Some(patch) = cmp.patch { + if ver.patch != patch { + return false; + } + } + + ver.pre == cmp.pre +} + +fn matches_greater(cmp: &Comparator, ver: &Version) -> bool { + if ver.major != cmp.major { + return ver.major > cmp.major; + } + + match cmp.minor { + None => return false, + Some(minor) => { + if ver.minor != minor { + return ver.minor > minor; + } + } + } + + match cmp.patch { + None => return false, + Some(patch) => { + if ver.patch != patch { + return ver.patch > patch; + } + } + } + + ver.pre > cmp.pre +} + +fn matches_less(cmp: &Comparator, ver: &Version) -> bool { + if ver.major != cmp.major { + return ver.major < cmp.major; + } + + match cmp.minor { + None => return false, + Some(minor) => { + if ver.minor != minor { + return ver.minor < minor; + } + } + } + + match cmp.patch { + None => return false, + Some(patch) => { + if ver.patch != patch { + return ver.patch < patch; + } + } + } + + ver.pre < cmp.pre +} + +fn matches_tilde(cmp: &Comparator, ver: &Version) -> bool { + if ver.major != cmp.major { + return false; + } + + if let Some(minor) = 
cmp.minor { + if ver.minor != minor { + return false; + } + } + + if let Some(patch) = cmp.patch { + if ver.patch != patch { + return ver.patch > patch; + } + } + + ver.pre >= cmp.pre +} + +fn matches_caret(cmp: &Comparator, ver: &Version) -> bool { + if ver.major != cmp.major { + return false; + } + + let minor = match cmp.minor { + None => return true, + Some(minor) => minor, + }; + + let patch = match cmp.patch { + None => { + if cmp.major > 0 { + return ver.minor >= minor; + } else { + return ver.minor == minor; + } + } + Some(patch) => patch, + }; + + if cmp.major > 0 { + if ver.minor != minor { + return ver.minor > minor; + } else if ver.patch != patch { + return ver.patch > patch; + } + } else if minor > 0 { + if ver.minor != minor { + return false; + } else if ver.patch != patch { + return ver.patch > patch; + } + } else if ver.minor != minor || ver.patch != patch { + return false; + } + + ver.pre >= cmp.pre +} + +fn pre_is_compatible(cmp: &Comparator, ver: &Version) -> bool { + cmp.major == ver.major + && cmp.minor == Some(ver.minor) + && cmp.patch == Some(ver.patch) + && !cmp.pre.is_empty() +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/identifier.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/identifier.rs new file mode 100644 index 0000000000000000000000000000000000000000..862cd87b080fd8feb29510a24b412d5717515765 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/identifier.rs @@ -0,0 +1,412 @@ +// This module implements Identifier, a short-optimized string allowed to +// contain only the ASCII characters hyphen, dot, 0-9, A-Z, a-z. 
+// +// As of mid-2021, the distribution of pre-release lengths on crates.io is: +// +// length count length count length count +// 0 355929 11 81 24 2 +// 1 208 12 48 25 6 +// 2 236 13 55 26 10 +// 3 1909 14 25 27 4 +// 4 1284 15 15 28 1 +// 5 1742 16 35 30 1 +// 6 3440 17 9 31 5 +// 7 5624 18 6 32 1 +// 8 1321 19 12 36 2 +// 9 179 20 2 37 379 +// 10 65 23 11 +// +// and the distribution of build metadata lengths is: +// +// length count length count length count +// 0 364445 8 7725 18 1 +// 1 72 9 16 19 1 +// 2 7 10 85 20 1 +// 3 28 11 17 22 4 +// 4 9 12 10 26 1 +// 5 68 13 9 27 1 +// 6 73 14 10 40 5 +// 7 53 15 6 +// +// Therefore it really behooves us to be able to use the entire 8 bytes of a +// pointer for inline storage. For both pre-release and build metadata there are +// vastly more strings with length exactly 8 bytes than the sum over all lengths +// longer than 8 bytes. +// +// To differentiate the inline representation from the heap allocated long +// representation, we'll allocate heap pointers with 2-byte alignment so that +// they are guaranteed to have an unset least significant bit. Then in the repr +// we store for pointers, we rotate a 1 into the most significant bit of the +// most significant byte, which is never set for an ASCII byte. +// +// Inline repr: +// +// 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx 0xxxxxxx +// +// Heap allocated repr: +// +// 1ppppppp pppppppp pppppppp pppppppp pppppppp pppppppp pppppppp pppppppp 0 +// ^ most significant bit least significant bit of orig ptr, rotated out ^ +// +// Since the most significant bit doubles as a sign bit for the similarly sized +// signed integer type, the CPU has an efficient instruction for inspecting it, +// meaning we can differentiate between an inline repr and a heap allocated repr +// in one instruction. Effectively an inline repr always looks like a positive +// i64 while a heap allocated repr always looks like a negative i64. 
+// +// For the inline repr, we store \0 padding on the end of the stored characters, +// and thus the string length is readily determined efficiently by a cttz (count +// trailing zeros) or bsf (bit scan forward) instruction. +// +// For the heap allocated repr, the length is encoded as a base-128 varint at +// the head of the allocation. +// +// Empty strings are stored as an all-1 bit pattern, corresponding to -1i64. +// Consequently the all-0 bit pattern is never a legal representation in any +// repr, leaving it available as a niche for downstream code. For example this +// allows size_of::() == size_of::>(). + +use crate::alloc::alloc::{alloc, dealloc, handle_alloc_error, Layout}; +use core::mem; +use core::num::{NonZeroU64, NonZeroUsize}; +use core::ptr::{self, NonNull}; +use core::slice; +use core::str; + +const PTR_BYTES: usize = mem::size_of::>(); + +// If pointers are already 8 bytes or bigger, then 0. If pointers are smaller +// than 8 bytes, then Identifier will contain a byte array to raise its size up +// to 8 bytes total. +const TAIL_BYTES: usize = 8 * (PTR_BYTES < 8) as usize - PTR_BYTES * (PTR_BYTES < 8) as usize; + +#[repr(C, align(8))] +pub(crate) struct Identifier { + head: NonNull, + tail: [u8; TAIL_BYTES], +} + +impl Identifier { + pub(crate) const fn empty() -> Self { + // This is a separate constant because unsafe function calls are not + // allowed in a const fn body, only in a const, until later rustc than + // what we support. + const HEAD: NonNull = unsafe { NonNull::new_unchecked(!0 as *mut u8) }; + + // `mov rax, -1` + Identifier { + head: HEAD, + tail: [!0; TAIL_BYTES], + } + } + + // SAFETY: string must be ASCII and not contain \0 bytes. 
+ pub(crate) unsafe fn new_unchecked(string: &str) -> Self { + let len = string.len(); + debug_assert!(len <= isize::MAX as usize); + match len as u64 { + 0 => Self::empty(), + 1..=8 => { + let mut bytes = [0u8; mem::size_of::()]; + // SAFETY: string is big enough to read len bytes, bytes is big + // enough to write len bytes, and they do not overlap. + unsafe { ptr::copy_nonoverlapping(string.as_ptr(), bytes.as_mut_ptr(), len) }; + // SAFETY: the head field is nonzero because the input string + // was at least 1 byte of ASCII and did not contain \0. + unsafe { mem::transmute::<[u8; mem::size_of::()], Identifier>(bytes) } + } + 9..=0xff_ffff_ffff_ffff => { + // SAFETY: len is in a range that does not contain 0. + let size = bytes_for_varint(unsafe { NonZeroUsize::new_unchecked(len) }) + len; + let align = 2; + // On 32-bit and 16-bit architecture, check for size overflowing + // isize::MAX. Making an allocation request bigger than this to + // the allocator is considered UB. All allocations (including + // static ones) are limited to isize::MAX so we're guaranteed + // len <= isize::MAX, and we know bytes_for_varint(len) <= 5 + // because 128**5 > isize::MAX, which means the only problem + // that can arise is when isize::MAX - 5 <= len <= isize::MAX. + // This is pretty much guaranteed to be malicious input so we + // don't need to care about returning a good error message. + if mem::size_of::() < 8 { + let max_alloc = usize::MAX / 2 - align; + assert!(size <= max_alloc); + } + // SAFETY: align is not zero, align is a power of two, and + // rounding size up to align does not overflow isize::MAX. + let layout = unsafe { Layout::from_size_align_unchecked(size, align) }; + // SAFETY: layout's size is nonzero. + let ptr = unsafe { alloc(layout) }; + if ptr.is_null() { + handle_alloc_error(layout); + } + let mut write = ptr; + let mut varint_remaining = len; + while varint_remaining > 0 { + // SAFETY: size is bytes_for_varint(len) bytes + len bytes. 
+ // This is writing the first bytes_for_varint(len) bytes. + unsafe { ptr::write(write, varint_remaining as u8 | 0x80) }; + varint_remaining >>= 7; + // SAFETY: still in bounds of the same allocation. + write = unsafe { write.add(1) }; + } + // SAFETY: size is bytes_for_varint(len) bytes + len bytes. This + // is writing to the last len bytes. + unsafe { ptr::copy_nonoverlapping(string.as_ptr(), write, len) }; + Identifier { + head: ptr_to_repr(ptr), + tail: [0; TAIL_BYTES], + } + } + 0x100_0000_0000_0000..=0xffff_ffff_ffff_ffff => { + unreachable!("please refrain from storing >64 petabytes of text in semver version"); + } + } + } + + pub(crate) fn is_empty(&self) -> bool { + // `cmp rdi, -1` -- basically: `repr as i64 == -1` + let empty = Self::empty(); + let is_empty = self.head == empty.head && self.tail == empty.tail; + // The empty representation does nothing on Drop. We can't let this one + // drop normally because `impl Drop for Identifier` calls is_empty; that + // would be an infinite recursion. + mem::forget(empty); + is_empty + } + + fn is_inline(&self) -> bool { + // `test rdi, rdi` -- basically: `repr as i64 >= 0` + self.head.as_ptr() as usize >> (PTR_BYTES * 8 - 1) == 0 + } + + fn is_empty_or_inline(&self) -> bool { + // `cmp rdi, -2` -- basically: `repr as i64 > -2` + self.is_empty() || self.is_inline() + } + + pub(crate) fn as_str(&self) -> &str { + if self.is_empty() { + "" + } else if self.is_inline() { + // SAFETY: repr is in the inline representation. + unsafe { inline_as_str(self) } + } else { + // SAFETY: repr is in the heap allocated representation. 
+ unsafe { ptr_as_str(&self.head) } + } + } + + pub(crate) fn ptr_eq(&self, rhs: &Self) -> bool { + self.head == rhs.head && self.tail == rhs.tail + } +} + +impl Clone for Identifier { + fn clone(&self) -> Self { + if self.is_empty_or_inline() { + Identifier { + head: self.head, + tail: self.tail, + } + } else { + let ptr = repr_to_ptr(self.head); + // SAFETY: ptr is one of our own heap allocations. + let len = unsafe { decode_len(ptr) }; + let size = bytes_for_varint(len) + len.get(); + let align = 2; + // SAFETY: align is not zero, align is a power of two, and rounding + // size up to align does not overflow isize::MAX. This is just + // duplicating a previous allocation where all of these guarantees + // were already made. + let layout = unsafe { Layout::from_size_align_unchecked(size, align) }; + // SAFETY: layout's size is nonzero. + let clone = unsafe { alloc(layout) }; + if clone.is_null() { + handle_alloc_error(layout); + } + // SAFETY: new allocation cannot overlap the previous one (this was + // not a realloc). The argument ptrs are readable/writeable + // respectively for size bytes. + unsafe { ptr::copy_nonoverlapping(ptr, clone, size) } + Identifier { + head: ptr_to_repr(clone), + tail: [0; TAIL_BYTES], + } + } + } +} + +impl Drop for Identifier { + fn drop(&mut self) { + if self.is_empty_or_inline() { + return; + } + let ptr = repr_to_ptr_mut(self.head); + // SAFETY: ptr is one of our own heap allocations. + let len = unsafe { decode_len(ptr) }; + let size = bytes_for_varint(len) + len.get(); + let align = 2; + // SAFETY: align is not zero, align is a power of two, and rounding + // size up to align does not overflow isize::MAX. These guarantees were + // made when originally allocating this memory. + let layout = unsafe { Layout::from_size_align_unchecked(size, align) }; + // SAFETY: ptr was previously allocated by the same allocator with the + // same layout. 
+ unsafe { dealloc(ptr, layout) } + } +} + +impl PartialEq for Identifier { + fn eq(&self, rhs: &Self) -> bool { + if self.ptr_eq(rhs) { + // Fast path (most common) + true + } else if self.is_empty_or_inline() || rhs.is_empty_or_inline() { + false + } else { + // SAFETY: both reprs are in the heap allocated representation. + unsafe { ptr_as_str(&self.head) == ptr_as_str(&rhs.head) } + } + } +} + +unsafe impl Send for Identifier {} +unsafe impl Sync for Identifier {} + +// We use heap pointers that are 2-byte aligned, meaning they have an +// insignificant 0 in the least significant bit. We take advantage of that +// unneeded bit to rotate a 1 into the most significant bit to make the repr +// distinguishable from ASCII bytes. +fn ptr_to_repr(original: *mut u8) -> NonNull { + // `mov eax, 1` + // `shld rax, rdi, 63` + let modified = (original as usize | 1).rotate_right(1); + + // `original + (modified - original)`, but being mindful of provenance. + let diff = modified.wrapping_sub(original as usize); + let modified = original.wrapping_add(diff); + + // SAFETY: the most significant bit of repr is known to be set, so the value + // is not zero. + unsafe { NonNull::new_unchecked(modified) } +} + +// Shift out the 1 previously placed into the most significant bit of the least +// significant byte. Shift in a low 0 bit to reconstruct the original 2-byte +// aligned pointer. +fn repr_to_ptr(modified: NonNull) -> *const u8 { + // `lea rax, [rdi + rdi]` + let modified = modified.as_ptr(); + let original = (modified as usize) << 1; + + // `modified + (original - modified)`, but being mindful of provenance. + let diff = original.wrapping_sub(modified as usize); + modified.wrapping_add(diff) +} + +fn repr_to_ptr_mut(repr: NonNull) -> *mut u8 { + repr_to_ptr(repr) as *mut u8 +} + +// Compute the length of the inline string, assuming the argument is in short +// string representation. 
Short strings are stored as 1 to 8 nonzero ASCII +// bytes, followed by \0 padding for the remaining bytes. +// +// SAFETY: the identifier must indeed be in the inline representation. +unsafe fn inline_len(repr: &Identifier) -> NonZeroUsize { + // SAFETY: Identifier's layout is align(8) and at least size 8. We're doing + // an aligned read of the first 8 bytes from it. The bytes are not all zero + // because inline strings are at least 1 byte long and cannot contain \0. + let repr = unsafe { ptr::read(repr as *const Identifier as *const NonZeroU64) }; + + #[cfg(target_endian = "little")] + let zero_bits_on_string_end = repr.leading_zeros(); + #[cfg(target_endian = "big")] + let zero_bits_on_string_end = repr.trailing_zeros(); + + let nonzero_bytes = 8 - zero_bits_on_string_end as usize / 8; + + // SAFETY: repr is nonzero, so it has at most 63 zero bits on either end, + // thus at least one nonzero byte. + unsafe { NonZeroUsize::new_unchecked(nonzero_bytes) } +} + +// SAFETY: repr must be in the inline representation, i.e. at least 1 and at +// most 8 nonzero ASCII bytes padded on the end with \0 bytes. +unsafe fn inline_as_str(repr: &Identifier) -> &str { + let ptr = repr as *const Identifier as *const u8; + let len = unsafe { inline_len(repr) }.get(); + // SAFETY: we are viewing the nonzero ASCII prefix of the inline repr's + // contents as a slice of bytes. Input/output lifetimes are correctly + // associated. + let slice = unsafe { slice::from_raw_parts(ptr, len) }; + // SAFETY: the string contents are known to be only ASCII bytes, which are + // always valid UTF-8. + unsafe { str::from_utf8_unchecked(slice) } +} + +// Decode varint. Varints consist of between one and eight base-128 digits, each +// of which is stored in a byte with most significant bit set. Adjacent to the +// varint in memory there is guaranteed to be at least 9 ASCII bytes, each of +// which has an unset most significant bit. 
+// +// SAFETY: ptr must be one of our own heap allocations, with the varint header +// already written. +unsafe fn decode_len(ptr: *const u8) -> NonZeroUsize { + // SAFETY: There is at least one byte of varint followed by at least 9 bytes + // of string content, which is at least 10 bytes total for the allocation, + // so reading the first two is no problem. + let [first, second] = unsafe { ptr::read(ptr as *const [u8; 2]) }; + if second < 0x80 { + // SAFETY: the length of this heap allocated string has been encoded as + // one base-128 digit, so the length is at least 9 and at most 127. It + // cannot be zero. + unsafe { NonZeroUsize::new_unchecked((first & 0x7f) as usize) } + } else { + return unsafe { decode_len_cold(ptr) }; + + // Identifiers 128 bytes or longer. This is not exercised by any crate + // version currently published to crates.io. + #[cold] + #[inline(never)] + unsafe fn decode_len_cold(mut ptr: *const u8) -> NonZeroUsize { + let mut len = 0; + let mut shift = 0; + loop { + // SAFETY: varint continues while there are bytes having the + // most significant bit set, i.e. until we start hitting the + // ASCII string content with msb unset. + let byte = unsafe { *ptr }; + if byte < 0x80 { + // SAFETY: the string length is known to be 128 bytes or + // longer. + return unsafe { NonZeroUsize::new_unchecked(len) }; + } + // SAFETY: still in bounds of the same allocation. + ptr = unsafe { ptr.add(1) }; + len += ((byte & 0x7f) as usize) << shift; + shift += 7; + } + } + } +} + +// SAFETY: repr must be in the heap allocated representation, with varint header +// and string contents already written. +unsafe fn ptr_as_str(repr: &NonNull) -> &str { + let ptr = repr_to_ptr(*repr); + let len = unsafe { decode_len(ptr) }; + let header = bytes_for_varint(len); + let slice = unsafe { slice::from_raw_parts(ptr.add(header), len.get()) }; + // SAFETY: all identifier contents are ASCII bytes, which are always valid + // UTF-8. 
+ unsafe { str::from_utf8_unchecked(slice) } +} + +// Number of base-128 digits required for the varint representation of a length. +fn bytes_for_varint(len: NonZeroUsize) -> usize { + let usize_bits = mem::size_of::() * 8; + let len_bits = usize_bits - len.leading_zeros() as usize; + (len_bits + 6) / 7 +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/impls.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..cdf93c9df09b54d2ea7f974c14a3467576d6f6d0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/impls.rs @@ -0,0 +1,163 @@ +use crate::identifier::Identifier; +use crate::{BuildMetadata, Comparator, Prerelease, VersionReq}; +use alloc::vec::Vec; +use core::cmp::Ordering; +use core::hash::{Hash, Hasher}; +use core::iter::FromIterator; +use core::ops::Deref; + +impl Default for Identifier { + fn default() -> Self { + Identifier::empty() + } +} + +impl Eq for Identifier {} + +impl Hash for Identifier { + fn hash(&self, hasher: &mut H) { + self.as_str().hash(hasher); + } +} + +impl Deref for Prerelease { + type Target = str; + + fn deref(&self) -> &Self::Target { + self.identifier.as_str() + } +} + +impl Deref for BuildMetadata { + type Target = str; + + fn deref(&self) -> &Self::Target { + self.identifier.as_str() + } +} + +impl PartialOrd for Prerelease { + fn partial_cmp(&self, rhs: &Self) -> Option { + Some(self.cmp(rhs)) + } +} + +impl PartialOrd for BuildMetadata { + fn partial_cmp(&self, rhs: &Self) -> Option { + Some(self.cmp(rhs)) + } +} + +impl Ord for Prerelease { + fn cmp(&self, rhs: &Self) -> Ordering { + if self.identifier.ptr_eq(&rhs.identifier) { + return Ordering::Equal; + } + + match self.is_empty() { + // A real release compares greater than prerelease. + true => return Ordering::Greater, + // Prerelease compares less than the real release. 
+ false if rhs.is_empty() => return Ordering::Less, + false => {} + } + + let lhs = self.as_str().split('.'); + let mut rhs = rhs.as_str().split('.'); + + for lhs in lhs { + let rhs = match rhs.next() { + // Spec: "A larger set of pre-release fields has a higher + // precedence than a smaller set, if all of the preceding + // identifiers are equal." + None => return Ordering::Greater, + Some(rhs) => rhs, + }; + + let string_cmp = || Ord::cmp(lhs, rhs); + let is_ascii_digit = |b: u8| b.is_ascii_digit(); + let ordering = match ( + lhs.bytes().all(is_ascii_digit), + rhs.bytes().all(is_ascii_digit), + ) { + // Respect numeric ordering, for example 99 < 100. Spec says: + // "Identifiers consisting of only digits are compared + // numerically." + (true, true) => Ord::cmp(&lhs.len(), &rhs.len()).then_with(string_cmp), + // Spec: "Numeric identifiers always have lower precedence than + // non-numeric identifiers." + (true, false) => return Ordering::Less, + (false, true) => return Ordering::Greater, + // Spec: "Identifiers with letters or hyphens are compared + // lexically in ASCII sort order." 
+ (false, false) => string_cmp(), + }; + + if ordering != Ordering::Equal { + return ordering; + } + } + + if rhs.next().is_none() { + Ordering::Equal + } else { + Ordering::Less + } + } +} + +impl Ord for BuildMetadata { + fn cmp(&self, rhs: &Self) -> Ordering { + if self.identifier.ptr_eq(&rhs.identifier) { + return Ordering::Equal; + } + + let lhs = self.as_str().split('.'); + let mut rhs = rhs.as_str().split('.'); + + for lhs in lhs { + let rhs = match rhs.next() { + None => return Ordering::Greater, + Some(rhs) => rhs, + }; + + let is_ascii_digit = |b: u8| b.is_ascii_digit(); + let ordering = match ( + lhs.bytes().all(is_ascii_digit), + rhs.bytes().all(is_ascii_digit), + ) { + (true, true) => { + // 0 < 00 < 1 < 01 < 001 < 2 < 02 < 002 < 10 + let lhval = lhs.trim_start_matches('0'); + let rhval = rhs.trim_start_matches('0'); + Ord::cmp(&lhval.len(), &rhval.len()) + .then_with(|| Ord::cmp(lhval, rhval)) + .then_with(|| Ord::cmp(&lhs.len(), &rhs.len())) + } + (true, false) => return Ordering::Less, + (false, true) => return Ordering::Greater, + (false, false) => Ord::cmp(lhs, rhs), + }; + + if ordering != Ordering::Equal { + return ordering; + } + } + + if rhs.next().is_none() { + Ordering::Equal + } else { + Ordering::Less + } + } +} + +impl FromIterator for VersionReq { + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let comparators = Vec::from_iter(iter); + VersionReq { comparators } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a3605628303df5f9d06cb6f1acc0df0c50cf809 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/lib.rs @@ -0,0 +1,570 @@ +//! [![github]](https://github.com/dtolnay/semver) [![crates-io]](https://crates.io/crates/semver) [![docs-rs]](https://docs.rs/semver) +//! +//! 
[github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs +//! +//!
+//! +//! A parser and evaluator for Cargo's flavor of Semantic Versioning. +//! +//! Semantic Versioning (see ) is a guideline for how +//! version numbers are assigned and incremented. It is widely followed within +//! the Cargo/crates.io ecosystem for Rust. +//! +//!
+//! +//! # Example +//! +//! ``` +//! use semver::{BuildMetadata, Prerelease, Version, VersionReq}; +//! +//! fn main() { +//! let req = VersionReq::parse(">=1.2.3, <1.8.0").unwrap(); +//! +//! // Check whether this requirement matches version 1.2.3-alpha.1 (no) +//! let version = Version { +//! major: 1, +//! minor: 2, +//! patch: 3, +//! pre: Prerelease::new("alpha.1").unwrap(), +//! build: BuildMetadata::EMPTY, +//! }; +//! assert!(!req.matches(&version)); +//! +//! // Check whether it matches 1.3.0 (yes it does) +//! let version = Version::parse("1.3.0").unwrap(); +//! assert!(req.matches(&version)); +//! } +//! ``` +//! +//!

+//! +//! # Scope of this crate +//! +//! Besides Cargo, several other package ecosystems and package managers for +//! other languages also use SemVer: RubyGems/Bundler for Ruby, npm for +//! JavaScript, Composer for PHP, CocoaPods for Objective-C... +//! +//! The `semver` crate is specifically intended to implement Cargo's +//! interpretation of Semantic Versioning. +//! +//! Where the various tools differ in their interpretation or implementation of +//! the spec, this crate follows the implementation choices made by Cargo. If +//! you are operating on version numbers from some other package ecosystem, you +//! will want to use a different semver library which is appropriate to that +//! ecosystem. +//! +//! The extent of Cargo's SemVer support is documented in the *[Specifying +//! Dependencies]* chapter of the Cargo reference. +//! +//! [Specifying Dependencies]: https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html + +#![doc(html_root_url = "https://docs.rs/semver/1.0.27")] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] +#![deny(unsafe_op_in_unsafe_fn)] +#![allow( + clippy::cast_lossless, + clippy::cast_possible_truncation, + clippy::checked_conversions, + clippy::doc_markdown, + clippy::incompatible_msrv, + clippy::items_after_statements, + clippy::manual_map, + clippy::manual_range_contains, + clippy::match_bool, + clippy::missing_errors_doc, + clippy::must_use_candidate, + clippy::needless_doctest_main, + clippy::ptr_as_ptr, + clippy::redundant_else, + clippy::semicolon_if_nothing_returned, // https://github.com/rust-lang/rust-clippy/issues/7324 + clippy::similar_names, + clippy::uninlined_format_args, + clippy::unnested_or_patterns, + clippy::unseparated_literal_suffix, + clippy::wildcard_imports +)] + +extern crate alloc; + +mod display; +mod error; +mod eval; +mod identifier; +mod impls; +mod parse; + +#[cfg(feature = "serde")] +mod serde; + +use crate::identifier::Identifier; +use alloc::vec::Vec; 
+use core::cmp::Ordering; +use core::str::FromStr; + +pub use crate::parse::Error; + +/// **SemVer version** as defined by . +/// +/// # Syntax +/// +/// - The major, minor, and patch numbers may be any integer 0 through u64::MAX. +/// When representing a SemVer version as a string, each number is written as +/// a base 10 integer. For example, `1.0.119`. +/// +/// - Leading zeros are forbidden in those positions. For example `1.01.00` is +/// invalid as a SemVer version. +/// +/// - The pre-release identifier, if present, must conform to the syntax +/// documented for [`Prerelease`]. +/// +/// - The build metadata, if present, must conform to the syntax documented for +/// [`BuildMetadata`]. +/// +/// - Whitespace is not allowed anywhere in the version. +/// +/// # Total ordering +/// +/// Given any two SemVer versions, one is less than, greater than, or equal to +/// the other. Versions may be compared against one another using Rust's usual +/// comparison operators. +/// +/// - The major, minor, and patch number are compared numerically from left to +/// right, lexicographically ordered as a 3-tuple of integers. So for example +/// version `1.5.0` is less than version `1.19.0`, despite the fact that +/// "1.19.0" < "1.5.0" as ASCIIbetically compared strings and 1.19 < 1.5 +/// as real numbers. +/// +/// - When major, minor, and patch are equal, a pre-release version is +/// considered less than the ordinary release: version `1.0.0-alpha.1` is +/// less than version `1.0.0`. +/// +/// - Two pre-releases of the same major, minor, patch are compared by +/// lexicographic ordering of dot-separated components of the pre-release +/// string. +/// +/// - Identifiers consisting of only digits are compared +/// numerically: `1.0.0-pre.8` is less than `1.0.0-pre.12`. +/// +/// - Identifiers that contain a letter or hyphen are compared in ASCII sort +/// order: `1.0.0-pre12` is less than `1.0.0-pre8`. 
+/// +/// - Any numeric identifier is always less than any non-numeric +/// identifier: `1.0.0-pre.1` is less than `1.0.0-pre.x`. +/// +/// Example: `1.0.0-alpha` < `1.0.0-alpha.1` < `1.0.0-alpha.beta` < `1.0.0-beta` < `1.0.0-beta.2` < `1.0.0-beta.11` < `1.0.0-rc.1` < `1.0.0` +#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct Version { + pub major: u64, + pub minor: u64, + pub patch: u64, + pub pre: Prerelease, + pub build: BuildMetadata, +} + +/// **SemVer version requirement** describing the intersection of some version +/// comparators, such as `>=1.2.3, <1.8`. +/// +/// # Syntax +/// +/// - Either `*` (meaning "any"), or one or more comma-separated comparators. +/// +/// - A [`Comparator`] is an operator ([`Op`]) and a partial version, separated +/// by optional whitespace. For example `>=1.0.0` or `>=1.0`. +/// +/// - Build metadata is syntactically permitted on the partial versions, but is +/// completely ignored, as it's never relevant to whether any comparator +/// matches a particular version. +/// +/// - Whitespace is permitted around commas and around operators. Whitespace is +/// not permitted within a partial version, i.e. anywhere between the major +/// version number and its minor, patch, pre-release, or build metadata. +#[derive(Clone, Eq, PartialEq, Hash, Debug)] +pub struct VersionReq { + pub comparators: Vec, +} + +/// A pair of comparison operator and partial version, such as `>=1.2`. Forms +/// one piece of a VersionReq. +#[derive(Clone, Eq, PartialEq, Hash, Debug)] +pub struct Comparator { + pub op: Op, + pub major: u64, + pub minor: Option, + /// Patch is only allowed if minor is Some. + pub patch: Option, + /// Non-empty pre-release is only allowed if patch is Some. + pub pre: Prerelease, +} + +/// SemVer comparison operator: `=`, `>`, `>=`, `<`, `<=`, `~`, `^`, `*`. 
+/// +/// # Op::Exact +/// -  **`=I.J.K`** — exactly the version I.J.K +/// -  **`=I.J`** — equivalent to `>=I.J.0, =I.0.0, <(I+1).0.0` +/// +/// # Op::Greater +/// -  **`>I.J.K`** +/// -  **`>I.J`** — equivalent to `>=I.(J+1).0` +/// -  **`>I`** — equivalent to `>=(I+1).0.0` +/// +/// # Op::GreaterEq +/// -  **`>=I.J.K`** +/// -  **`>=I.J`** — equivalent to `>=I.J.0` +/// -  **`>=I`** — equivalent to `>=I.0.0` +/// +/// # Op::Less +/// -  **`=I.J.K, 0) — equivalent to `>=I.J.K, <(I+1).0.0` +/// -  **`^0.J.K`** (for J\>0) — equivalent to `>=0.J.K, <0.(J+1).0` +/// -  **`^0.0.K`** — equivalent to `=0.0.K` +/// -  **`^I.J`** (for I\>0 or J\>0) — equivalent to `^I.J.0` +/// -  **`^0.0`** — equivalent to `=0.0` +/// -  **`^I`** — equivalent to `=I` +/// +/// # Op::Wildcard +/// -  **`I.J.*`** — equivalent to `=I.J` +/// -  **`I.*`** or **`I.*.*`** — equivalent to `=I` +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +#[non_exhaustive] +pub enum Op { + Exact, + Greater, + GreaterEq, + Less, + LessEq, + Tilde, + Caret, + Wildcard, +} + +/// Optional pre-release identifier on a version string. This comes after `-` in +/// a SemVer version, like `1.0.0-alpha.1` +/// +/// # Examples +/// +/// Some real world pre-release idioms drawn from crates.io: +/// +/// - **[mio]** 0.7.0-alpha.1 — the most common style +/// for numbering pre-releases. +/// +/// - **[pest]** 1.0.0-beta.8, 1.0.0-rc.0 +/// — this crate makes a distinction between betas and release +/// candidates. +/// +/// - **[sassers]** 0.11.0-shitshow — ???. +/// +/// - **[atomic-utils]** 0.0.0-reserved — a squatted +/// crate name. 
+/// +/// [mio]: https://crates.io/crates/mio +/// [pest]: https://crates.io/crates/pest +/// [atomic-utils]: https://crates.io/crates/atomic-utils +/// [sassers]: https://crates.io/crates/sassers +/// +/// *Tip:* Be aware that if you are planning to number your own pre-releases, +/// you should prefer to separate the numeric part from any non-numeric +/// identifiers by using a dot in between. That is, prefer pre-releases +/// `alpha.1`, `alpha.2`, etc rather than `alpha1`, `alpha2` etc. The SemVer +/// spec's rule for pre-release precedence has special treatment of numeric +/// components in the pre-release string, but only if there are no non-digit +/// characters in the same dot-separated component. So you'd have `alpha.2` < +/// `alpha.11` as intended, but `alpha11` < `alpha2`. +/// +/// # Syntax +/// +/// Pre-release strings are a series of dot separated identifiers immediately +/// following the patch version. Identifiers must comprise only ASCII +/// alphanumerics and hyphens: `0-9`, `A-Z`, `a-z`, `-`. Identifiers must not be +/// empty. Numeric identifiers must not include leading zeros. +/// +/// # Total ordering +/// +/// Pre-releases have a total order defined by the SemVer spec. It uses +/// lexicographic ordering of dot-separated components. Identifiers consisting +/// of only digits are compared numerically. Otherwise, identifiers are compared +/// in ASCII sort order. Any numeric identifier is always less than any +/// non-numeric identifier. +/// +/// Example: `alpha` < `alpha.85` < `alpha.90` < `alpha.200` < `alpha.0a` < `alpha.1a0` < `alpha.a` < `beta` +#[derive(Default, Clone, Eq, PartialEq, Hash)] +pub struct Prerelease { + identifier: Identifier, +} + +/// Optional build metadata identifier. This comes after `+` in a SemVer +/// version, as in `0.8.1+zstd.1.5.0`. 
+/// +/// # Examples +/// +/// Some real world build metadata idioms drawn from crates.io: +/// +/// - **[libgit2-sys]** 0.12.20+1.1.0 — for this +/// crate, the build metadata indicates the version of the C libgit2 library +/// that the Rust crate is built against. +/// +/// - **[mashup]** 0.1.13+deprecated — just the word +/// "deprecated" for a crate that has been superseded by another. Eventually +/// people will take notice of this in Cargo's build output where it lists the +/// crates being compiled. +/// +/// - **[google-bigquery2]** 2.0.4+20210327 — this +/// library is automatically generated from an official API schema, and the +/// build metadata indicates the date on which that schema was last captured. +/// +/// - **[fbthrift-git]** 0.0.6+c7fcc0e — this crate is +/// published from snapshots of a big company monorepo. In monorepo +/// development, there is no concept of versions, and all downstream code is +/// just updated atomically in the same commit that breaking changes to a +/// library are landed. Therefore for crates.io purposes, every published +/// version must be assumed to be incompatible with the previous. The build +/// metadata provides the source control hash of the snapshotted code. +/// +/// [libgit2-sys]: https://crates.io/crates/libgit2-sys +/// [mashup]: https://crates.io/crates/mashup +/// [google-bigquery2]: https://crates.io/crates/google-bigquery2 +/// [fbthrift-git]: https://crates.io/crates/fbthrift-git +/// +/// # Syntax +/// +/// Build metadata is a series of dot separated identifiers immediately +/// following the patch or pre-release version. Identifiers must comprise only +/// ASCII alphanumerics and hyphens: `0-9`, `A-Z`, `a-z`, `-`. Identifiers must +/// not be empty. Leading zeros *are* allowed, unlike any other place in the +/// SemVer grammar. 
+/// +/// # Total ordering +/// +/// Build metadata is ignored in evaluating `VersionReq`; it plays no role in +/// whether a `Version` matches any one of the comparison operators. +/// +/// However for comparing build metadatas among one another, they do have a +/// total order which is determined by lexicographic ordering of dot-separated +/// components. Identifiers consisting of only digits are compared numerically. +/// Otherwise, identifiers are compared in ASCII sort order. Any numeric +/// identifier is always less than any non-numeric identifier. +/// +/// Example: `demo` < `demo.85` < `demo.90` < `demo.090` < `demo.200` < `demo.1a0` < `demo.a` < `memo` +#[derive(Default, Clone, Eq, PartialEq, Hash)] +pub struct BuildMetadata { + identifier: Identifier, +} + +impl Version { + /// Create `Version` with an empty pre-release and build metadata. + /// + /// Equivalent to: + /// + /// ``` + /// # use semver::{BuildMetadata, Prerelease, Version}; + /// # + /// # const fn new(major: u64, minor: u64, patch: u64) -> Version { + /// Version { + /// major, + /// minor, + /// patch, + /// pre: Prerelease::EMPTY, + /// build: BuildMetadata::EMPTY, + /// } + /// # } + /// ``` + pub const fn new(major: u64, minor: u64, patch: u64) -> Self { + Version { + major, + minor, + patch, + pre: Prerelease::EMPTY, + build: BuildMetadata::EMPTY, + } + } + + /// Create `Version` by parsing from string representation. + /// + /// # Errors + /// + /// Possible reasons for the parse to fail include: + /// + /// - `1.0` — too few numeric components. A SemVer version must have + /// exactly three. If you are looking at something that has fewer than + /// three numbers in it, it's possible it is a `VersionReq` instead (with + /// an implicit default `^` comparison operator). + /// + /// - `1.0.01` — a numeric component has a leading zero. + /// + /// - `1.0.unknown` — unexpected character in one of the components. 
+ /// + /// - `1.0.0-` or `1.0.0+` — the pre-release or build metadata are + /// indicated present but empty. + /// + /// - `1.0.0-alpha_123` — pre-release or build metadata have something + /// outside the allowed characters, which are `0-9`, `A-Z`, `a-z`, `-`, + /// and `.` (dot). + /// + /// - `23456789999999999999.0.0` — overflow of a u64. + pub fn parse(text: &str) -> Result { + Version::from_str(text) + } + + /// Compare the major, minor, patch, and pre-release value of two versions, + /// disregarding build metadata. Versions that differ only in build metadata + /// are considered equal. This comparison is what the SemVer spec refers to + /// as "precedence". + /// + /// # Example + /// + /// ``` + /// use semver::Version; + /// + /// let mut versions = [ + /// "1.20.0+c144a98".parse::().unwrap(), + /// "1.20.0".parse().unwrap(), + /// "1.0.0".parse().unwrap(), + /// "1.0.0-alpha".parse().unwrap(), + /// "1.20.0+bc17664".parse().unwrap(), + /// ]; + /// + /// // This is a stable sort, so it preserves the relative order of equal + /// // elements. The three 1.20.0 versions differ only in build metadata so + /// // they are not reordered relative to one another. + /// versions.sort_by(Version::cmp_precedence); + /// assert_eq!(versions, [ + /// "1.0.0-alpha".parse().unwrap(), + /// "1.0.0".parse().unwrap(), + /// "1.20.0+c144a98".parse().unwrap(), + /// "1.20.0".parse().unwrap(), + /// "1.20.0+bc17664".parse().unwrap(), + /// ]); + /// + /// // Totally order the versions, including comparing the build metadata. 
+ /// versions.sort(); + /// assert_eq!(versions, [ + /// "1.0.0-alpha".parse().unwrap(), + /// "1.0.0".parse().unwrap(), + /// "1.20.0".parse().unwrap(), + /// "1.20.0+bc17664".parse().unwrap(), + /// "1.20.0+c144a98".parse().unwrap(), + /// ]); + /// ``` + pub fn cmp_precedence(&self, other: &Self) -> Ordering { + Ord::cmp( + &(self.major, self.minor, self.patch, &self.pre), + &(other.major, other.minor, other.patch, &other.pre), + ) + } +} + +impl VersionReq { + /// A `VersionReq` with no constraint on the version numbers it matches. + /// Equivalent to `VersionReq::parse("*").unwrap()`. + /// + /// In terms of comparators this is equivalent to `>=0.0.0`. + /// + /// Counterintuitively a `*` VersionReq does not match every possible + /// version number. In particular, in order for *any* `VersionReq` to match + /// a pre-release version, the `VersionReq` must contain at least one + /// `Comparator` that has an explicit major, minor, and patch version + /// identical to the pre-release being matched, and that has a nonempty + /// pre-release component. Since `*` is not written with an explicit major, + /// minor, and patch version, and does not contain a nonempty pre-release + /// component, it does not match any pre-release versions. + pub const STAR: Self = VersionReq { + comparators: Vec::new(), + }; + + /// Create `VersionReq` by parsing from string representation. + /// + /// # Errors + /// + /// Possible reasons for the parse to fail include: + /// + /// - `>a.b` — unexpected characters in the partial version. + /// + /// - `@1.0.0` — unrecognized comparison operator. + /// + /// - `^1.0.0, ` — unexpected end of input. + /// + /// - `>=1.0 <2.0` — missing comma between comparators. + /// + /// - `*.*` — unsupported wildcard syntax. + pub fn parse(text: &str) -> Result { + VersionReq::from_str(text) + } + + /// Evaluate whether the given `Version` satisfies the version requirement + /// described by `self`. 
+ pub fn matches(&self, version: &Version) -> bool { + eval::matches_req(self, version) + } +} + +/// The default VersionReq is the same as [`VersionReq::STAR`]. +impl Default for VersionReq { + fn default() -> Self { + VersionReq::STAR + } +} + +impl Comparator { + pub fn parse(text: &str) -> Result { + Comparator::from_str(text) + } + + pub fn matches(&self, version: &Version) -> bool { + eval::matches_comparator(self, version) + } +} + +impl Prerelease { + pub const EMPTY: Self = Prerelease { + identifier: Identifier::empty(), + }; + + pub fn new(text: &str) -> Result { + Prerelease::from_str(text) + } + + pub fn as_str(&self) -> &str { + self.identifier.as_str() + } + + pub fn is_empty(&self) -> bool { + self.identifier.is_empty() + } +} + +impl BuildMetadata { + pub const EMPTY: Self = BuildMetadata { + identifier: Identifier::empty(), + }; + + pub fn new(text: &str) -> Result { + BuildMetadata::from_str(text) + } + + pub fn as_str(&self) -> &str { + self.identifier.as_str() + } + + pub fn is_empty(&self) -> bool { + self.identifier.is_empty() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/parse.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/parse.rs new file mode 100644 index 0000000000000000000000000000000000000000..12e8840044480d37e1969ff1283c0def3863d0b3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/parse.rs @@ -0,0 +1,404 @@ +use crate::error::{ErrorKind, Position}; +use crate::identifier::Identifier; +use crate::{BuildMetadata, Comparator, Op, Prerelease, Version, VersionReq}; +use alloc::vec::Vec; +use core::str::FromStr; + +/// Error parsing a SemVer version or version requirement. 
+/// +/// # Example +/// +/// ``` +/// use semver::Version; +/// +/// fn main() { +/// let err = Version::parse("1.q.r").unwrap_err(); +/// +/// // "unexpected character 'q' while parsing minor version number" +/// eprintln!("{}", err); +/// } +/// ``` +pub struct Error { + pub(crate) kind: ErrorKind, +} + +impl FromStr for Version { + type Err = Error; + + fn from_str(text: &str) -> Result { + if text.is_empty() { + return Err(Error::new(ErrorKind::Empty)); + } + + let mut pos = Position::Major; + let (major, text) = numeric_identifier(text, pos)?; + let text = dot(text, pos)?; + + pos = Position::Minor; + let (minor, text) = numeric_identifier(text, pos)?; + let text = dot(text, pos)?; + + pos = Position::Patch; + let (patch, text) = numeric_identifier(text, pos)?; + + if text.is_empty() { + return Ok(Version::new(major, minor, patch)); + } + + let (pre, text) = if let Some(text) = text.strip_prefix('-') { + pos = Position::Pre; + let (pre, text) = prerelease_identifier(text)?; + if pre.is_empty() { + return Err(Error::new(ErrorKind::EmptySegment(pos))); + } + (pre, text) + } else { + (Prerelease::EMPTY, text) + }; + + let (build, text) = if let Some(text) = text.strip_prefix('+') { + pos = Position::Build; + let (build, text) = build_identifier(text)?; + if build.is_empty() { + return Err(Error::new(ErrorKind::EmptySegment(pos))); + } + (build, text) + } else { + (BuildMetadata::EMPTY, text) + }; + + if let Some(unexpected) = text.chars().next() { + return Err(Error::new(ErrorKind::UnexpectedCharAfter(pos, unexpected))); + } + + Ok(Version { + major, + minor, + patch, + pre, + build, + }) + } +} + +impl FromStr for VersionReq { + type Err = Error; + + fn from_str(text: &str) -> Result { + let text = text.trim_start_matches(' '); + if let Some((ch, text)) = wildcard(text) { + let rest = text.trim_start_matches(' '); + if rest.is_empty() { + return Ok(VersionReq::STAR); + } else if rest.starts_with(',') { + return 
Err(Error::new(ErrorKind::WildcardNotTheOnlyComparator(ch))); + } else { + return Err(Error::new(ErrorKind::UnexpectedAfterWildcard)); + } + } + + let depth = 0; + let mut comparators = Vec::new(); + let len = version_req(text, &mut comparators, depth)?; + unsafe { comparators.set_len(len) } + Ok(VersionReq { comparators }) + } +} + +impl FromStr for Comparator { + type Err = Error; + + fn from_str(text: &str) -> Result { + let text = text.trim_start_matches(' '); + let (comparator, pos, rest) = comparator(text)?; + if !rest.is_empty() { + let unexpected = rest.chars().next().unwrap(); + return Err(Error::new(ErrorKind::UnexpectedCharAfter(pos, unexpected))); + } + Ok(comparator) + } +} + +impl FromStr for Prerelease { + type Err = Error; + + fn from_str(text: &str) -> Result { + let (pre, rest) = prerelease_identifier(text)?; + if !rest.is_empty() { + return Err(Error::new(ErrorKind::IllegalCharacter(Position::Pre))); + } + Ok(pre) + } +} + +impl FromStr for BuildMetadata { + type Err = Error; + + fn from_str(text: &str) -> Result { + let (build, rest) = build_identifier(text)?; + if !rest.is_empty() { + return Err(Error::new(ErrorKind::IllegalCharacter(Position::Build))); + } + Ok(build) + } +} + +impl Error { + fn new(kind: ErrorKind) -> Self { + Error { kind } + } +} + +impl Op { + const DEFAULT: Self = Op::Caret; +} + +fn numeric_identifier(input: &str, pos: Position) -> Result<(u64, &str), Error> { + let mut len = 0; + let mut value = 0u64; + + while let Some(&digit) = input.as_bytes().get(len) { + if digit < b'0' || digit > b'9' { + break; + } + if value == 0 && len > 0 { + return Err(Error::new(ErrorKind::LeadingZero(pos))); + } + match value + .checked_mul(10) + .and_then(|value| value.checked_add((digit - b'0') as u64)) + { + Some(sum) => value = sum, + None => return Err(Error::new(ErrorKind::Overflow(pos))), + } + len += 1; + } + + if len > 0 { + Ok((value, &input[len..])) + } else if let Some(unexpected) = input[len..].chars().next() { + 
Err(Error::new(ErrorKind::UnexpectedChar(pos, unexpected))) + } else { + Err(Error::new(ErrorKind::UnexpectedEnd(pos))) + } +} + +fn wildcard(input: &str) -> Option<(char, &str)> { + if let Some(rest) = input.strip_prefix('*') { + Some(('*', rest)) + } else if let Some(rest) = input.strip_prefix('x') { + Some(('x', rest)) + } else if let Some(rest) = input.strip_prefix('X') { + Some(('X', rest)) + } else { + None + } +} + +fn dot(input: &str, pos: Position) -> Result<&str, Error> { + if let Some(rest) = input.strip_prefix('.') { + Ok(rest) + } else if let Some(unexpected) = input.chars().next() { + Err(Error::new(ErrorKind::UnexpectedCharAfter(pos, unexpected))) + } else { + Err(Error::new(ErrorKind::UnexpectedEnd(pos))) + } +} + +fn prerelease_identifier(input: &str) -> Result<(Prerelease, &str), Error> { + let (string, rest) = identifier(input, Position::Pre)?; + let identifier = unsafe { Identifier::new_unchecked(string) }; + Ok((Prerelease { identifier }, rest)) +} + +fn build_identifier(input: &str) -> Result<(BuildMetadata, &str), Error> { + let (string, rest) = identifier(input, Position::Build)?; + let identifier = unsafe { Identifier::new_unchecked(string) }; + Ok((BuildMetadata { identifier }, rest)) +} + +fn identifier(input: &str, pos: Position) -> Result<(&str, &str), Error> { + let mut accumulated_len = 0; + let mut segment_len = 0; + let mut segment_has_nondigit = false; + + loop { + match input.as_bytes().get(accumulated_len + segment_len) { + Some(b'A'..=b'Z') | Some(b'a'..=b'z') | Some(b'-') => { + segment_len += 1; + segment_has_nondigit = true; + } + Some(b'0'..=b'9') => { + segment_len += 1; + } + boundary => { + if segment_len == 0 { + if accumulated_len == 0 && boundary != Some(&b'.') { + return Ok(("", input)); + } else { + return Err(Error::new(ErrorKind::EmptySegment(pos))); + } + } + if pos == Position::Pre + && segment_len > 1 + && !segment_has_nondigit + && input[accumulated_len..].starts_with('0') + { + return 
Err(Error::new(ErrorKind::LeadingZero(pos))); + } + accumulated_len += segment_len; + if boundary == Some(&b'.') { + accumulated_len += 1; + segment_len = 0; + segment_has_nondigit = false; + } else { + return Ok(input.split_at(accumulated_len)); + } + } + } + } +} + +fn op(input: &str) -> (Op, &str) { + let bytes = input.as_bytes(); + if bytes.first() == Some(&b'=') { + (Op::Exact, &input[1..]) + } else if bytes.first() == Some(&b'>') { + if bytes.get(1) == Some(&b'=') { + (Op::GreaterEq, &input[2..]) + } else { + (Op::Greater, &input[1..]) + } + } else if bytes.first() == Some(&b'<') { + if bytes.get(1) == Some(&b'=') { + (Op::LessEq, &input[2..]) + } else { + (Op::Less, &input[1..]) + } + } else if bytes.first() == Some(&b'~') { + (Op::Tilde, &input[1..]) + } else if bytes.first() == Some(&b'^') { + (Op::Caret, &input[1..]) + } else { + (Op::DEFAULT, input) + } +} + +fn comparator(input: &str) -> Result<(Comparator, Position, &str), Error> { + let (mut op, text) = op(input); + let default_op = input.len() == text.len(); + let text = text.trim_start_matches(' '); + + let mut pos = Position::Major; + let (major, text) = numeric_identifier(text, pos)?; + let mut has_wildcard = false; + + let (minor, text) = if let Some(text) = text.strip_prefix('.') { + pos = Position::Minor; + if let Some((_, text)) = wildcard(text) { + has_wildcard = true; + if default_op { + op = Op::Wildcard; + } + (None, text) + } else { + let (minor, text) = numeric_identifier(text, pos)?; + (Some(minor), text) + } + } else { + (None, text) + }; + + let (patch, text) = if let Some(text) = text.strip_prefix('.') { + pos = Position::Patch; + if let Some((_, text)) = wildcard(text) { + if default_op { + op = Op::Wildcard; + } + (None, text) + } else if has_wildcard { + return Err(Error::new(ErrorKind::UnexpectedAfterWildcard)); + } else { + let (patch, text) = numeric_identifier(text, pos)?; + (Some(patch), text) + } + } else { + (None, text) + }; + + let (pre, text) = if patch.is_some() && 
text.starts_with('-') { + pos = Position::Pre; + let text = &text[1..]; + let (pre, text) = prerelease_identifier(text)?; + if pre.is_empty() { + return Err(Error::new(ErrorKind::EmptySegment(pos))); + } + (pre, text) + } else { + (Prerelease::EMPTY, text) + }; + + let text = if patch.is_some() && text.starts_with('+') { + pos = Position::Build; + let text = &text[1..]; + let (build, text) = build_identifier(text)?; + if build.is_empty() { + return Err(Error::new(ErrorKind::EmptySegment(pos))); + } + text + } else { + text + }; + + let text = text.trim_start_matches(' '); + + let comparator = Comparator { + op, + major, + minor, + patch, + pre, + }; + + Ok((comparator, pos, text)) +} + +fn version_req(input: &str, out: &mut Vec, depth: usize) -> Result { + let (comparator, pos, text) = match comparator(input) { + Ok(success) => success, + Err(mut error) => { + if let Some((ch, mut rest)) = wildcard(input) { + rest = rest.trim_start_matches(' '); + if rest.is_empty() || rest.starts_with(',') { + error.kind = ErrorKind::WildcardNotTheOnlyComparator(ch); + } + } + return Err(error); + } + }; + + if text.is_empty() { + out.reserve_exact(depth + 1); + unsafe { out.as_mut_ptr().add(depth).write(comparator) } + return Ok(depth + 1); + } + + let text = if let Some(text) = text.strip_prefix(',') { + text.trim_start_matches(' ') + } else { + let unexpected = text.chars().next().unwrap(); + return Err(Error::new(ErrorKind::ExpectedCommaFound(pos, unexpected))); + }; + + const MAX_COMPARATORS: usize = 32; + if depth + 1 == MAX_COMPARATORS { + return Err(Error::new(ErrorKind::ExcessiveComparators)); + } + + // Recurse to collect parsed Comparator objects on the stack. We perform a + // single allocation to allocate exactly the right sized Vec only once the + // total number of comparators is known. 
+ let len = version_req(text, out, depth + 1)?; + unsafe { out.as_mut_ptr().add(depth).write(comparator) } + Ok(len) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/serde.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/serde.rs new file mode 100644 index 0000000000000000000000000000000000000000..1fcc7d87f624088da616387da4cc17f791dd79d4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/src/serde.rs @@ -0,0 +1,109 @@ +use crate::{Comparator, Version, VersionReq}; +use core::fmt; +use serde::de::{Deserialize, Deserializer, Error, Visitor}; +use serde::ser::{Serialize, Serializer}; + +impl Serialize for Version { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_str(self) + } +} + +impl Serialize for VersionReq { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_str(self) + } +} + +impl Serialize for Comparator { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_str(self) + } +} + +impl<'de> Deserialize<'de> for Version { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct VersionVisitor; + + impl<'de> Visitor<'de> for VersionVisitor { + type Value = Version; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("semver version") + } + + fn visit_str(self, string: &str) -> Result + where + E: Error, + { + string.parse().map_err(Error::custom) + } + } + + deserializer.deserialize_str(VersionVisitor) + } +} + +impl<'de> Deserialize<'de> for VersionReq { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct VersionReqVisitor; + + impl<'de> Visitor<'de> for VersionReqVisitor { + type Value = VersionReq; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("semver version") + } + + 
fn visit_str(self, string: &str) -> Result + where + E: Error, + { + string.parse().map_err(Error::custom) + } + } + + deserializer.deserialize_str(VersionReqVisitor) + } +} + +impl<'de> Deserialize<'de> for Comparator { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct ComparatorVisitor; + + impl<'de> Visitor<'de> for ComparatorVisitor { + type Value = Comparator; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("semver comparator") + } + + fn visit_str(self, string: &str) -> Result + where + E: Error, + { + string.parse().map_err(Error::custom) + } + } + + deserializer.deserialize_str(ComparatorVisitor) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/node/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/node/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..e9c897bf2135e404e01a42398f5a764f7e849e61 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/node/mod.rs @@ -0,0 +1,43 @@ +#![cfg(test_node_semver)] + +use semver::Version; +use std::fmt::{self, Display}; +use std::process::Command; + +#[derive(Default, Eq, PartialEq, Hash, Debug)] +pub(super) struct VersionReq(semver::VersionReq); + +impl VersionReq { + pub(super) const STAR: Self = VersionReq(semver::VersionReq::STAR); + + pub(super) fn matches(&self, version: &Version) -> bool { + let out = Command::new("node") + .arg("-e") + .arg(format!( + "console.log(require('semver').satisfies('{}', '{}'))", + version, + self.to_string().replace(',', ""), + )) + .output() + .unwrap(); + if out.stdout == b"true\n" { + true + } else if out.stdout == b"false\n" { + false + } else { + let s = String::from_utf8_lossy(&out.stdout) + String::from_utf8_lossy(&out.stderr); + panic!("unexpected output: {}", s); + } + } +} + +impl Display for VersionReq { + fn fmt(&self, formatter: &mut fmt::Formatter) 
-> fmt::Result { + Display::fmt(&self.0, formatter) + } +} + +#[track_caller] +pub(super) fn req(text: &str) -> VersionReq { + VersionReq(crate::util::req(text)) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_autotrait.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_autotrait.rs new file mode 100644 index 0000000000000000000000000000000000000000..5d16689129499500828ff51438da08aadf36d451 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_autotrait.rs @@ -0,0 +1,14 @@ +#![allow(clippy::extra_unused_type_parameters)] + +fn assert_send_sync() {} + +#[test] +fn test() { + assert_send_sync::(); + assert_send_sync::(); + assert_send_sync::(); + assert_send_sync::(); + assert_send_sync::(); + assert_send_sync::(); + assert_send_sync::(); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_identifier.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_identifier.rs new file mode 100644 index 0000000000000000000000000000000000000000..40d859654880bc3befd590b9506edeffe5a6f948 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_identifier.rs @@ -0,0 +1,51 @@ +#![allow( + clippy::eq_op, + clippy::needless_pass_by_value, + clippy::toplevel_ref_arg, + clippy::wildcard_imports +)] + +mod util; + +use crate::util::*; +use semver::Prerelease; + +#[test] +fn test_new() { + fn test(identifier: Prerelease, expected: &str) { + assert_eq!(identifier.is_empty(), expected.is_empty()); + assert_eq!(identifier.len(), expected.len()); + assert_eq!(identifier.as_str(), expected); + assert_eq!(identifier, identifier); + assert_eq!(identifier, identifier.clone()); + } + + let ref mut string = String::new(); + let limit = if cfg!(miri) { 40 } else { 280 }; // miri is slow + for _ in 0..limit { + test(prerelease(string), string); + string.push('1'); + } + 
+ if !cfg!(miri) { + let ref string = string.repeat(20000); + test(prerelease(string), string); + } +} + +#[test] +fn test_eq() { + assert_eq!(prerelease("-"), prerelease("-")); + assert_ne!(prerelease("a"), prerelease("aa")); + assert_ne!(prerelease("aa"), prerelease("a")); + assert_ne!(prerelease("aaaaaaaaa"), prerelease("a")); + assert_ne!(prerelease("a"), prerelease("aaaaaaaaa")); + assert_ne!(prerelease("aaaaaaaaa"), prerelease("bbbbbbbbb")); + assert_ne!(build_metadata("1"), build_metadata("001")); +} + +#[test] +fn test_prerelease() { + let err = prerelease_err("1.b\0"); + assert_to_string(err, "unexpected character in pre-release identifier"); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_version.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_version.rs new file mode 100644 index 0000000000000000000000000000000000000000..077ff323c560e8ec0327d29129adba5cfd6b3c95 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_version.rs @@ -0,0 +1,251 @@ +#![allow( + clippy::nonminimal_bool, + clippy::too_many_lines, + clippy::uninlined_format_args, + clippy::wildcard_imports +)] + +mod util; + +use crate::util::*; +use semver::{BuildMetadata, Prerelease, Version}; + +#[test] +fn test_parse() { + let err = version_err(""); + assert_to_string(err, "empty string, expected a semver version"); + + let err = version_err(" "); + assert_to_string( + err, + "unexpected character ' ' while parsing major version number", + ); + + let err = version_err("1"); + assert_to_string( + err, + "unexpected end of input while parsing major version number", + ); + + let err = version_err("1.2"); + assert_to_string( + err, + "unexpected end of input while parsing minor version number", + ); + + let err = version_err("1.2.3-"); + assert_to_string(err, "empty identifier segment in pre-release identifier"); + + let err = version_err("a.b.c"); + assert_to_string( + 
err, + "unexpected character 'a' while parsing major version number", + ); + + let err = version_err("1.2.3 abc"); + assert_to_string(err, "unexpected character ' ' after patch version number"); + + let err = version_err("1.2.3-01"); + assert_to_string(err, "invalid leading zero in pre-release identifier"); + + let err = version_err("1.2.3++"); + assert_to_string(err, "empty identifier segment in build metadata"); + + let err = version_err("07"); + assert_to_string(err, "invalid leading zero in major version number"); + + let err = version_err("111111111111111111111.0.0"); + assert_to_string(err, "value of major version number exceeds u64::MAX"); + + let err = version_err("8\0"); + assert_to_string(err, "unexpected character '\\0' after major version number"); + + let parsed = version("1.2.3"); + let expected = Version::new(1, 2, 3); + assert_eq!(parsed, expected); + let expected = Version { + major: 1, + minor: 2, + patch: 3, + pre: Prerelease::EMPTY, + build: BuildMetadata::EMPTY, + }; + assert_eq!(parsed, expected); + + let parsed = version("1.2.3-alpha1"); + let expected = Version { + major: 1, + minor: 2, + patch: 3, + pre: prerelease("alpha1"), + build: BuildMetadata::EMPTY, + }; + assert_eq!(parsed, expected); + + let parsed = version("1.2.3+build5"); + let expected = Version { + major: 1, + minor: 2, + patch: 3, + pre: Prerelease::EMPTY, + build: build_metadata("build5"), + }; + assert_eq!(parsed, expected); + + let parsed = version("1.2.3+5build"); + let expected = Version { + major: 1, + minor: 2, + patch: 3, + pre: Prerelease::EMPTY, + build: build_metadata("5build"), + }; + assert_eq!(parsed, expected); + + let parsed = version("1.2.3-alpha1+build5"); + let expected = Version { + major: 1, + minor: 2, + patch: 3, + pre: prerelease("alpha1"), + build: build_metadata("build5"), + }; + assert_eq!(parsed, expected); + + let parsed = version("1.2.3-1.alpha1.9+build5.7.3aedf"); + let expected = Version { + major: 1, + minor: 2, + patch: 3, + pre: 
prerelease("1.alpha1.9"), + build: build_metadata("build5.7.3aedf"), + }; + assert_eq!(parsed, expected); + + let parsed = version("1.2.3-0a.alpha1.9+05build.7.3aedf"); + let expected = Version { + major: 1, + minor: 2, + patch: 3, + pre: prerelease("0a.alpha1.9"), + build: build_metadata("05build.7.3aedf"), + }; + assert_eq!(parsed, expected); + + let parsed = version("0.4.0-beta.1+0851523"); + let expected = Version { + major: 0, + minor: 4, + patch: 0, + pre: prerelease("beta.1"), + build: build_metadata("0851523"), + }; + assert_eq!(parsed, expected); + + // for https://nodejs.org/dist/index.json, where some older npm versions are "1.1.0-beta-10" + let parsed = version("1.1.0-beta-10"); + let expected = Version { + major: 1, + minor: 1, + patch: 0, + pre: prerelease("beta-10"), + build: BuildMetadata::EMPTY, + }; + assert_eq!(parsed, expected); +} + +#[test] +fn test_eq() { + assert_eq!(version("1.2.3"), version("1.2.3")); + assert_eq!(version("1.2.3-alpha1"), version("1.2.3-alpha1")); + assert_eq!(version("1.2.3+build.42"), version("1.2.3+build.42")); + assert_eq!(version("1.2.3-alpha1+42"), version("1.2.3-alpha1+42")); +} + +#[test] +fn test_ne() { + assert_ne!(version("0.0.0"), version("0.0.1")); + assert_ne!(version("0.0.0"), version("0.1.0")); + assert_ne!(version("0.0.0"), version("1.0.0")); + assert_ne!(version("1.2.3-alpha"), version("1.2.3-beta")); + assert_ne!(version("1.2.3+23"), version("1.2.3+42")); +} + +#[test] +fn test_display() { + assert_to_string(version("1.2.3"), "1.2.3"); + assert_to_string(version("1.2.3-alpha1"), "1.2.3-alpha1"); + assert_to_string(version("1.2.3+build.42"), "1.2.3+build.42"); + assert_to_string(version("1.2.3-alpha1+42"), "1.2.3-alpha1+42"); +} + +#[test] +fn test_lt() { + assert!(version("0.0.0") < version("1.2.3-alpha2")); + assert!(version("1.0.0") < version("1.2.3-alpha2")); + assert!(version("1.2.0") < version("1.2.3-alpha2")); + assert!(version("1.2.3-alpha1") < version("1.2.3")); + assert!(version("1.2.3-alpha1") 
< version("1.2.3-alpha2")); + assert!(!(version("1.2.3-alpha2") < version("1.2.3-alpha2"))); + assert!(version("1.2.3+23") < version("1.2.3+42")); +} + +#[test] +fn test_le() { + assert!(version("0.0.0") <= version("1.2.3-alpha2")); + assert!(version("1.0.0") <= version("1.2.3-alpha2")); + assert!(version("1.2.0") <= version("1.2.3-alpha2")); + assert!(version("1.2.3-alpha1") <= version("1.2.3-alpha2")); + assert!(version("1.2.3-alpha2") <= version("1.2.3-alpha2")); + assert!(version("1.2.3+23") <= version("1.2.3+42")); +} + +#[test] +fn test_gt() { + assert!(version("1.2.3-alpha2") > version("0.0.0")); + assert!(version("1.2.3-alpha2") > version("1.0.0")); + assert!(version("1.2.3-alpha2") > version("1.2.0")); + assert!(version("1.2.3-alpha2") > version("1.2.3-alpha1")); + assert!(version("1.2.3") > version("1.2.3-alpha2")); + assert!(!(version("1.2.3-alpha2") > version("1.2.3-alpha2"))); + assert!(!(version("1.2.3+23") > version("1.2.3+42"))); +} + +#[test] +fn test_ge() { + assert!(version("1.2.3-alpha2") >= version("0.0.0")); + assert!(version("1.2.3-alpha2") >= version("1.0.0")); + assert!(version("1.2.3-alpha2") >= version("1.2.0")); + assert!(version("1.2.3-alpha2") >= version("1.2.3-alpha1")); + assert!(version("1.2.3-alpha2") >= version("1.2.3-alpha2")); + assert!(!(version("1.2.3+23") >= version("1.2.3+42"))); +} + +#[test] +fn test_spec_order() { + let vs = [ + "1.0.0-alpha", + "1.0.0-alpha.1", + "1.0.0-alpha.beta", + "1.0.0-beta", + "1.0.0-beta.2", + "1.0.0-beta.11", + "1.0.0-rc.1", + "1.0.0", + ]; + let mut i = 1; + while i < vs.len() { + let a = version(vs[i - 1]); + let b = version(vs[i]); + assert!(a < b, "nope {:?} < {:?}", a, b); + i += 1; + } +} + +#[test] +fn test_align() { + let version = version("1.2.3-rc1"); + assert_eq!("1.2.3-rc1 ", format!("{:20}", version)); + assert_eq!("*****1.2.3-rc1******", format!("{:*^20}", version)); + assert_eq!(" 1.2.3-rc1", format!("{:>20}", version)); +} diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_version_req.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_version_req.rs new file mode 100644 index 0000000000000000000000000000000000000000..a138c80e56dce8afe0df0eab82fd88217d38eaa0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/test_version_req.rs @@ -0,0 +1,480 @@ +#![allow( + clippy::missing_panics_doc, + clippy::shadow_unrelated, + clippy::toplevel_ref_arg, + clippy::uninlined_format_args, + clippy::wildcard_imports +)] + +mod node; +mod util; + +use crate::util::*; +use std::collections::hash_map::DefaultHasher; +use std::hash::{Hash, Hasher}; + +#[cfg(test_node_semver)] +use node::{req, VersionReq}; +#[cfg(not(test_node_semver))] +use semver::VersionReq; + +#[track_caller] +fn assert_match_all(req: &VersionReq, versions: &[&str]) { + for string in versions { + let parsed = version(string); + assert!(req.matches(&parsed), "did not match {}", string); + } +} + +#[track_caller] +fn assert_match_none(req: &VersionReq, versions: &[&str]) { + for string in versions { + let parsed = version(string); + assert!(!req.matches(&parsed), "matched {}", string); + } +} + +#[test] +fn test_basic() { + let ref r = req("1.0.0"); + assert_to_string(r, "^1.0.0"); + assert_match_all(r, &["1.0.0", "1.1.0", "1.0.1"]); + assert_match_none(r, &["0.9.9", "0.10.0", "0.1.0", "1.0.0-pre", "1.0.1-pre"]); +} + +#[test] +fn test_default() { + let ref r = VersionReq::default(); + assert_eq!(r, &VersionReq::STAR); +} + +#[test] +fn test_exact() { + let ref r = req("=1.0.0"); + assert_to_string(r, "=1.0.0"); + assert_match_all(r, &["1.0.0"]); + assert_match_none(r, &["1.0.1", "0.9.9", "0.10.0", "0.1.0", "1.0.0-pre"]); + + let ref r = req("=0.9.0"); + assert_to_string(r, "=0.9.0"); + assert_match_all(r, &["0.9.0"]); + assert_match_none(r, &["0.9.1", "1.9.0", "0.0.9", "0.9.0-pre"]); + + let ref r = req("=0.0.2"); + assert_to_string(r, 
"=0.0.2"); + assert_match_all(r, &["0.0.2"]); + assert_match_none(r, &["0.0.1", "0.0.3", "0.0.2-pre"]); + + let ref r = req("=0.1.0-beta2.a"); + assert_to_string(r, "=0.1.0-beta2.a"); + assert_match_all(r, &["0.1.0-beta2.a"]); + assert_match_none(r, &["0.9.1", "0.1.0", "0.1.1-beta2.a", "0.1.0-beta2"]); + + let ref r = req("=0.1.0+meta"); + assert_to_string(r, "=0.1.0"); + assert_match_all(r, &["0.1.0", "0.1.0+meta", "0.1.0+any"]); +} + +#[test] +pub fn test_greater_than() { + let ref r = req(">= 1.0.0"); + assert_to_string(r, ">=1.0.0"); + assert_match_all(r, &["1.0.0", "2.0.0"]); + assert_match_none(r, &["0.1.0", "0.0.1", "1.0.0-pre", "2.0.0-pre"]); + + let ref r = req(">= 2.1.0-alpha2"); + assert_to_string(r, ">=2.1.0-alpha2"); + assert_match_all(r, &["2.1.0-alpha2", "2.1.0-alpha3", "2.1.0", "3.0.0"]); + assert_match_none( + r, + &["2.0.0", "2.1.0-alpha1", "2.0.0-alpha2", "3.0.0-alpha2"], + ); +} + +#[test] +pub fn test_less_than() { + let ref r = req("< 1.0.0"); + assert_to_string(r, "<1.0.0"); + assert_match_all(r, &["0.1.0", "0.0.1"]); + assert_match_none(r, &["1.0.0", "1.0.0-beta", "1.0.1", "0.9.9-alpha"]); + + let ref r = req("<= 2.1.0-alpha2"); + assert_match_all(r, &["2.1.0-alpha2", "2.1.0-alpha1", "2.0.0", "1.0.0"]); + assert_match_none( + r, + &["2.1.0", "2.2.0-alpha1", "2.0.0-alpha2", "1.0.0-alpha2"], + ); + + let ref r = req(">1.0.0-alpha, <1.0.0"); + assert_match_all(r, &["1.0.0-beta"]); + + let ref r = req(">1.0.0-alpha, <1.0"); + assert_match_none(r, &["1.0.0-beta"]); + + let ref r = req(">1.0.0-alpha, <1"); + assert_match_none(r, &["1.0.0-beta"]); +} + +#[test] +pub fn test_multiple() { + let ref r = req("> 0.0.9, <= 2.5.3"); + assert_to_string(r, ">0.0.9, <=2.5.3"); + assert_match_all(r, &["0.0.10", "1.0.0", "2.5.3"]); + assert_match_none(r, &["0.0.8", "2.5.4"]); + + let ref r = req("0.3.0, 0.4.0"); + assert_to_string(r, "^0.3.0, ^0.4.0"); + assert_match_none(r, &["0.0.8", "0.3.0", "0.4.0"]); + + let ref r = req("<= 0.2.0, >= 0.5.0"); + 
assert_to_string(r, "<=0.2.0, >=0.5.0"); + assert_match_none(r, &["0.0.8", "0.3.0", "0.5.1"]); + + let ref r = req("0.1.0, 0.1.4, 0.1.6"); + assert_to_string(r, "^0.1.0, ^0.1.4, ^0.1.6"); + assert_match_all(r, &["0.1.6", "0.1.9"]); + assert_match_none(r, &["0.1.0", "0.1.4", "0.2.0"]); + + let err = req_err("> 0.1.0,"); + assert_to_string( + err, + "unexpected end of input while parsing major version number", + ); + + let err = req_err("> 0.3.0, ,"); + assert_to_string( + err, + "unexpected character ',' while parsing major version number", + ); + + let ref r = req(">=0.5.1-alpha3, <0.6"); + assert_to_string(r, ">=0.5.1-alpha3, <0.6"); + assert_match_all( + r, + &[ + "0.5.1-alpha3", + "0.5.1-alpha4", + "0.5.1-beta", + "0.5.1", + "0.5.5", + ], + ); + assert_match_none( + r, + &["0.5.1-alpha1", "0.5.2-alpha3", "0.5.5-pre", "0.5.0-pre"], + ); + assert_match_none(r, &["0.6.0", "0.6.0-pre"]); + + // https://github.com/steveklabnik/semver/issues/56 + let err = req_err("1.2.3 - 2.3.4"); + assert_to_string(err, "expected comma after patch version number, found '-'"); + + let err = req_err(">1, >2, >3, >4, >5, >6, >7, >8, >9, >10, >11, >12, >13, >14, >15, >16, >17, >18, >19, >20, >21, >22, >23, >24, >25, >26, >27, >28, >29, >30, >31, >32, >33"); + assert_to_string(err, "excessive number of version comparators"); +} + +#[test] +pub fn test_whitespace_delimited_comparator_sets() { + // https://github.com/steveklabnik/semver/issues/55 + let err = req_err("> 0.0.9 <= 2.5.3"); + assert_to_string(err, "expected comma after patch version number, found '<'"); +} + +#[test] +pub fn test_tilde() { + let ref r = req("~1"); + assert_match_all(r, &["1.0.0", "1.0.1", "1.1.1"]); + assert_match_none(r, &["0.9.1", "2.9.0", "0.0.9"]); + + let ref r = req("~1.2"); + assert_match_all(r, &["1.2.0", "1.2.1"]); + assert_match_none(r, &["1.1.1", "1.3.0", "0.0.9"]); + + let ref r = req("~1.2.2"); + assert_match_all(r, &["1.2.2", "1.2.4"]); + assert_match_none(r, &["1.2.1", "1.9.0", "1.0.9", "2.0.1", 
"0.1.3"]); + + let ref r = req("~1.2.3-beta.2"); + assert_match_all(r, &["1.2.3", "1.2.4", "1.2.3-beta.2", "1.2.3-beta.4"]); + assert_match_none(r, &["1.3.3", "1.1.4", "1.2.3-beta.1", "1.2.4-beta.2"]); +} + +#[test] +pub fn test_caret() { + let ref r = req("^1"); + assert_match_all(r, &["1.1.2", "1.1.0", "1.2.1", "1.0.1"]); + assert_match_none(r, &["0.9.1", "2.9.0", "0.1.4"]); + assert_match_none(r, &["1.0.0-beta1", "0.1.0-alpha", "1.0.1-pre"]); + + let ref r = req("^1.1"); + assert_match_all(r, &["1.1.2", "1.1.0", "1.2.1"]); + assert_match_none(r, &["0.9.1", "2.9.0", "1.0.1", "0.1.4"]); + + let ref r = req("^1.1.2"); + assert_match_all(r, &["1.1.2", "1.1.4", "1.2.1"]); + assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]); + assert_match_none(r, &["1.1.2-alpha1", "1.1.3-alpha1", "2.9.0-alpha1"]); + + let ref r = req("^0.1.2"); + assert_match_all(r, &["0.1.2", "0.1.4"]); + assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1"]); + assert_match_none(r, &["0.1.2-beta", "0.1.3-alpha", "0.2.0-pre"]); + + let ref r = req("^0.5.1-alpha3"); + assert_match_all( + r, + &[ + "0.5.1-alpha3", + "0.5.1-alpha4", + "0.5.1-beta", + "0.5.1", + "0.5.5", + ], + ); + assert_match_none( + r, + &[ + "0.5.1-alpha1", + "0.5.2-alpha3", + "0.5.5-pre", + "0.5.0-pre", + "0.6.0", + ], + ); + + let ref r = req("^0.0.2"); + assert_match_all(r, &["0.0.2"]); + assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.0.1", "0.1.4"]); + + let ref r = req("^0.0"); + assert_match_all(r, &["0.0.2", "0.0.0"]); + assert_match_none(r, &["0.9.1", "2.9.0", "1.1.1", "0.1.4"]); + + let ref r = req("^0"); + assert_match_all(r, &["0.9.1", "0.0.2", "0.0.0"]); + assert_match_none(r, &["2.9.0", "1.1.1"]); + + let ref r = req("^1.4.2-beta.5"); + assert_match_all( + r, + &["1.4.2", "1.4.3", "1.4.2-beta.5", "1.4.2-beta.6", "1.4.2-c"], + ); + assert_match_none( + r, + &[ + "0.9.9", + "2.0.0", + "1.4.2-alpha", + "1.4.2-beta.4", + "1.4.3-beta.5", + ], + ); +} + +#[test] +pub fn test_wildcard() { + let err = 
req_err(""); + assert_to_string( + err, + "unexpected end of input while parsing major version number", + ); + + let ref r = req("*"); + assert_match_all(r, &["0.9.1", "2.9.0", "0.0.9", "1.0.1", "1.1.1"]); + assert_match_none(r, &["1.0.0-pre"]); + + for s in &["x", "X"] { + assert_eq!(*r, req(s)); + } + + let ref r = req("1.*"); + assert_match_all(r, &["1.2.0", "1.2.1", "1.1.1", "1.3.0"]); + assert_match_none(r, &["0.0.9", "1.2.0-pre"]); + + for s in &["1.x", "1.X", "1.*.*"] { + assert_eq!(*r, req(s)); + } + + let ref r = req("1.2.*"); + assert_match_all(r, &["1.2.0", "1.2.2", "1.2.4"]); + assert_match_none(r, &["1.9.0", "1.0.9", "2.0.1", "0.1.3", "1.2.2-pre"]); + + for s in &["1.2.x", "1.2.X"] { + assert_eq!(*r, req(s)); + } +} + +#[test] +pub fn test_logical_or() { + // https://github.com/steveklabnik/semver/issues/57 + let err = req_err("=1.2.3 || =2.3.4"); + assert_to_string(err, "expected comma after patch version number, found '|'"); + + let err = req_err("1.1 || =1.2.3"); + assert_to_string(err, "expected comma after minor version number, found '|'"); + + let err = req_err("6.* || 8.* || >= 10.*"); + assert_to_string(err, "expected comma after minor version number, found '|'"); +} + +#[test] +pub fn test_any() { + let ref r = VersionReq::STAR; + assert_match_all(r, &["0.0.1", "0.1.0", "1.0.0"]); +} + +#[test] +pub fn test_pre() { + let ref r = req("=2.1.1-really.0"); + assert_match_all(r, &["2.1.1-really.0"]); +} + +#[test] +pub fn test_parse() { + let err = req_err("\0"); + assert_to_string( + err, + "unexpected character '\\0' while parsing major version number", + ); + + let err = req_err(">= >= 0.0.2"); + assert_to_string( + err, + "unexpected character '>' while parsing major version number", + ); + + let err = req_err(">== 0.0.2"); + assert_to_string( + err, + "unexpected character '=' while parsing major version number", + ); + + let err = req_err("a.0.0"); + assert_to_string( + err, + "unexpected character 'a' while parsing major version number", + 
); + + let err = req_err("1.0.0-"); + assert_to_string(err, "empty identifier segment in pre-release identifier"); + + let err = req_err(">="); + assert_to_string( + err, + "unexpected end of input while parsing major version number", + ); +} + +#[test] +fn test_comparator_parse() { + let parsed = comparator("1.2.3-alpha"); + assert_to_string(parsed, "^1.2.3-alpha"); + + let parsed = comparator("2.X"); + assert_to_string(parsed, "2.*"); + + let parsed = comparator("2"); + assert_to_string(parsed, "^2"); + + let parsed = comparator("2.x.x"); + assert_to_string(parsed, "2.*"); + + let err = comparator_err("1.2.3-01"); + assert_to_string(err, "invalid leading zero in pre-release identifier"); + + let err = comparator_err("1.2.3+4."); + assert_to_string(err, "empty identifier segment in build metadata"); + + let err = comparator_err(">"); + assert_to_string( + err, + "unexpected end of input while parsing major version number", + ); + + let err = comparator_err("1."); + assert_to_string( + err, + "unexpected end of input while parsing minor version number", + ); + + let err = comparator_err("1.*."); + assert_to_string(err, "unexpected character after wildcard in version req"); + + let err = comparator_err("1.2.3+4ÿ"); + assert_to_string(err, "unexpected character 'ÿ' after build metadata"); +} + +#[test] +fn test_cargo3202() { + let ref r = req("0.*.*"); + assert_to_string(r, "0.*"); + assert_match_all(r, &["0.5.0"]); + + let ref r = req("0.0.*"); + assert_to_string(r, "0.0.*"); +} + +#[test] +fn test_digit_after_wildcard() { + let err = req_err("*.1"); + assert_to_string(err, "unexpected character after wildcard in version req"); + + let err = req_err("1.*.1"); + assert_to_string(err, "unexpected character after wildcard in version req"); + + let err = req_err(">=1.*.1"); + assert_to_string(err, "unexpected character after wildcard in version req"); +} + +#[test] +fn test_eq_hash() { + fn calculate_hash(value: impl Hash) -> u64 { + let mut hasher = 
DefaultHasher::new(); + value.hash(&mut hasher); + hasher.finish() + } + + assert!(req("^1") == req("^1")); + assert!(calculate_hash(req("^1")) == calculate_hash(req("^1"))); + assert!(req("^1") != req("^2")); +} + +#[test] +fn test_leading_digit_in_pre_and_build() { + for op in &["=", ">", ">=", "<", "<=", "~", "^"] { + // digit then alpha + req(&format!("{} 1.2.3-1a", op)); + req(&format!("{} 1.2.3+1a", op)); + + // digit then alpha (leading zero) + req(&format!("{} 1.2.3-01a", op)); + req(&format!("{} 1.2.3+01", op)); + + // multiple + req(&format!("{} 1.2.3-1+1", op)); + req(&format!("{} 1.2.3-1-1+1-1-1", op)); + req(&format!("{} 1.2.3-1a+1a", op)); + req(&format!("{} 1.2.3-1a-1a+1a-1a-1a", op)); + } +} + +#[test] +fn test_wildcard_and_another() { + let err = req_err("*, 0.20.0-any"); + assert_to_string( + err, + "wildcard req (*) must be the only comparator in the version req", + ); + + let err = req_err("0.20.0-any, *"); + assert_to_string( + err, + "wildcard req (*) must be the only comparator in the version req", + ); + + let err = req_err("0.20.0-any, *, 1.0"); + assert_to_string( + err, + "wildcard req (*) must be the only comparator in the version req", + ); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/util/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/util/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..9bc1cd760a4e251bd0c6e6165c756728f5c90259 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/semver-1.0.27/tests/util/mod.rs @@ -0,0 +1,54 @@ +#![allow(dead_code)] + +use semver::{BuildMetadata, Comparator, Error, Prerelease, Version, VersionReq}; +use std::fmt::Display; + +#[track_caller] +pub(super) fn version(text: &str) -> Version { + Version::parse(text).unwrap() +} + +#[track_caller] +pub(super) fn version_err(text: &str) -> Error { + Version::parse(text).unwrap_err() +} + +#[track_caller] +pub(super) fn req(text: &str) 
-> VersionReq { + VersionReq::parse(text).unwrap() +} + +#[track_caller] +pub(super) fn req_err(text: &str) -> Error { + VersionReq::parse(text).unwrap_err() +} + +#[track_caller] +pub(super) fn comparator(text: &str) -> Comparator { + Comparator::parse(text).unwrap() +} + +#[track_caller] +pub(super) fn comparator_err(text: &str) -> Error { + Comparator::parse(text).unwrap_err() +} + +#[track_caller] +pub(super) fn prerelease(text: &str) -> Prerelease { + Prerelease::new(text).unwrap() +} + +#[track_caller] +pub(super) fn prerelease_err(text: &str) -> Error { + Prerelease::new(text).unwrap_err() +} + +#[track_caller] +pub(super) fn build_metadata(text: &str) -> BuildMetadata { + BuildMetadata::new(text).unwrap() +} + +#[track_caller] +pub(super) fn assert_to_string(value: impl Display, expected: &str) { + assert_eq!(value.to_string(), expected); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..710a3aa568641e755ec35c12cbe93b7d96106328 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "a866b336f14aa57a07f0d0be9f8762746e64ecb4" + }, + "path_in_vcs": "serde" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/Cargo.lock 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..5ecd8d01338c31d5d441a2f7c478873fdcfce4a1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/Cargo.lock @@ -0,0 +1,66 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "serde" +version = "1.0.228" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..6ce71c7b6eb6a08361b6876c4f72f3c81c13d5f4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/Cargo.toml @@ -0,0 +1,84 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.56" +name = "serde" +version = "1.0.228" +authors = [ + "Erick Tryzelaar ", + "David Tolnay ", +] +build = "build.rs" +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "A generic serialization/deserialization framework" +homepage = "https://serde.rs" +documentation = "https://docs.rs/serde" +readme = "crates-io.md" +keywords = [ + "serde", + "serialization", + "no_std", +] +categories = [ + "encoding", + "no-std", + "no-std::no-alloc", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/serde-rs/serde" + +[package.metadata.playground] +features = [ + "derive", + "rc", +] + +[package.metadata.docs.rs] +features = [ + "derive", + "rc", + "unstable", +] +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = [ + "--generate-link-to-definition", + "--generate-macro-expansion", + "--extern-html-root-url=core=https://doc.rust-lang.org", + "--extern-html-root-url=alloc=https://doc.rust-lang.org", + "--extern-html-root-url=std=https://doc.rust-lang.org", +] + +[features] +alloc = ["serde_core/alloc"] 
+default = ["std"] +derive = ["serde_derive"] +rc = ["serde_core/rc"] +std = ["serde_core/std"] +unstable = ["serde_core/unstable"] + +[lib] +name = "serde" +path = "src/lib.rs" + +[dependencies.serde_core] +version = "=1.0.228" +features = ["result"] +default-features = false + +[dependencies.serde_derive] +version = "1" +optional = true diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..0bffa22d4fd4dbce767b7d2c904e940cc02f4c51 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/Cargo.toml.orig @@ -0,0 +1,62 @@ +[package] +name = "serde" +version = "1.0.228" +authors = ["Erick Tryzelaar ", "David Tolnay "] +build = "build.rs" +categories = ["encoding", "no-std", "no-std::no-alloc"] +description = "A generic serialization/deserialization framework" +documentation = "https://docs.rs/serde" +edition = "2021" +homepage = "https://serde.rs" +keywords = ["serde", "serialization", "no_std"] +license = "MIT OR Apache-2.0" +readme = "crates-io.md" +repository = "https://github.com/serde-rs/serde" +rust-version = "1.56" + +[dependencies] +serde_core = { version = "=1.0.228", path = "../serde_core", default-features = false, features = ["result"] } +serde_derive = { version = "1", optional = true, path = "../serde_derive" } + +[package.metadata.playground] +features = ["derive", "rc"] + +[package.metadata.docs.rs] +features = ["derive", "rc", "unstable"] +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = [ + "--generate-link-to-definition", + "--generate-macro-expansion", + "--extern-html-root-url=core=https://doc.rust-lang.org", + "--extern-html-root-url=alloc=https://doc.rust-lang.org", + "--extern-html-root-url=std=https://doc.rust-lang.org", +] + + +### FEATURES ################################################################# + 
+[features] +default = ["std"] + +# Provide derive(Serialize, Deserialize) macros. +derive = ["serde_derive"] + +# Provide impls for common standard library types like Vec and HashMap. +# Requires a dependency on the Rust standard library. +std = ["serde_core/std"] + +# Provide impls for types that require unstable functionality. For tracking and +# discussion of unstable functionality please refer to this issue: +# +# https://github.com/serde-rs/serde/issues/812 +unstable = ["serde_core/unstable"] + +# Provide impls for types in the Rust core allocation and collections library +# including String, Box, Vec, and Cow. This is a subset of std but may +# be enabled without depending on all of std. +alloc = ["serde_core/alloc"] + +# Opt into impls for Rc and Arc. Serializing and deserializing these types +# does not preserve identity and may result in multiple copies of the same data. +# Be sure that this is what you want before enabling this feature. +rc = ["serde_core/rc"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..1b5ec8b78e237b5c3b3d812a7c0a6589d0f7161d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..31aa79387f27e730e33d871925e152e35e428031 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bf83d766272a07980127af4413582d212be10da2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/README.md @@ -0,0 +1,114 @@ +# Serde   [![Build Status]][actions] [![Latest Version]][crates.io] [![serde msrv]][Rust 1.56] [![serde_derive msrv]][Rust 1.61] + +[Build Status]: https://img.shields.io/github/actions/workflow/status/serde-rs/serde/ci.yml?branch=master +[actions]: https://github.com/serde-rs/serde/actions?query=branch%3Amaster +[Latest Version]: https://img.shields.io/crates/v/serde.svg +[crates.io]: https://crates.io/crates/serde +[serde msrv]: https://img.shields.io/crates/msrv/serde.svg?label=serde%20msrv&color=lightgray +[serde_derive msrv]: https://img.shields.io/crates/msrv/serde_derive.svg?label=serde_derive%20msrv&color=lightgray +[Rust 1.56]: https://blog.rust-lang.org/2021/10/21/Rust-1.56.0.html +[Rust 1.61]: https://blog.rust-lang.org/2022/05/19/Rust-1.61.0.html + +**Serde is a framework for *ser*ializing and *de*serializing Rust data structures efficiently and generically.** + +--- + +You may be looking for: + +- [An overview of Serde](https://serde.rs) +- [Data formats supported by Serde](https://serde.rs/#data-formats) +- [Setting up `#[derive(Serialize, Deserialize)]`](https://serde.rs/derive.html) +- [Examples](https://serde.rs/examples.html) +- [API documentation](https://docs.rs/serde) +- [Release notes](https://github.com/serde-rs/serde/releases) + +## Serde in action + +
+ +Click to show Cargo.toml. +Run this code in the playground. + + +```toml +[dependencies] + +# The core APIs, including the Serialize and Deserialize traits. Always +# required when using Serde. The "derive" feature is only required when +# using #[derive(Serialize, Deserialize)] to make Serde work with structs +# and enums defined in your crate. +serde = { version = "1.0", features = ["derive"] } + +# Each data format lives in its own crate; the sample code below uses JSON +# but you may be using a different one. +serde_json = "1.0" +``` + +
+

+ +```rust +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +struct Point { + x: i32, + y: i32, +} + +fn main() { + let point = Point { x: 1, y: 2 }; + + // Convert the Point to a JSON string. + let serialized = serde_json::to_string(&point).unwrap(); + + // Prints serialized = {"x":1,"y":2} + println!("serialized = {}", serialized); + + // Convert the JSON string back to a Point. + let deserialized: Point = serde_json::from_str(&serialized).unwrap(); + + // Prints deserialized = Point { x: 1, y: 2 } + println!("deserialized = {:?}", deserialized); +} +``` + +## Getting help + +Serde is one of the most widely used Rust libraries so any place that Rustaceans +congregate will be able to help you out. For chat, consider trying the +[#rust-questions] or [#rust-beginners] channels of the unofficial community +Discord (invite: ), the [#rust-usage] or +[#beginners] channels of the official Rust Project Discord (invite: +), or the [#general][zulip] stream in Zulip. For +asynchronous, consider the [\[rust\] tag on StackOverflow][stackoverflow], the +[/r/rust] subreddit which has a pinned weekly easy questions post, or the Rust +[Discourse forum][discourse]. It's acceptable to file a support issue in this +repo but they tend not to get as many eyes as any of the above and may get +closed without a response after some time. + +[#rust-questions]: https://discord.com/channels/273534239310479360/274215136414400513 +[#rust-beginners]: https://discord.com/channels/273534239310479360/273541522815713281 +[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848 +[#beginners]: https://discord.com/channels/442252698964721669/448238009733742612 +[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general +[stackoverflow]: https://stackoverflow.com/questions/tagged/rust +[/r/rust]: https://www.reddit.com/r/rust +[discourse]: https://users.rust-lang.org + +
+ +#### License + + +Licensed under either of Apache License, Version +2.0 or MIT license at your option. + + +
+ + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. + diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/build.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..fa0fd5a0d931b8b66c362af27930c7f358af1a7b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/build.rs @@ -0,0 +1,69 @@ +use std::env; +use std::fs; +use std::path::PathBuf; +use std::process::Command; +use std::str; + +const PRIVATE: &str = "\ +#[doc(hidden)] +pub mod __private$$ { + #[doc(hidden)] + pub use crate::private::*; +} +use serde_core::__private$$ as serde_core_private; +"; + +// The rustc-cfg strings below are *not* public API. Please let us know by +// opening a GitHub issue if your build environment requires some way to enable +// these cfgs other than by executing our build script. 
+fn main() { + println!("cargo:rerun-if-changed=build.rs"); + + println!("cargo:rustc-cfg=if_docsrs_then_no_serde_core"); + + let out_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap()); + let patch_version = env::var("CARGO_PKG_VERSION_PATCH").unwrap(); + let module = PRIVATE.replace("$$", &patch_version); + fs::write(out_dir.join("private.rs"), module).unwrap(); + + let minor = match rustc_minor_version() { + Some(minor) => minor, + None => return, + }; + + if minor >= 77 { + println!("cargo:rustc-check-cfg=cfg(feature, values(\"result\"))"); + println!("cargo:rustc-check-cfg=cfg(if_docsrs_then_no_serde_core)"); + println!("cargo:rustc-check-cfg=cfg(no_core_cstr)"); + println!("cargo:rustc-check-cfg=cfg(no_core_error)"); + println!("cargo:rustc-check-cfg=cfg(no_core_net)"); + println!("cargo:rustc-check-cfg=cfg(no_core_num_saturating)"); + println!("cargo:rustc-check-cfg=cfg(no_diagnostic_namespace)"); + println!("cargo:rustc-check-cfg=cfg(no_serde_derive)"); + println!("cargo:rustc-check-cfg=cfg(no_std_atomic)"); + println!("cargo:rustc-check-cfg=cfg(no_std_atomic64)"); + println!("cargo:rustc-check-cfg=cfg(no_target_has_atomic)"); + } + + // Current minimum supported version of serde_derive crate is Rust 1.61. 
+ if minor < 61 { + println!("cargo:rustc-cfg=no_serde_derive"); + } + + // Support for the `#[diagnostic]` tool attribute namespace + // https://blog.rust-lang.org/2024/05/02/Rust-1.78.0.html#diagnostic-attributes + if minor < 78 { + println!("cargo:rustc-cfg=no_diagnostic_namespace"); + } +} + +fn rustc_minor_version() -> Option { + let rustc = env::var_os("RUSTC")?; + let output = Command::new(rustc).arg("--version").output().ok()?; + let version = str::from_utf8(&output.stdout).ok()?; + let mut pieces = version.split('.'); + if pieces.next() != Some("rustc 1") { + return None; + } + pieces.next()?.parse().ok() +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/crates-io.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/crates-io.md new file mode 100644 index 0000000000000000000000000000000000000000..e6e7d9fb6d6e10d5ae6ba139af2c518c01d0e75f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/crates-io.md @@ -0,0 +1,65 @@ + + +**Serde is a framework for *ser*ializing and *de*serializing Rust data structures efficiently and generically.** + +--- + +You may be looking for: + +- [An overview of Serde](https://serde.rs) +- [Data formats supported by Serde](https://serde.rs/#data-formats) +- [Setting up `#[derive(Serialize, Deserialize)]`](https://serde.rs/derive.html) +- [Examples](https://serde.rs/examples.html) +- [API documentation](https://docs.rs/serde) +- [Release notes](https://github.com/serde-rs/serde/releases) + +## Serde in action + +```rust +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +struct Point { + x: i32, + y: i32, +} + +fn main() { + let point = Point { x: 1, y: 2 }; + + // Convert the Point to a JSON string. + let serialized = serde_json::to_string(&point).unwrap(); + + // Prints serialized = {"x":1,"y":2} + println!("serialized = {}", serialized); + + // Convert the JSON string back to a Point. 
+ let deserialized: Point = serde_json::from_str(&serialized).unwrap(); + + // Prints deserialized = Point { x: 1, y: 2 } + println!("deserialized = {:?}", deserialized); +} +``` + +## Getting help + +Serde is one of the most widely used Rust libraries so any place that Rustaceans +congregate will be able to help you out. For chat, consider trying the +[#rust-questions] or [#rust-beginners] channels of the unofficial community +Discord (invite: ), the [#rust-usage] +or [#beginners] channels of the official Rust Project Discord (invite: +), or the [#general][zulip] stream in Zulip. For +asynchronous, consider the [\[rust\] tag on StackOverflow][stackoverflow], the +[/r/rust] subreddit which has a pinned weekly easy questions post, or the Rust +[Discourse forum][discourse]. It's acceptable to file a support issue in this +repo but they tend not to get as many eyes as any of the above and may get +closed without a response after some time. + +[#rust-questions]: https://discord.com/channels/273534239310479360/274215136414400513 +[#rust-beginners]: https://discord.com/channels/273534239310479360/273541522815713281 +[#rust-usage]: https://discord.com/channels/442252698964721669/443150878111694848 +[#beginners]: https://discord.com/channels/442252698964721669/448238009733742612 +[zulip]: https://rust-lang.zulipchat.com/#narrow/stream/122651-general +[stackoverflow]: https://stackoverflow.com/questions/tagged/rust +[/r/rust]: https://www.reddit.com/r/rust +[discourse]: https://users.rust-lang.org diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/crate_root.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/crate_root.rs new file mode 100644 index 0000000000000000000000000000000000000000..2cf75a40196a761e6666bd77358ac11e73be4fc5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/crate_root.rs @@ -0,0 +1,171 @@ +macro_rules! 
crate_root { + () => { + /// A facade around all the types we need from the `std`, `core`, and `alloc` + /// crates. This avoids elaborate import wrangling having to happen in every + /// module. + mod lib { + mod core { + #[cfg(not(feature = "std"))] + pub use core::*; + #[cfg(feature = "std")] + pub use std::*; + } + + pub use self::core::{f32, f64}; + pub use self::core::{iter, num, str}; + + #[cfg(any(feature = "std", feature = "alloc"))] + pub use self::core::{cmp, mem}; + + pub use self::core::cell::{Cell, RefCell}; + pub use self::core::cmp::Reverse; + pub use self::core::fmt::{self, Debug, Display, Write as FmtWrite}; + pub use self::core::marker::PhantomData; + pub use self::core::num::Wrapping; + pub use self::core::ops::{Bound, Range, RangeFrom, RangeInclusive, RangeTo}; + pub use self::core::result; + pub use self::core::time::Duration; + + #[cfg(all(feature = "alloc", not(feature = "std")))] + pub use alloc::borrow::{Cow, ToOwned}; + #[cfg(feature = "std")] + pub use std::borrow::{Cow, ToOwned}; + + #[cfg(all(feature = "alloc", not(feature = "std")))] + pub use alloc::string::{String, ToString}; + #[cfg(feature = "std")] + pub use std::string::{String, ToString}; + + #[cfg(all(feature = "alloc", not(feature = "std")))] + pub use alloc::vec::Vec; + #[cfg(feature = "std")] + pub use std::vec::Vec; + + #[cfg(all(feature = "alloc", not(feature = "std")))] + pub use alloc::boxed::Box; + #[cfg(feature = "std")] + pub use std::boxed::Box; + + #[cfg(all(feature = "rc", feature = "alloc", not(feature = "std")))] + pub use alloc::rc::{Rc, Weak as RcWeak}; + #[cfg(all(feature = "rc", feature = "std"))] + pub use std::rc::{Rc, Weak as RcWeak}; + + #[cfg(all(feature = "rc", feature = "alloc", not(feature = "std")))] + pub use alloc::sync::{Arc, Weak as ArcWeak}; + #[cfg(all(feature = "rc", feature = "std"))] + pub use std::sync::{Arc, Weak as ArcWeak}; + + #[cfg(all(feature = "alloc", not(feature = "std")))] + pub use alloc::collections::{BTreeMap, BTreeSet, 
BinaryHeap, LinkedList, VecDeque}; + #[cfg(feature = "std")] + pub use std::collections::{BTreeMap, BTreeSet, BinaryHeap, LinkedList, VecDeque}; + + #[cfg(all(not(no_core_cstr), not(feature = "std")))] + pub use self::core::ffi::CStr; + #[cfg(feature = "std")] + pub use std::ffi::CStr; + + #[cfg(all(not(no_core_cstr), feature = "alloc", not(feature = "std")))] + pub use alloc::ffi::CString; + #[cfg(feature = "std")] + pub use std::ffi::CString; + + #[cfg(all(not(no_core_net), not(feature = "std")))] + pub use self::core::net; + #[cfg(feature = "std")] + pub use std::net; + + #[cfg(feature = "std")] + pub use std::error; + + #[cfg(feature = "std")] + pub use std::collections::{HashMap, HashSet}; + #[cfg(feature = "std")] + pub use std::ffi::{OsStr, OsString}; + #[cfg(feature = "std")] + pub use std::hash::{BuildHasher, Hash}; + #[cfg(feature = "std")] + pub use std::io::Write; + #[cfg(feature = "std")] + pub use std::path::{Path, PathBuf}; + #[cfg(feature = "std")] + pub use std::sync::{Mutex, RwLock}; + #[cfg(feature = "std")] + pub use std::time::{SystemTime, UNIX_EPOCH}; + + #[cfg(all(feature = "std", no_target_has_atomic, not(no_std_atomic)))] + pub use std::sync::atomic::{ + AtomicBool, AtomicI16, AtomicI32, AtomicI8, AtomicIsize, AtomicU16, AtomicU32, + AtomicU8, AtomicUsize, Ordering, + }; + #[cfg(all(feature = "std", no_target_has_atomic, not(no_std_atomic64)))] + pub use std::sync::atomic::{AtomicI64, AtomicU64}; + + #[cfg(all(feature = "std", not(no_target_has_atomic)))] + pub use std::sync::atomic::Ordering; + #[cfg(all(feature = "std", not(no_target_has_atomic), target_has_atomic = "8"))] + pub use std::sync::atomic::{AtomicBool, AtomicI8, AtomicU8}; + #[cfg(all(feature = "std", not(no_target_has_atomic), target_has_atomic = "16"))] + pub use std::sync::atomic::{AtomicI16, AtomicU16}; + #[cfg(all(feature = "std", not(no_target_has_atomic), target_has_atomic = "32"))] + pub use std::sync::atomic::{AtomicI32, AtomicU32}; + #[cfg(all(feature = "std", 
not(no_target_has_atomic), target_has_atomic = "64"))] + pub use std::sync::atomic::{AtomicI64, AtomicU64}; + #[cfg(all(feature = "std", not(no_target_has_atomic), target_has_atomic = "ptr"))] + pub use std::sync::atomic::{AtomicIsize, AtomicUsize}; + + #[cfg(not(no_core_num_saturating))] + pub use self::core::num::Saturating; + } + + // None of this crate's error handling needs the `From::from` error conversion + // performed implicitly by the `?` operator or the standard library's `try!` + // macro. This simplified macro gives a 5.5% improvement in compile time + // compared to standard `try!`, and 9% improvement compared to `?`. + macro_rules! tri { + ($expr:expr) => { + match $expr { + Ok(val) => val, + Err(err) => return Err(err), + } + }; + } + + #[cfg_attr(all(docsrs, if_docsrs_then_no_serde_core), path = "core/de/mod.rs")] + pub mod de; + #[cfg_attr(all(docsrs, if_docsrs_then_no_serde_core), path = "core/ser/mod.rs")] + pub mod ser; + + #[cfg_attr(all(docsrs, if_docsrs_then_no_serde_core), path = "core/format.rs")] + mod format; + + #[doc(inline)] + pub use crate::de::{Deserialize, Deserializer}; + #[doc(inline)] + pub use crate::ser::{Serialize, Serializer}; + + // Used by generated code. Not public API. + #[doc(hidden)] + #[cfg_attr( + all(docsrs, if_docsrs_then_no_serde_core), + path = "core/private/mod.rs" + )] + mod private; + + // Used by declarative macro generated code. Not public API. 
+ #[doc(hidden)] + pub mod __private { + #[doc(hidden)] + pub use crate::private::doc; + #[doc(hidden)] + pub use core::result::Result; + } + + include!(concat!(env!("OUT_DIR"), "/private.rs")); + + #[cfg(all(not(feature = "std"), no_core_error))] + #[cfg_attr(all(docsrs, if_docsrs_then_no_serde_core), path = "core/std_error.rs")] + mod std_error; + }; +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/ignored_any.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/ignored_any.rs new file mode 100644 index 0000000000000000000000000000000000000000..2360a1742362b1e0b31e0fd1f00e5246161851eb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/ignored_any.rs @@ -0,0 +1,238 @@ +use crate::lib::*; + +use crate::de::{ + Deserialize, Deserializer, EnumAccess, Error, MapAccess, SeqAccess, VariantAccess, Visitor, +}; + +/// An efficient way of discarding data from a deserializer. +/// +/// Think of this like `serde_json::Value` in that it can be deserialized from +/// any type, except that it does not store any information about the data that +/// gets deserialized. +/// +/// ```edition2021 +/// use serde::de::{ +/// self, Deserialize, DeserializeSeed, Deserializer, IgnoredAny, SeqAccess, Visitor, +/// }; +/// use std::fmt; +/// use std::marker::PhantomData; +/// +/// /// A seed that can be used to deserialize only the `n`th element of a sequence +/// /// while efficiently discarding elements of any type before or after index `n`. 
+/// /// +/// /// For example to deserialize only the element at index 3: +/// /// +/// /// ``` +/// /// NthElement::new(3).deserialize(deserializer) +/// /// ``` +/// pub struct NthElement { +/// n: usize, +/// marker: PhantomData, +/// } +/// +/// impl NthElement { +/// pub fn new(n: usize) -> Self { +/// NthElement { +/// n: n, +/// marker: PhantomData, +/// } +/// } +/// } +/// +/// impl<'de, T> Visitor<'de> for NthElement +/// where +/// T: Deserialize<'de>, +/// { +/// type Value = T; +/// +/// fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { +/// write!( +/// formatter, +/// "a sequence in which we care about element {}", +/// self.n +/// ) +/// } +/// +/// fn visit_seq(self, mut seq: A) -> Result +/// where +/// A: SeqAccess<'de>, +/// { +/// // Skip over the first `n` elements. +/// for i in 0..self.n { +/// // It is an error if the sequence ends before we get to element `n`. +/// if seq.next_element::()?.is_none() { +/// return Err(de::Error::invalid_length(i, &self)); +/// } +/// } +/// +/// // Deserialize the one we care about. +/// let nth = match seq.next_element()? { +/// Some(nth) => nth, +/// None => { +/// return Err(de::Error::invalid_length(self.n, &self)); +/// } +/// }; +/// +/// // Skip over any remaining elements in the sequence after `n`. +/// while let Some(IgnoredAny) = seq.next_element()? { +/// // ignore +/// } +/// +/// Ok(nth) +/// } +/// } +/// +/// impl<'de, T> DeserializeSeed<'de> for NthElement +/// where +/// T: Deserialize<'de>, +/// { +/// type Value = T; +/// +/// fn deserialize(self, deserializer: D) -> Result +/// where +/// D: Deserializer<'de>, +/// { +/// deserializer.deserialize_seq(self) +/// } +/// } +/// +/// # fn example<'de, D>(deserializer: D) -> Result<(), D::Error> +/// # where +/// # D: Deserializer<'de>, +/// # { +/// // Deserialize only the sequence element at index 3 from this deserializer. +/// // The element at index 3 is required to be a string. 
Elements before and +/// // after index 3 are allowed to be of any type. +/// let s: String = NthElement::new(3).deserialize(deserializer)?; +/// # Ok(()) +/// # } +/// ``` +#[derive(Copy, Clone, Debug, Default, PartialEq)] +pub struct IgnoredAny; + +impl<'de> Visitor<'de> for IgnoredAny { + type Value = IgnoredAny; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("anything at all") + } + + #[inline] + fn visit_bool(self, x: bool) -> Result { + let _ = x; + Ok(IgnoredAny) + } + + #[inline] + fn visit_i64(self, x: i64) -> Result { + let _ = x; + Ok(IgnoredAny) + } + + #[inline] + fn visit_i128(self, x: i128) -> Result { + let _ = x; + Ok(IgnoredAny) + } + + #[inline] + fn visit_u64(self, x: u64) -> Result { + let _ = x; + Ok(IgnoredAny) + } + + #[inline] + fn visit_u128(self, x: u128) -> Result { + let _ = x; + Ok(IgnoredAny) + } + + #[inline] + fn visit_f64(self, x: f64) -> Result { + let _ = x; + Ok(IgnoredAny) + } + + #[inline] + fn visit_str(self, s: &str) -> Result + where + E: Error, + { + let _ = s; + Ok(IgnoredAny) + } + + #[inline] + fn visit_none(self) -> Result { + Ok(IgnoredAny) + } + + #[inline] + fn visit_some(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + IgnoredAny::deserialize(deserializer) + } + + #[inline] + fn visit_newtype_struct(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + IgnoredAny::deserialize(deserializer) + } + + #[inline] + fn visit_unit(self) -> Result { + Ok(IgnoredAny) + } + + #[inline] + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + while let Some(IgnoredAny) = tri!(seq.next_element()) { + // Gobble + } + Ok(IgnoredAny) + } + + #[inline] + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + while let Some((IgnoredAny, IgnoredAny)) = tri!(map.next_entry()) { + // Gobble + } + Ok(IgnoredAny) + } + + #[inline] + fn visit_bytes(self, bytes: &[u8]) -> Result + where + E: Error, + { + let _ = 
bytes; + Ok(IgnoredAny) + } + + fn visit_enum(self, data: A) -> Result + where + A: EnumAccess<'de>, + { + tri!(data.variant::()).1.newtype_variant() + } +} + +impl<'de> Deserialize<'de> for IgnoredAny { + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_ignored_any(IgnoredAny) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/impls.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..ab1a893cc4dcde659409c4677a71715cc220e368 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/impls.rs @@ -0,0 +1,3173 @@ +use crate::lib::*; + +use crate::de::{ + Deserialize, Deserializer, EnumAccess, Error, MapAccess, SeqAccess, Unexpected, VariantAccess, + Visitor, +}; +use crate::private::{self, InPlaceSeed}; + +#[cfg(any(feature = "std", feature = "alloc"))] +use crate::private::size_hint; + +//////////////////////////////////////////////////////////////////////////////// + +struct UnitVisitor; + +impl<'de> Visitor<'de> for UnitVisitor { + type Value = (); + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("unit") + } + + fn visit_unit(self) -> Result + where + E: Error, + { + Ok(()) + } +} + +impl<'de> Deserialize<'de> for () { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_unit(UnitVisitor) + } +} + +#[cfg(feature = "unstable")] +#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))] +impl<'de> Deserialize<'de> for ! 
{ + fn deserialize(_deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Err(Error::custom("cannot deserialize `!`")) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +struct BoolVisitor; + +impl<'de> Visitor<'de> for BoolVisitor { + type Value = bool; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a boolean") + } + + fn visit_bool(self, v: bool) -> Result + where + E: Error, + { + Ok(v) + } +} + +impl<'de> Deserialize<'de> for bool { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_bool(BoolVisitor) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! impl_deserialize_num { + ($primitive:ident, $nonzero:ident, $deserialize:ident $($method:ident!($($val:ident : $visit:ident)*);)*) => { + impl_deserialize_num!($primitive, $deserialize $($method!($($val : $visit)*);)*); + + impl<'de> Deserialize<'de> for num::$nonzero { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct NonZeroVisitor; + + impl<'de> Visitor<'de> for NonZeroVisitor { + type Value = num::$nonzero; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(concat!("a nonzero ", stringify!($primitive))) + } + + $($($method!(nonzero $primitive $val : $visit);)*)* + } + + deserializer.$deserialize(NonZeroVisitor) + } + } + + #[cfg(not(no_core_num_saturating))] + impl<'de> Deserialize<'de> for Saturating<$primitive> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct SaturatingVisitor; + + impl<'de> Visitor<'de> for SaturatingVisitor { + type Value = Saturating<$primitive>; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("integer with support for saturating semantics") + } + + $($($method!(saturating $primitive $val : $visit);)*)* + } + 
+ deserializer.$deserialize(SaturatingVisitor) + } + } + }; + + ($primitive:ident, $deserialize:ident $($method:ident!($($val:ident : $visit:ident)*);)*) => { + impl<'de> Deserialize<'de> for $primitive { + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct PrimitiveVisitor; + + impl<'de> Visitor<'de> for PrimitiveVisitor { + type Value = $primitive; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(stringify!($primitive)) + } + + $($($method!($val : $visit);)*)* + } + + deserializer.$deserialize(PrimitiveVisitor) + } + } + }; +} + +macro_rules! num_self { + ($ty:ident : $visit:ident) => { + #[inline] + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + Ok(v) + } + }; + + (nonzero $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if let Some(nonzero) = Self::Value::new(v) { + Ok(nonzero) + } else { + Err(Error::invalid_value(Unexpected::Unsigned(0), &self)) + } + } + }; + + (saturating $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + Ok(Saturating(v)) + } + }; +} + +macro_rules! num_as_self { + ($ty:ident : $visit:ident) => { + #[inline] + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + Ok(v as Self::Value) + } + }; + + (nonzero $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if let Some(nonzero) = Self::Value::new(v as $primitive) { + Ok(nonzero) + } else { + Err(Error::invalid_value(Unexpected::Unsigned(0), &self)) + } + } + }; + + (saturating $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + Ok(Saturating(v as $primitive)) + } + }; +} + +macro_rules! 
num_as_copysign_self { + ($ty:ident : $visit:ident) => { + #[inline] + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + #[cfg(not(feature = "std"))] + { + Ok(v as Self::Value) + } + + #[cfg(feature = "std")] + { + // Preserve sign of NaN. The `as` produces a nondeterministic sign. + let sign = if v.is_sign_positive() { 1.0 } else { -1.0 }; + Ok((v as Self::Value).copysign(sign)) + } + } + }; +} + +macro_rules! int_to_int { + ($ty:ident : $visit:ident) => { + #[inline] + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + Self::Value::try_from(v as i64) + .map_err(|_| Error::invalid_value(Unexpected::Signed(v as i64), &self)) + } + }; + + (nonzero $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if let Ok(v) = $primitive::try_from(v as i64) { + if let Some(nonzero) = Self::Value::new(v) { + return Ok(nonzero); + } + } + Err(Error::invalid_value(Unexpected::Signed(v as i64), &self)) + } + }; + + (saturating $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if (v as i64) < $primitive::MIN as i64 { + Ok(Saturating($primitive::MIN)) + } else if ($primitive::MAX as i64) < v as i64 { + Ok(Saturating($primitive::MAX)) + } else { + Ok(Saturating(v as $primitive)) + } + } + }; +} + +macro_rules! 
int_to_uint { + ($ty:ident : $visit:ident) => { + #[inline] + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if 0 <= v { + #[allow(irrefutable_let_patterns)] + if let Ok(v) = Self::Value::try_from(v as u64) { + return Ok(v as Self::Value); + } + } + Err(Error::invalid_value(Unexpected::Signed(v as i64), &self)) + } + }; + + (nonzero $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if 0 < v { + #[allow(irrefutable_let_patterns)] + if let Ok(v) = $primitive::try_from(v as u64) { + if let Some(nonzero) = Self::Value::new(v) { + return Ok(nonzero); + } + } + } + Err(Error::invalid_value(Unexpected::Signed(v as i64), &self)) + } + }; + + (saturating $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if v < 0 { + Ok(Saturating(0)) + } else if ($primitive::MAX as u64) < v as u64 { + Ok(Saturating($primitive::MAX)) + } else { + Ok(Saturating(v as $primitive)) + } + } + }; +} + +macro_rules! uint_to_self { + ($ty:ident : $visit:ident) => { + #[inline] + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + Self::Value::try_from(v as u64) + .map_err(|_| Error::invalid_value(Unexpected::Unsigned(v as u64), &self)) + } + }; + + (nonzero $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if let Ok(v) = $primitive::try_from(v as u64) { + if let Some(nonzero) = Self::Value::new(v) { + return Ok(nonzero); + } + } + Err(Error::invalid_value(Unexpected::Unsigned(v as u64), &self)) + } + }; + + (saturating $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if let Ok(v) = $primitive::try_from(v as u64) { + Ok(Saturating(v as $primitive)) + } else { + Ok(Saturating($primitive::MAX)) + } + } + }; +} + +impl_deserialize_num! 
{ + i8, NonZeroI8, deserialize_i8 + num_self!(i8:visit_i8); + int_to_int!(i16:visit_i16 i32:visit_i32 i64:visit_i64); + uint_to_self!(u8:visit_u8 u16:visit_u16 u32:visit_u32 u64:visit_u64); +} + +impl_deserialize_num! { + i16, NonZeroI16, deserialize_i16 + num_self!(i16:visit_i16); + num_as_self!(i8:visit_i8); + int_to_int!(i32:visit_i32 i64:visit_i64); + uint_to_self!(u8:visit_u8 u16:visit_u16 u32:visit_u32 u64:visit_u64); +} + +impl_deserialize_num! { + i32, NonZeroI32, deserialize_i32 + num_self!(i32:visit_i32); + num_as_self!(i8:visit_i8 i16:visit_i16); + int_to_int!(i64:visit_i64); + uint_to_self!(u8:visit_u8 u16:visit_u16 u32:visit_u32 u64:visit_u64); +} + +impl_deserialize_num! { + i64, NonZeroI64, deserialize_i64 + num_self!(i64:visit_i64); + num_as_self!(i8:visit_i8 i16:visit_i16 i32:visit_i32); + uint_to_self!(u8:visit_u8 u16:visit_u16 u32:visit_u32 u64:visit_u64); +} + +impl_deserialize_num! { + isize, NonZeroIsize, deserialize_i64 + num_as_self!(i8:visit_i8 i16:visit_i16); + int_to_int!(i32:visit_i32 i64:visit_i64); + uint_to_self!(u8:visit_u8 u16:visit_u16 u32:visit_u32 u64:visit_u64); +} + +impl_deserialize_num! { + u8, NonZeroU8, deserialize_u8 + num_self!(u8:visit_u8); + int_to_uint!(i8:visit_i8 i16:visit_i16 i32:visit_i32 i64:visit_i64); + uint_to_self!(u16:visit_u16 u32:visit_u32 u64:visit_u64); +} + +impl_deserialize_num! { + u16, NonZeroU16, deserialize_u16 + num_self!(u16:visit_u16); + num_as_self!(u8:visit_u8); + int_to_uint!(i8:visit_i8 i16:visit_i16 i32:visit_i32 i64:visit_i64); + uint_to_self!(u32:visit_u32 u64:visit_u64); +} + +impl_deserialize_num! { + u32, NonZeroU32, deserialize_u32 + num_self!(u32:visit_u32); + num_as_self!(u8:visit_u8 u16:visit_u16); + int_to_uint!(i8:visit_i8 i16:visit_i16 i32:visit_i32 i64:visit_i64); + uint_to_self!(u64:visit_u64); +} + +impl_deserialize_num! 
{ + u64, NonZeroU64, deserialize_u64 + num_self!(u64:visit_u64); + num_as_self!(u8:visit_u8 u16:visit_u16 u32:visit_u32); + int_to_uint!(i8:visit_i8 i16:visit_i16 i32:visit_i32 i64:visit_i64); +} + +impl_deserialize_num! { + usize, NonZeroUsize, deserialize_u64 + num_as_self!(u8:visit_u8 u16:visit_u16); + int_to_uint!(i8:visit_i8 i16:visit_i16 i32:visit_i32 i64:visit_i64); + uint_to_self!(u32:visit_u32 u64:visit_u64); +} + +impl_deserialize_num! { + f32, deserialize_f32 + num_self!(f32:visit_f32); + num_as_copysign_self!(f64:visit_f64); + num_as_self!(i8:visit_i8 i16:visit_i16 i32:visit_i32 i64:visit_i64); + num_as_self!(u8:visit_u8 u16:visit_u16 u32:visit_u32 u64:visit_u64); +} + +impl_deserialize_num! { + f64, deserialize_f64 + num_self!(f64:visit_f64); + num_as_copysign_self!(f32:visit_f32); + num_as_self!(i8:visit_i8 i16:visit_i16 i32:visit_i32 i64:visit_i64); + num_as_self!(u8:visit_u8 u16:visit_u16 u32:visit_u32 u64:visit_u64); +} + +macro_rules! num_128 { + ($ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if v as i128 >= Self::Value::MIN as i128 && v as u128 <= Self::Value::MAX as u128 { + Ok(v as Self::Value) + } else { + Err(Error::invalid_value( + Unexpected::Other(stringify!($ty)), + &self, + )) + } + } + }; + + (nonzero $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if v as i128 >= $primitive::MIN as i128 && v as u128 <= $primitive::MAX as u128 { + if let Some(nonzero) = Self::Value::new(v as $primitive) { + Ok(nonzero) + } else { + Err(Error::invalid_value(Unexpected::Unsigned(0), &self)) + } + } else { + Err(Error::invalid_value( + Unexpected::Other(stringify!($ty)), + &self, + )) + } + } + }; + + (saturating $primitive:ident $ty:ident : $visit:ident) => { + fn $visit(self, v: $ty) -> Result + where + E: Error, + { + if (v as i128) < $primitive::MIN as i128 { + Ok(Saturating($primitive::MIN)) + } else if ($primitive::MAX as u128) < v as u128 { + 
Ok(Saturating($primitive::MAX)) + } else { + Ok(Saturating(v as $primitive)) + } + } + }; +} + +impl_deserialize_num! { + i128, NonZeroI128, deserialize_i128 + num_self!(i128:visit_i128); + num_as_self!(i8:visit_i8 i16:visit_i16 i32:visit_i32 i64:visit_i64); + num_as_self!(u8:visit_u8 u16:visit_u16 u32:visit_u32 u64:visit_u64); + num_128!(u128:visit_u128); +} + +impl_deserialize_num! { + u128, NonZeroU128, deserialize_u128 + num_self!(u128:visit_u128); + num_as_self!(u8:visit_u8 u16:visit_u16 u32:visit_u32 u64:visit_u64); + int_to_uint!(i8:visit_i8 i16:visit_i16 i32:visit_i32 i64:visit_i64); + num_128!(i128:visit_i128); +} + +//////////////////////////////////////////////////////////////////////////////// + +struct CharVisitor; + +impl<'de> Visitor<'de> for CharVisitor { + type Value = char; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a character") + } + + #[inline] + fn visit_char(self, v: char) -> Result + where + E: Error, + { + Ok(v) + } + + #[inline] + fn visit_str(self, v: &str) -> Result + where + E: Error, + { + let mut iter = v.chars(); + match (iter.next(), iter.next()) { + (Some(c), None) => Ok(c), + _ => Err(Error::invalid_value(Unexpected::Str(v), &self)), + } + } +} + +impl<'de> Deserialize<'de> for char { + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_char(CharVisitor) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(any(feature = "std", feature = "alloc"))] +struct StringVisitor; +#[cfg(any(feature = "std", feature = "alloc"))] +struct StringInPlaceVisitor<'a>(&'a mut String); + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'de> Visitor<'de> for StringVisitor { + type Value = String; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a string") + } + + fn visit_str(self, v: &str) -> Result + where + E: Error, + { + 
Ok(v.to_owned()) + } + + fn visit_string(self, v: String) -> Result + where + E: Error, + { + Ok(v) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: Error, + { + match str::from_utf8(v) { + Ok(s) => Ok(s.to_owned()), + Err(_) => Err(Error::invalid_value(Unexpected::Bytes(v), &self)), + } + } + + fn visit_byte_buf(self, v: Vec) -> Result + where + E: Error, + { + match String::from_utf8(v) { + Ok(s) => Ok(s), + Err(e) => Err(Error::invalid_value( + Unexpected::Bytes(&e.into_bytes()), + &self, + )), + } + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'a, 'de> Visitor<'de> for StringInPlaceVisitor<'a> { + type Value = (); + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a string") + } + + fn visit_str(self, v: &str) -> Result + where + E: Error, + { + self.0.clear(); + self.0.push_str(v); + Ok(()) + } + + fn visit_string(self, v: String) -> Result + where + E: Error, + { + *self.0 = v; + Ok(()) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: Error, + { + match str::from_utf8(v) { + Ok(s) => { + self.0.clear(); + self.0.push_str(s); + Ok(()) + } + Err(_) => Err(Error::invalid_value(Unexpected::Bytes(v), &self)), + } + } + + fn visit_byte_buf(self, v: Vec) -> Result + where + E: Error, + { + match String::from_utf8(v) { + Ok(s) => { + *self.0 = s; + Ok(()) + } + Err(e) => Err(Error::invalid_value( + Unexpected::Bytes(&e.into_bytes()), + &self, + )), + } + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +impl<'de> Deserialize<'de> for String { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_string(StringVisitor) + } + + fn deserialize_in_place(deserializer: D, place: &mut Self) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + deserializer.deserialize_string(StringInPlaceVisitor(place)) + } +} + 
+//////////////////////////////////////////////////////////////////////////////// + +struct StrVisitor; + +impl<'a> Visitor<'a> for StrVisitor { + type Value = &'a str; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a borrowed string") + } + + fn visit_borrowed_str(self, v: &'a str) -> Result + where + E: Error, + { + Ok(v) // so easy + } + + fn visit_borrowed_bytes(self, v: &'a [u8]) -> Result + where + E: Error, + { + str::from_utf8(v).map_err(|_| Error::invalid_value(Unexpected::Bytes(v), &self)) + } +} + +impl<'de: 'a, 'a> Deserialize<'de> for &'a str { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_str(StrVisitor) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +struct BytesVisitor; + +impl<'a> Visitor<'a> for BytesVisitor { + type Value = &'a [u8]; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a borrowed byte array") + } + + fn visit_borrowed_bytes(self, v: &'a [u8]) -> Result + where + E: Error, + { + Ok(v) + } + + fn visit_borrowed_str(self, v: &'a str) -> Result + where + E: Error, + { + Ok(v.as_bytes()) + } +} + +impl<'de: 'a, 'a> Deserialize<'de> for &'a [u8] { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_bytes(BytesVisitor) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(any(feature = "std", all(not(no_core_cstr), feature = "alloc")))] +struct CStringVisitor; + +#[cfg(any(feature = "std", all(not(no_core_cstr), feature = "alloc")))] +impl<'de> Visitor<'de> for CStringVisitor { + type Value = CString; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("byte array") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let capacity = 
size_hint::cautious::(seq.size_hint()); + let mut values = Vec::::with_capacity(capacity); + + while let Some(value) = tri!(seq.next_element()) { + values.push(value); + } + + CString::new(values).map_err(Error::custom) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: Error, + { + CString::new(v).map_err(Error::custom) + } + + fn visit_byte_buf(self, v: Vec) -> Result + where + E: Error, + { + CString::new(v).map_err(Error::custom) + } + + fn visit_str(self, v: &str) -> Result + where + E: Error, + { + CString::new(v).map_err(Error::custom) + } + + fn visit_string(self, v: String) -> Result + where + E: Error, + { + CString::new(v).map_err(Error::custom) + } +} + +#[cfg(any(feature = "std", all(not(no_core_cstr), feature = "alloc")))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +impl<'de> Deserialize<'de> for CString { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_byte_buf(CStringVisitor) + } +} + +macro_rules! forwarded_impl { + ( + $(#[$attr:meta])* + ($($id:ident),*), $ty:ty, $func:expr + ) => { + $(#[$attr])* + impl<'de $(, $id : Deserialize<'de>,)*> Deserialize<'de> for $ty { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Deserialize::deserialize(deserializer).map($func) + } + } + } +} + +forwarded_impl! { + #[cfg(any(feature = "std", all(not(no_core_cstr), feature = "alloc")))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + (), Box, CString::into_boxed_c_str +} + +forwarded_impl! 
{ + (T), Reverse, Reverse +} + +//////////////////////////////////////////////////////////////////////////////// + +struct OptionVisitor { + marker: PhantomData, +} + +impl<'de, T> Visitor<'de> for OptionVisitor +where + T: Deserialize<'de>, +{ + type Value = Option; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("option") + } + + #[inline] + fn visit_unit(self) -> Result + where + E: Error, + { + Ok(None) + } + + #[inline] + fn visit_none(self) -> Result + where + E: Error, + { + Ok(None) + } + + #[inline] + fn visit_some(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + T::deserialize(deserializer).map(Some) + } + + fn __private_visit_untagged_option(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Ok(T::deserialize(deserializer).ok()) + } +} + +impl<'de, T> Deserialize<'de> for Option +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_option(OptionVisitor { + marker: PhantomData, + }) + } + + // The Some variant's repr is opaque, so we can't play cute tricks with its + // tag to have deserialize_in_place build the content in place unconditionally. + // + // FIXME: investigate whether branching on the old value being Some to + // deserialize_in_place the value is profitable (probably data-dependent?) 
+} + +//////////////////////////////////////////////////////////////////////////////// + +struct PhantomDataVisitor { + marker: PhantomData, +} + +impl<'de, T> Visitor<'de> for PhantomDataVisitor +where + T: ?Sized, +{ + type Value = PhantomData; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("unit") + } + + #[inline] + fn visit_unit(self) -> Result + where + E: Error, + { + Ok(PhantomData) + } +} + +impl<'de, T> Deserialize<'de> for PhantomData +where + T: ?Sized, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let visitor = PhantomDataVisitor { + marker: PhantomData, + }; + deserializer.deserialize_unit_struct("PhantomData", visitor) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! seq_impl { + ( + $(#[$attr:meta])* + $ty:ident , + $access:ident, + $clear:expr, + $with_capacity:expr, + $reserve:expr, + $insert:expr + ) => { + $(#[$attr])* + impl<'de, T $(, $typaram)*> Deserialize<'de> for $ty + where + T: Deserialize<'de> $(+ $tbound1 $(+ $tbound2)*)*, + $($typaram: $bound1 $(+ $bound2)*,)* + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct SeqVisitor { + marker: PhantomData<$ty>, + } + + impl<'de, T $(, $typaram)*> Visitor<'de> for SeqVisitor + where + T: Deserialize<'de> $(+ $tbound1 $(+ $tbound2)*)*, + $($typaram: $bound1 $(+ $bound2)*,)* + { + type Value = $ty; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a sequence") + } + + #[inline] + fn visit_seq(self, mut $access: A) -> Result + where + A: SeqAccess<'de>, + { + let mut values = $with_capacity; + + while let Some(value) = tri!($access.next_element()) { + $insert(&mut values, value); + } + + Ok(values) + } + } + + let visitor = SeqVisitor { marker: PhantomData }; + deserializer.deserialize_seq(visitor) + } + + fn deserialize_in_place(deserializer: D, place: &mut Self) -> 
Result<(), D::Error> + where + D: Deserializer<'de>, + { + struct SeqInPlaceVisitor<'a, T: 'a $(, $typaram: 'a)*>(&'a mut $ty); + + impl<'a, 'de, T $(, $typaram)*> Visitor<'de> for SeqInPlaceVisitor<'a, T $(, $typaram)*> + where + T: Deserialize<'de> $(+ $tbound1 $(+ $tbound2)*)*, + $($typaram: $bound1 $(+ $bound2)*,)* + { + type Value = (); + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a sequence") + } + + #[inline] + fn visit_seq(mut self, mut $access: A) -> Result + where + A: SeqAccess<'de>, + { + $clear(&mut self.0); + $reserve(&mut self.0, size_hint::cautious::($access.size_hint())); + + // FIXME: try to overwrite old values here? (Vec, VecDeque, LinkedList) + while let Some(value) = tri!($access.next_element()) { + $insert(&mut self.0, value); + } + + Ok(()) + } + } + + deserializer.deserialize_seq(SeqInPlaceVisitor(place)) + } + } + } +} + +// Dummy impl of reserve +#[cfg(any(feature = "std", feature = "alloc"))] +fn nop_reserve(_seq: T, _n: usize) {} + +seq_impl!( + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + BinaryHeap, + seq, + BinaryHeap::clear, + BinaryHeap::with_capacity(size_hint::cautious::(seq.size_hint())), + BinaryHeap::reserve, + BinaryHeap::push +); + +seq_impl!( + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + BTreeSet, + seq, + BTreeSet::clear, + BTreeSet::new(), + nop_reserve, + BTreeSet::insert +); + +seq_impl!( + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + LinkedList, + seq, + LinkedList::clear, + LinkedList::new(), + nop_reserve, + LinkedList::push_back +); + +seq_impl!( + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + HashSet, + seq, + HashSet::clear, + HashSet::with_capacity_and_hasher(size_hint::cautious::(seq.size_hint()), 
S::default()), + HashSet::reserve, + HashSet::insert +); + +seq_impl!( + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + VecDeque, + seq, + VecDeque::clear, + VecDeque::with_capacity(size_hint::cautious::(seq.size_hint())), + VecDeque::reserve, + VecDeque::push_back +); + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +impl<'de, T> Deserialize<'de> for Vec +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct VecVisitor { + marker: PhantomData, + } + + impl<'de, T> Visitor<'de> for VecVisitor + where + T: Deserialize<'de>, + { + type Value = Vec; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a sequence") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let capacity = size_hint::cautious::(seq.size_hint()); + let mut values = Vec::::with_capacity(capacity); + + while let Some(value) = tri!(seq.next_element()) { + values.push(value); + } + + Ok(values) + } + } + + let visitor = VecVisitor { + marker: PhantomData, + }; + deserializer.deserialize_seq(visitor) + } + + fn deserialize_in_place(deserializer: D, place: &mut Self) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + struct VecInPlaceVisitor<'a, T: 'a>(&'a mut Vec); + + impl<'a, 'de, T> Visitor<'de> for VecInPlaceVisitor<'a, T> + where + T: Deserialize<'de>, + { + type Value = (); + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a sequence") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let hint = size_hint::cautious::(seq.size_hint()); + if let Some(additional) = hint.checked_sub(self.0.len()) { + self.0.reserve(additional); + } + + for 
i in 0..self.0.len() { + let next = { + let next_place = InPlaceSeed(&mut self.0[i]); + tri!(seq.next_element_seed(next_place)) + }; + if next.is_none() { + self.0.truncate(i); + return Ok(()); + } + } + + while let Some(value) = tri!(seq.next_element()) { + self.0.push(value); + } + + Ok(()) + } + } + + deserializer.deserialize_seq(VecInPlaceVisitor(place)) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +struct ArrayVisitor { + marker: PhantomData, +} +struct ArrayInPlaceVisitor<'a, A: 'a>(&'a mut A); + +impl ArrayVisitor { + fn new() -> Self { + ArrayVisitor { + marker: PhantomData, + } + } +} + +impl<'de, T> Visitor<'de> for ArrayVisitor<[T; 0]> { + type Value = [T; 0]; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("an empty array") + } + + #[inline] + fn visit_seq(self, _: A) -> Result + where + A: SeqAccess<'de>, + { + Ok([]) + } +} + +// Does not require T: Deserialize<'de>. +impl<'de, T> Deserialize<'de> for [T; 0] { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_tuple(0, ArrayVisitor::<[T; 0]>::new()) + } +} + +macro_rules! 
array_impls { + ($($len:expr => ($($n:tt)+))+) => { + $( + impl<'de, T> Visitor<'de> for ArrayVisitor<[T; $len]> + where + T: Deserialize<'de>, + { + type Value = [T; $len]; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(concat!("an array of length ", $len)) + } + + #[inline] + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + Ok([$( + match tri!(seq.next_element()) { + Some(val) => val, + None => return Err(Error::invalid_length($n, &self)), + } + ),+]) + } + } + + impl<'a, 'de, T> Visitor<'de> for ArrayInPlaceVisitor<'a, [T; $len]> + where + T: Deserialize<'de>, + { + type Value = (); + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(concat!("an array of length ", $len)) + } + + #[inline] + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let mut fail_idx = None; + for (idx, dest) in self.0[..].iter_mut().enumerate() { + if tri!(seq.next_element_seed(InPlaceSeed(dest))).is_none() { + fail_idx = Some(idx); + break; + } + } + if let Some(idx) = fail_idx { + return Err(Error::invalid_length(idx, &self)); + } + Ok(()) + } + } + + impl<'de, T> Deserialize<'de> for [T; $len] + where + T: Deserialize<'de>, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_tuple($len, ArrayVisitor::<[T; $len]>::new()) + } + + fn deserialize_in_place(deserializer: D, place: &mut Self) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + deserializer.deserialize_tuple($len, ArrayInPlaceVisitor(place)) + } + } + )+ + } +} + +array_impls! 
{ + 1 => (0) + 2 => (0 1) + 3 => (0 1 2) + 4 => (0 1 2 3) + 5 => (0 1 2 3 4) + 6 => (0 1 2 3 4 5) + 7 => (0 1 2 3 4 5 6) + 8 => (0 1 2 3 4 5 6 7) + 9 => (0 1 2 3 4 5 6 7 8) + 10 => (0 1 2 3 4 5 6 7 8 9) + 11 => (0 1 2 3 4 5 6 7 8 9 10) + 12 => (0 1 2 3 4 5 6 7 8 9 10 11) + 13 => (0 1 2 3 4 5 6 7 8 9 10 11 12) + 14 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13) + 15 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14) + 16 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15) + 17 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16) + 18 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17) + 19 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18) + 20 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19) + 21 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20) + 22 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21) + 23 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22) + 24 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23) + 25 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24) + 26 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25) + 27 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26) + 28 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27) + 29 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28) + 30 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29) + 31 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30) + 32 => (0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31) +} + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! 
tuple_impls {
    ($($len:tt => ($($n:tt $name:ident)+))+) => {
        $(
            #[cfg_attr(docsrs, doc(hidden))]
            impl<'de, $($name),+> Deserialize<'de> for ($($name,)+)
            where
                $($name: Deserialize<'de>,)+
            {
                tuple_impl_body!($len => ($($n $name)+));
            }
        )+
    };
}

// Shared body of the tuple Deserialize impls: a positional visitor driven by
// deserialize_tuple, plus an in-place variant that fills an existing tuple.
macro_rules! tuple_impl_body {
    ($len:tt => ($($n:tt $name:ident)+)) => {
        #[inline]
        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            struct TupleVisitor<$($name,)+> {
                marker: PhantomData<($($name,)+)>,
            }

            impl<'de, $($name: Deserialize<'de>),+> Visitor<'de> for TupleVisitor<$($name,)+> {
                type Value = ($($name,)+);

                fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                    formatter.write_str(concat!("a tuple of size ", $len))
                }

                #[inline]
                #[allow(non_snake_case)]
                fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
                where
                    A: SeqAccess<'de>,
                {
                    $(
                        let $name = match tri!(seq.next_element()) {
                            Some(value) => value,
                            None => return Err(Error::invalid_length($n, &self)),
                        };
                    )+

                    Ok(($($name,)+))
                }
            }

            deserializer.deserialize_tuple($len, TupleVisitor { marker: PhantomData })
        }

        #[inline]
        fn deserialize_in_place<D>(deserializer: D, place: &mut Self) -> Result<(), D::Error>
        where
            D: Deserializer<'de>,
        {
            struct TupleInPlaceVisitor<'a, $($name: 'a,)+>(&'a mut ($($name,)+));

            impl<'a, 'de, $($name: Deserialize<'de>),+> Visitor<'de> for TupleInPlaceVisitor<'a, $($name,)+> {
                type Value = ();

                fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                    formatter.write_str(concat!("a tuple of size ", $len))
                }

                #[inline]
                #[allow(non_snake_case)]
                fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
                where
                    A: SeqAccess<'de>,
                {
                    $(
                        if tri!(seq.next_element_seed(InPlaceSeed(&mut (self.0).$n))).is_none() {
                            return Err(Error::invalid_length($n, &self));
                        }
                    )+

                    Ok(())
                }
            }

            deserializer.deserialize_tuple($len, TupleInPlaceVisitor(place))
        }
    };
}

#[cfg_attr(docsrs, doc(fake_variadic))]
#[cfg_attr(
    docsrs,
    doc = "This trait is implemented for tuples up to 16 items long."
)]
impl<'de, T> Deserialize<'de> for (T,)
where
    T: Deserialize<'de>,
{
    tuple_impl_body!(1 => (0 T));
}

tuple_impls! {
    2 => (0 T0 1 T1)
    3 => (0 T0 1 T1 2 T2)
    4 => (0 T0 1 T1 2 T2 3 T3)
    5 => (0 T0 1 T1 2 T2 3 T3 4 T4)
    6 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5)
    7 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6)
    8 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7)
    9 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8)
    10 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9)
    11 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10)
    12 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10 11 T11)
    13 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10 11 T11 12 T12)
    14 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10 11 T11 12 T12 13 T13)
    15 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10 11 T11 12 T12 13 T13 14 T14)
    16 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10 11 T11 12 T12 13 T13 14 T14 15 T15)
}

////////////////////////////////////////////////////////////////////////////////

macro_rules!
map_impl { + ( + $(#[$attr:meta])* + $ty:ident , + $access:ident, + $with_capacity:expr, + ) => { + $(#[$attr])* + impl<'de, K, V $(, $typaram)*> Deserialize<'de> for $ty + where + K: Deserialize<'de> $(+ $kbound1 $(+ $kbound2)*)*, + V: Deserialize<'de>, + $($typaram: $bound1 $(+ $bound2)*),* + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct MapVisitor { + marker: PhantomData<$ty>, + } + + impl<'de, K, V $(, $typaram)*> Visitor<'de> for MapVisitor + where + K: Deserialize<'de> $(+ $kbound1 $(+ $kbound2)*)*, + V: Deserialize<'de>, + $($typaram: $bound1 $(+ $bound2)*),* + { + type Value = $ty; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a map") + } + + #[inline] + fn visit_map(self, mut $access: A) -> Result + where + A: MapAccess<'de>, + { + let mut values = $with_capacity; + + while let Some((key, value)) = tri!($access.next_entry()) { + values.insert(key, value); + } + + Ok(values) + } + } + + let visitor = MapVisitor { marker: PhantomData }; + deserializer.deserialize_map(visitor) + } + } + } +} + +map_impl! { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + BTreeMap, + map, + BTreeMap::new(), +} + +map_impl! { + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + HashMap, + map, + HashMap::with_capacity_and_hasher(size_hint::cautious::<(K, V)>(map.size_hint()), S::default()), +} + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(any(feature = "std", not(no_core_net)))] +macro_rules! 
parse_ip_impl { + ($ty:ty, $expecting:expr, $size:tt) => { + impl<'de> Deserialize<'de> for $ty { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + if deserializer.is_human_readable() { + deserializer.deserialize_str(FromStrVisitor::new($expecting)) + } else { + <[u8; $size]>::deserialize(deserializer).map(<$ty>::from) + } + } + } + }; +} + +#[cfg(any(feature = "std", not(no_core_net)))] +macro_rules! variant_identifier { + ( + $name_kind:ident ($($variant:ident; $bytes:expr; $index:expr),*) + $expecting_message:expr, + $variants_name:ident + ) => { + enum $name_kind { + $($variant),* + } + + static $variants_name: &[&str] = &[$(stringify!($variant)),*]; + + impl<'de> Deserialize<'de> for $name_kind { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct KindVisitor; + + impl<'de> Visitor<'de> for KindVisitor { + type Value = $name_kind; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str($expecting_message) + } + + fn visit_u64(self, value: u64) -> Result + where + E: Error, + { + match value { + $( + $index => Ok($name_kind :: $variant), + )* + _ => Err(Error::invalid_value(Unexpected::Unsigned(value), &self),), + } + } + + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + $( + stringify!($variant) => Ok($name_kind :: $variant), + )* + _ => Err(Error::unknown_variant(value, $variants_name)), + } + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + E: Error, + { + match value { + $( + $bytes => Ok($name_kind :: $variant), + )* + _ => { + match str::from_utf8(value) { + Ok(value) => Err(Error::unknown_variant(value, $variants_name)), + Err(_) => Err(Error::invalid_value(Unexpected::Bytes(value), &self)), + } + } + } + } + } + + deserializer.deserialize_identifier(KindVisitor) + } + } + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +macro_rules! 
deserialize_enum { + ( + $name:ident $name_kind:ident ($($variant:ident; $bytes:expr; $index:expr),*) + $expecting_message:expr, + $deserializer:expr + ) => { + variant_identifier! { + $name_kind ($($variant; $bytes; $index),*) + $expecting_message, + VARIANTS + } + + struct EnumVisitor; + impl<'de> Visitor<'de> for EnumVisitor { + type Value = $name; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(concat!("a ", stringify!($name))) + } + + + fn visit_enum(self, data: A) -> Result + where + A: EnumAccess<'de>, + { + match tri!(data.variant()) { + $( + ($name_kind :: $variant, v) => v.newtype_variant().map($name :: $variant), + )* + } + } + } + $deserializer.deserialize_enum(stringify!($name), VARIANTS, EnumVisitor) + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +impl<'de> Deserialize<'de> for net::IpAddr { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + if deserializer.is_human_readable() { + deserializer.deserialize_str(FromStrVisitor::new("IP address")) + } else { + use crate::lib::net::IpAddr; + deserialize_enum! { + IpAddr IpAddrKind (V4; b"V4"; 0, V6; b"V6"; 1) + "`V4` or `V6`", + deserializer + } + } + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +parse_ip_impl!(net::Ipv4Addr, "IPv4 address", 4); + +#[cfg(any(feature = "std", not(no_core_net)))] +parse_ip_impl!(net::Ipv6Addr, "IPv6 address", 16); + +#[cfg(any(feature = "std", not(no_core_net)))] +macro_rules! 
parse_socket_impl { + ( + $ty:ty, $expecting:tt, + $new:expr, + ) => { + impl<'de> Deserialize<'de> for $ty { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + if deserializer.is_human_readable() { + deserializer.deserialize_str(FromStrVisitor::new($expecting)) + } else { + <(_, u16)>::deserialize(deserializer).map($new) + } + } + } + }; +} + +#[cfg(any(feature = "std", not(no_core_net)))] +impl<'de> Deserialize<'de> for net::SocketAddr { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + if deserializer.is_human_readable() { + deserializer.deserialize_str(FromStrVisitor::new("socket address")) + } else { + use crate::lib::net::SocketAddr; + deserialize_enum! { + SocketAddr SocketAddrKind (V4; b"V4"; 0, V6; b"V6"; 1) + "`V4` or `V6`", + deserializer + } + } + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +parse_socket_impl! { + net::SocketAddrV4, "IPv4 socket address", + |(ip, port)| net::SocketAddrV4::new(ip, port), +} + +#[cfg(any(feature = "std", not(no_core_net)))] +parse_socket_impl! 
{ + net::SocketAddrV6, "IPv6 socket address", + |(ip, port)| net::SocketAddrV6::new(ip, port, 0, 0), +} + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(feature = "std")] +struct PathVisitor; + +#[cfg(feature = "std")] +impl<'a> Visitor<'a> for PathVisitor { + type Value = &'a Path; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a borrowed path") + } + + fn visit_borrowed_str(self, v: &'a str) -> Result + where + E: Error, + { + Ok(v.as_ref()) + } + + fn visit_borrowed_bytes(self, v: &'a [u8]) -> Result + where + E: Error, + { + str::from_utf8(v) + .map(AsRef::as_ref) + .map_err(|_| Error::invalid_value(Unexpected::Bytes(v), &self)) + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl<'de: 'a, 'a> Deserialize<'de> for &'a Path { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_str(PathVisitor) + } +} + +#[cfg(feature = "std")] +struct PathBufVisitor; + +#[cfg(feature = "std")] +impl<'de> Visitor<'de> for PathBufVisitor { + type Value = PathBuf; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("path string") + } + + fn visit_str(self, v: &str) -> Result + where + E: Error, + { + Ok(From::from(v)) + } + + fn visit_string(self, v: String) -> Result + where + E: Error, + { + Ok(From::from(v)) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: Error, + { + str::from_utf8(v) + .map(From::from) + .map_err(|_| Error::invalid_value(Unexpected::Bytes(v), &self)) + } + + fn visit_byte_buf(self, v: Vec) -> Result + where + E: Error, + { + String::from_utf8(v) + .map(From::from) + .map_err(|e| Error::invalid_value(Unexpected::Bytes(&e.into_bytes()), &self)) + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl<'de> Deserialize<'de> for PathBuf { + fn deserialize(deserializer: D) -> Result + where + D: 
Deserializer<'de>, + { + deserializer.deserialize_string(PathBufVisitor) + } +} + +forwarded_impl! { + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + (), Box, PathBuf::into_boxed_path +} + +//////////////////////////////////////////////////////////////////////////////// + +// If this were outside of the serde crate, it would just use: +// +// #[derive(Deserialize)] +// #[serde(variant_identifier)] +#[cfg(all(feature = "std", any(unix, windows)))] +variant_identifier! { + OsStringKind (Unix; b"Unix"; 0, Windows; b"Windows"; 1) + "`Unix` or `Windows`", + OSSTR_VARIANTS +} + +#[cfg(all(feature = "std", any(unix, windows)))] +struct OsStringVisitor; + +#[cfg(all(feature = "std", any(unix, windows)))] +impl<'de> Visitor<'de> for OsStringVisitor { + type Value = OsString; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("os string") + } + + #[cfg(unix)] + fn visit_enum(self, data: A) -> Result + where + A: EnumAccess<'de>, + { + use std::os::unix::ffi::OsStringExt; + + match tri!(data.variant()) { + (OsStringKind::Unix, v) => v.newtype_variant().map(OsString::from_vec), + (OsStringKind::Windows, _) => Err(Error::custom( + "cannot deserialize Windows OS string on Unix", + )), + } + } + + #[cfg(windows)] + fn visit_enum(self, data: A) -> Result + where + A: EnumAccess<'de>, + { + use std::os::windows::ffi::OsStringExt; + + match tri!(data.variant()) { + (OsStringKind::Windows, v) => v + .newtype_variant::>() + .map(|vec| OsString::from_wide(&vec)), + (OsStringKind::Unix, _) => Err(Error::custom( + "cannot deserialize Unix OS string on Windows", + )), + } + } +} + +#[cfg(all(feature = "std", any(unix, windows)))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "std", any(unix, windows)))))] +impl<'de> Deserialize<'de> for OsString { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_enum("OsString", OSSTR_VARIANTS, OsStringVisitor) + } +} + 
+//////////////////////////////////////////////////////////////////////////////// + +forwarded_impl! { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + (T), Box, Box::new +} + +forwarded_impl! { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + (T), Box<[T]>, Vec::into_boxed_slice +} + +forwarded_impl! { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + (), Box, String::into_boxed_str +} + +forwarded_impl! { + #[cfg(all(feature = "std", any(unix, windows)))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "std", any(unix, windows)))))] + (), Box, OsString::into_boxed_os_str +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +impl<'de, 'a, T> Deserialize<'de> for Cow<'a, T> +where + T: ?Sized + ToOwned, + T::Owned: Deserialize<'de>, +{ + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + T::Owned::deserialize(deserializer).map(Cow::Owned) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// This impl requires the [`"rc"`] Cargo feature of Serde. The resulting +/// `Weak` has a reference count of 0 and cannot be upgraded. +/// +/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc +#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))) +)] +impl<'de, T> Deserialize<'de> for RcWeak +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + tri!(Option::::deserialize(deserializer)); + Ok(RcWeak::new()) + } +} + +/// This impl requires the [`"rc"`] Cargo feature of Serde. 
The resulting +/// `Weak` has a reference count of 0 and cannot be upgraded. +/// +/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc +#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))) +)] +impl<'de, T> Deserialize<'de> for ArcWeak +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + tri!(Option::::deserialize(deserializer)); + Ok(ArcWeak::new()) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! box_forwarded_impl { + ( + $(#[$attr:meta])* + $t:ident + ) => { + $(#[$attr])* + impl<'de, T> Deserialize<'de> for $t + where + T: ?Sized, + Box: Deserialize<'de>, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Box::deserialize(deserializer).map(Into::into) + } + } + }; +} + +box_forwarded_impl! { + /// This impl requires the [`"rc"`] Cargo feature of Serde. + /// + /// Deserializing a data structure containing `Rc` will not attempt to + /// deduplicate `Rc` references to the same data. Every deserialized `Rc` + /// will end up with a strong count of 1. + /// + /// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc + #[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))] + Rc +} + +box_forwarded_impl! { + /// This impl requires the [`"rc"`] Cargo feature of Serde. + /// + /// Deserializing a data structure containing `Arc` will not attempt to + /// deduplicate `Arc` references to the same data. Every deserialized `Arc` + /// will end up with a strong count of 1. 
+ /// + /// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc + #[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))] + Arc +} + +//////////////////////////////////////////////////////////////////////////////// + +impl<'de, T> Deserialize<'de> for Cell +where + T: Deserialize<'de> + Copy, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + T::deserialize(deserializer).map(Cell::new) + } +} + +forwarded_impl! { + (T), RefCell, RefCell::new +} + +forwarded_impl! { + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + (T), Mutex, Mutex::new +} + +forwarded_impl! { + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + (T), RwLock, RwLock::new +} + +//////////////////////////////////////////////////////////////////////////////// + +// This is a cleaned-up version of the impl generated by: +// +// #[derive(Deserialize)] +// #[serde(deny_unknown_fields)] +// struct Duration { +// secs: u64, +// nanos: u32, +// } +impl<'de> Deserialize<'de> for Duration { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + // If this were outside of the serde crate, it would just use: + // + // #[derive(Deserialize)] + // #[serde(field_identifier, rename_all = "lowercase")] + enum Field { + Secs, + Nanos, + } + + impl<'de> Deserialize<'de> for Field { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct FieldVisitor; + + impl<'de> Visitor<'de> for FieldVisitor { + type Value = Field; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("`secs` or `nanos`") + } + + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + "secs" => Ok(Field::Secs), + "nanos" => Ok(Field::Nanos), + _ => Err(Error::unknown_field(value, FIELDS)), + } + } + + fn visit_bytes(self, value: 
&[u8]) -> Result<Self::Value, E>
                    where
                        E: Error,
                    {
                        match value {
                            b"secs" => Ok(Field::Secs),
                            b"nanos" => Ok(Field::Nanos),
                            _ => {
                                let value = private::string::from_utf8_lossy(value);
                                Err(Error::unknown_field(&*value, FIELDS))
                            }
                        }
                    }
                }

                deserializer.deserialize_identifier(FieldVisitor)
            }
        }

        // Reject (secs, nanos) pairs whose normalized seconds would overflow
        // u64, matching what Duration::new would panic on.
        fn check_overflow<E>(secs: u64, nanos: u32) -> Result<(), E>
        where
            E: Error,
        {
            static NANOS_PER_SEC: u32 = 1_000_000_000;
            match secs.checked_add((nanos / NANOS_PER_SEC) as u64) {
                Some(_) => Ok(()),
                None => Err(E::custom("overflow deserializing Duration")),
            }
        }

        struct DurationVisitor;

        impl<'de> Visitor<'de> for DurationVisitor {
            type Value = Duration;

            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("struct Duration")
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
            where
                A: SeqAccess<'de>,
            {
                let secs: u64 = match tri!(seq.next_element()) {
                    Some(value) => value,
                    None => {
                        return Err(Error::invalid_length(0, &self));
                    }
                };
                let nanos: u32 = match tri!(seq.next_element()) {
                    Some(value) => value,
                    None => {
                        return Err(Error::invalid_length(1, &self));
                    }
                };
                tri!(check_overflow(secs, nanos));
                Ok(Duration::new(secs, nanos))
            }

            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
            where
                A: MapAccess<'de>,
            {
                let mut secs: Option<u64> = None;
                let mut nanos: Option<u32> = None;
                while let Some(key) = tri!(map.next_key()) {
                    match key {
                        Field::Secs => {
                            if secs.is_some() {
                                return Err(<A::Error>::duplicate_field("secs"));
                            }
                            secs = Some(tri!(map.next_value()));
                        }
                        Field::Nanos => {
                            if nanos.is_some() {
                                return Err(<A::Error>::duplicate_field("nanos"));
                            }
                            nanos = Some(tri!(map.next_value()));
                        }
                    }
                }
                let secs = match secs {
                    Some(secs) => secs,
                    None => return Err(<A::Error>::missing_field("secs")),
                };
                let nanos = match nanos {
                    Some(nanos) => nanos,
                    None => return Err(<A::Error>::missing_field("nanos")),
                };
                tri!(check_overflow(secs, nanos));
                Ok(Duration::new(secs, nanos))
            }
        }

        const FIELDS: &[&str] = &["secs", "nanos"];
        deserializer.deserialize_struct("Duration", FIELDS, DurationVisitor)
    }
}

////////////////////////////////////////////////////////////////////////////////

#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl<'de> Deserialize<'de> for SystemTime {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Reuse duration
        enum Field {
            Secs,
            Nanos,
        }

        impl<'de> Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where
                D: Deserializer<'de>,
            {
                struct FieldVisitor;

                impl<'de> Visitor<'de> for FieldVisitor {
                    type Value = Field;

                    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                        formatter.write_str("`secs_since_epoch` or `nanos_since_epoch`")
                    }

                    fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
                    where
                        E: Error,
                    {
                        match value {
                            "secs_since_epoch" => Ok(Field::Secs),
                            "nanos_since_epoch" => Ok(Field::Nanos),
                            _ => Err(Error::unknown_field(value, FIELDS)),
                        }
                    }

                    fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
                    where
                        E: Error,
                    {
                        match value {
                            b"secs_since_epoch" => Ok(Field::Secs),
                            b"nanos_since_epoch" => Ok(Field::Nanos),
                            _ => {
                                let value = String::from_utf8_lossy(value);
                                Err(Error::unknown_field(&value, FIELDS))
                            }
                        }
                    }
                }

                deserializer.deserialize_identifier(FieldVisitor)
            }
        }

        // Same normalization-overflow guard as the Duration impl above.
        fn check_overflow<E>(secs: u64, nanos: u32) -> Result<(), E>
        where
            E: Error,
        {
            static NANOS_PER_SEC: u32 = 1_000_000_000;
            match secs.checked_add((nanos / NANOS_PER_SEC) as u64) {
                Some(_) => Ok(()),
                None => Err(E::custom("overflow deserializing SystemTime epoch offset")),
            }
        }

        struct DurationVisitor;

        impl<'de> Visitor<'de> for DurationVisitor {
            type Value = Duration;

            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("struct SystemTime")
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
            where
                A: SeqAccess<'de>,
            {
let secs: u64 = match tri!(seq.next_element()) { + Some(value) => value, + None => { + return Err(Error::invalid_length(0, &self)); + } + }; + let nanos: u32 = match tri!(seq.next_element()) { + Some(value) => value, + None => { + return Err(Error::invalid_length(1, &self)); + } + }; + tri!(check_overflow(secs, nanos)); + Ok(Duration::new(secs, nanos)) + } + + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let mut secs: Option = None; + let mut nanos: Option = None; + while let Some(key) = tri!(map.next_key()) { + match key { + Field::Secs => { + if secs.is_some() { + return Err(::duplicate_field( + "secs_since_epoch", + )); + } + secs = Some(tri!(map.next_value())); + } + Field::Nanos => { + if nanos.is_some() { + return Err(::duplicate_field( + "nanos_since_epoch", + )); + } + nanos = Some(tri!(map.next_value())); + } + } + } + let secs = match secs { + Some(secs) => secs, + None => return Err(::missing_field("secs_since_epoch")), + }; + let nanos = match nanos { + Some(nanos) => nanos, + None => return Err(::missing_field("nanos_since_epoch")), + }; + tri!(check_overflow(secs, nanos)); + Ok(Duration::new(secs, nanos)) + } + } + + const FIELDS: &[&str] = &["secs_since_epoch", "nanos_since_epoch"]; + let duration = tri!(deserializer.deserialize_struct("SystemTime", FIELDS, DurationVisitor)); + UNIX_EPOCH + .checked_add(duration) + .ok_or_else(|| D::Error::custom("overflow deserializing SystemTime")) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +// Similar to: +// +// #[derive(Deserialize)] +// #[serde(deny_unknown_fields)] +// struct Range { +// start: Idx, +// end: Idx, +// } +impl<'de, Idx> Deserialize<'de> for Range +where + Idx: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let (start, end) = tri!(deserializer.deserialize_struct( + "Range", + range::FIELDS, + range::RangeVisitor { + expecting: "struct Range", + phantom: 
PhantomData, + }, + )); + Ok(start..end) + } +} + +impl<'de, Idx> Deserialize<'de> for RangeInclusive +where + Idx: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let (start, end) = tri!(deserializer.deserialize_struct( + "RangeInclusive", + range::FIELDS, + range::RangeVisitor { + expecting: "struct RangeInclusive", + phantom: PhantomData, + }, + )); + Ok(RangeInclusive::new(start, end)) + } +} + +mod range { + use crate::lib::*; + + use crate::de::{Deserialize, Deserializer, Error, MapAccess, SeqAccess, Visitor}; + use crate::private; + + pub const FIELDS: &[&str] = &["start", "end"]; + + // If this were outside of the serde crate, it would just use: + // + // #[derive(Deserialize)] + // #[serde(field_identifier, rename_all = "lowercase")] + enum Field { + Start, + End, + } + + impl<'de> Deserialize<'de> for Field { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct FieldVisitor; + + impl<'de> Visitor<'de> for FieldVisitor { + type Value = Field; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("`start` or `end`") + } + + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + "start" => Ok(Field::Start), + "end" => Ok(Field::End), + _ => Err(Error::unknown_field(value, FIELDS)), + } + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + E: Error, + { + match value { + b"start" => Ok(Field::Start), + b"end" => Ok(Field::End), + _ => { + let value = private::string::from_utf8_lossy(value); + Err(Error::unknown_field(&*value, FIELDS)) + } + } + } + } + + deserializer.deserialize_identifier(FieldVisitor) + } + } + + pub struct RangeVisitor { + pub expecting: &'static str, + pub phantom: PhantomData, + } + + impl<'de, Idx> Visitor<'de> for RangeVisitor + where + Idx: Deserialize<'de>, + { + type Value = (Idx, Idx); + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + 
formatter.write_str(self.expecting) + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let start: Idx = match tri!(seq.next_element()) { + Some(value) => value, + None => { + return Err(Error::invalid_length(0, &self)); + } + }; + let end: Idx = match tri!(seq.next_element()) { + Some(value) => value, + None => { + return Err(Error::invalid_length(1, &self)); + } + }; + Ok((start, end)) + } + + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let mut start: Option = None; + let mut end: Option = None; + while let Some(key) = tri!(map.next_key()) { + match key { + Field::Start => { + if start.is_some() { + return Err(::duplicate_field("start")); + } + start = Some(tri!(map.next_value())); + } + Field::End => { + if end.is_some() { + return Err(::duplicate_field("end")); + } + end = Some(tri!(map.next_value())); + } + } + } + let start = match start { + Some(start) => start, + None => return Err(::missing_field("start")), + }; + let end = match end { + Some(end) => end, + None => return Err(::missing_field("end")), + }; + Ok((start, end)) + } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +// Similar to: +// +// #[derive(Deserialize)] +// #[serde(deny_unknown_fields)] +// struct RangeFrom { +// start: Idx, +// } +impl<'de, Idx> Deserialize<'de> for RangeFrom +where + Idx: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let start = tri!(deserializer.deserialize_struct( + "RangeFrom", + range_from::FIELDS, + range_from::RangeFromVisitor { + expecting: "struct RangeFrom", + phantom: PhantomData, + }, + )); + Ok(start..) 
+ } +} + +mod range_from { + use crate::lib::*; + + use crate::de::{Deserialize, Deserializer, Error, MapAccess, SeqAccess, Visitor}; + use crate::private; + + pub const FIELDS: &[&str] = &["start"]; + + // If this were outside of the serde crate, it would just use: + // + // #[derive(Deserialize)] + // #[serde(field_identifier, rename_all = "lowercase")] + enum Field { + Start, + } + + impl<'de> Deserialize<'de> for Field { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct FieldVisitor; + + impl<'de> Visitor<'de> for FieldVisitor { + type Value = Field; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("`start`") + } + + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + "start" => Ok(Field::Start), + _ => Err(Error::unknown_field(value, FIELDS)), + } + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + E: Error, + { + match value { + b"start" => Ok(Field::Start), + _ => { + let value = private::string::from_utf8_lossy(value); + Err(Error::unknown_field(&*value, FIELDS)) + } + } + } + } + + deserializer.deserialize_identifier(FieldVisitor) + } + } + + pub struct RangeFromVisitor { + pub expecting: &'static str, + pub phantom: PhantomData, + } + + impl<'de, Idx> Visitor<'de> for RangeFromVisitor + where + Idx: Deserialize<'de>, + { + type Value = Idx; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(self.expecting) + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let start: Idx = match tri!(seq.next_element()) { + Some(value) => value, + None => { + return Err(Error::invalid_length(0, &self)); + } + }; + Ok(start) + } + + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let mut start: Option = None; + while let Some(key) = tri!(map.next_key()) { + match key { + Field::Start => { + if start.is_some() { + return 
Err(::duplicate_field("start")); + } + start = Some(tri!(map.next_value())); + } + } + } + let start = match start { + Some(start) => start, + None => return Err(::missing_field("start")), + }; + Ok(start) + } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +// Similar to: +// +// #[derive(Deserialize)] +// #[serde(deny_unknown_fields)] +// struct RangeTo { +// end: Idx, +// } +impl<'de, Idx> Deserialize<'de> for RangeTo +where + Idx: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let end = tri!(deserializer.deserialize_struct( + "RangeTo", + range_to::FIELDS, + range_to::RangeToVisitor { + expecting: "struct RangeTo", + phantom: PhantomData, + }, + )); + Ok(..end) + } +} + +mod range_to { + use crate::lib::*; + + use crate::de::{Deserialize, Deserializer, Error, MapAccess, SeqAccess, Visitor}; + use crate::private; + + pub const FIELDS: &[&str] = &["end"]; + + // If this were outside of the serde crate, it would just use: + // + // #[derive(Deserialize)] + // #[serde(field_identifier, rename_all = "lowercase")] + enum Field { + End, + } + + impl<'de> Deserialize<'de> for Field { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct FieldVisitor; + + impl<'de> Visitor<'de> for FieldVisitor { + type Value = Field; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("`end`") + } + + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + "end" => Ok(Field::End), + _ => Err(Error::unknown_field(value, FIELDS)), + } + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + E: Error, + { + match value { + b"end" => Ok(Field::End), + _ => { + let value = private::string::from_utf8_lossy(value); + Err(Error::unknown_field(&*value, FIELDS)) + } + } + } + } + + deserializer.deserialize_identifier(FieldVisitor) + } + } + + pub struct RangeToVisitor { + pub expecting: 
&'static str, + pub phantom: PhantomData, + } + + impl<'de, Idx> Visitor<'de> for RangeToVisitor + where + Idx: Deserialize<'de>, + { + type Value = Idx; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(self.expecting) + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let end: Idx = match tri!(seq.next_element()) { + Some(value) => value, + None => { + return Err(Error::invalid_length(0, &self)); + } + }; + Ok(end) + } + + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let mut end: Option = None; + while let Some(key) = tri!(map.next_key()) { + match key { + Field::End => { + if end.is_some() { + return Err(::duplicate_field("end")); + } + end = Some(tri!(map.next_value())); + } + } + } + let end = match end { + Some(end) => end, + None => return Err(::missing_field("end")), + }; + Ok(end) + } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl<'de, T> Deserialize<'de> for Bound +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + enum Field { + Unbounded, + Included, + Excluded, + } + + impl<'de> Deserialize<'de> for Field { + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct FieldVisitor; + + impl<'de> Visitor<'de> for FieldVisitor { + type Value = Field; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("`Unbounded`, `Included` or `Excluded`") + } + + fn visit_u64(self, value: u64) -> Result + where + E: Error, + { + match value { + 0 => Ok(Field::Unbounded), + 1 => Ok(Field::Included), + 2 => Ok(Field::Excluded), + _ => Err(Error::invalid_value(Unexpected::Unsigned(value), &self)), + } + } + + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + "Unbounded" => Ok(Field::Unbounded), + "Included" => Ok(Field::Included), + 
"Excluded" => Ok(Field::Excluded), + _ => Err(Error::unknown_variant(value, VARIANTS)), + } + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + E: Error, + { + match value { + b"Unbounded" => Ok(Field::Unbounded), + b"Included" => Ok(Field::Included), + b"Excluded" => Ok(Field::Excluded), + _ => match str::from_utf8(value) { + Ok(value) => Err(Error::unknown_variant(value, VARIANTS)), + Err(_) => { + Err(Error::invalid_value(Unexpected::Bytes(value), &self)) + } + }, + } + } + } + + deserializer.deserialize_identifier(FieldVisitor) + } + } + + struct BoundVisitor(PhantomData>); + + impl<'de, T> Visitor<'de> for BoundVisitor + where + T: Deserialize<'de>, + { + type Value = Bound; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("enum Bound") + } + + fn visit_enum(self, data: A) -> Result + where + A: EnumAccess<'de>, + { + match tri!(data.variant()) { + (Field::Unbounded, v) => v.unit_variant().map(|()| Bound::Unbounded), + (Field::Included, v) => v.newtype_variant().map(Bound::Included), + (Field::Excluded, v) => v.newtype_variant().map(Bound::Excluded), + } + } + } + + const VARIANTS: &[&str] = &["Unbounded", "Included", "Excluded"]; + + deserializer.deserialize_enum("Bound", VARIANTS, BoundVisitor(PhantomData)) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(feature = "result")] +#[cfg_attr(docsrs, doc(cfg(feature = "result")))] +impl<'de, T, E> Deserialize<'de> for Result +where + T: Deserialize<'de>, + E: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + // If this were outside of the serde crate, it would just use: + // + // #[derive(Deserialize)] + // #[serde(variant_identifier)] + enum Field { + Ok, + Err, + } + + impl<'de> Deserialize<'de> for Field { + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct FieldVisitor; + + impl<'de> Visitor<'de> for 
FieldVisitor { + type Value = Field; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("`Ok` or `Err`") + } + + fn visit_u64(self, value: u64) -> Result + where + E: Error, + { + match value { + 0 => Ok(Field::Ok), + 1 => Ok(Field::Err), + _ => Err(Error::invalid_value(Unexpected::Unsigned(value), &self)), + } + } + + fn visit_str(self, value: &str) -> Result + where + E: Error, + { + match value { + "Ok" => Ok(Field::Ok), + "Err" => Ok(Field::Err), + _ => Err(Error::unknown_variant(value, VARIANTS)), + } + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + E: Error, + { + match value { + b"Ok" => Ok(Field::Ok), + b"Err" => Ok(Field::Err), + _ => match str::from_utf8(value) { + Ok(value) => Err(Error::unknown_variant(value, VARIANTS)), + Err(_) => { + Err(Error::invalid_value(Unexpected::Bytes(value), &self)) + } + }, + } + } + } + + deserializer.deserialize_identifier(FieldVisitor) + } + } + + struct ResultVisitor(PhantomData>); + + impl<'de, T, E> Visitor<'de> for ResultVisitor + where + T: Deserialize<'de>, + E: Deserialize<'de>, + { + type Value = Result; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("enum Result") + } + + fn visit_enum(self, data: A) -> Result + where + A: EnumAccess<'de>, + { + match tri!(data.variant()) { + (Field::Ok, v) => v.newtype_variant().map(Ok), + (Field::Err, v) => v.newtype_variant().map(Err), + } + } + } + + const VARIANTS: &[&str] = &["Ok", "Err"]; + + deserializer.deserialize_enum("Result", VARIANTS, ResultVisitor(PhantomData)) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl<'de, T> Deserialize<'de> for Wrapping +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Deserialize::deserialize(deserializer).map(Wrapping) + } +} + +#[cfg(all(feature = "std", not(no_std_atomic)))] +macro_rules! 
atomic_impl { + ($($ty:ident $size:expr)*) => { + $( + #[cfg(any(no_target_has_atomic, target_has_atomic = $size))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "std", target_has_atomic = $size))))] + impl<'de> Deserialize<'de> for $ty { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Deserialize::deserialize(deserializer).map(Self::new) + } + } + )* + }; +} + +#[cfg(all(feature = "std", not(no_std_atomic)))] +atomic_impl! { + AtomicBool "8" + AtomicI8 "8" + AtomicI16 "16" + AtomicI32 "32" + AtomicIsize "ptr" + AtomicU8 "8" + AtomicU16 "16" + AtomicU32 "32" + AtomicUsize "ptr" +} + +#[cfg(all(feature = "std", not(no_std_atomic64)))] +atomic_impl! { + AtomicI64 "64" + AtomicU64 "64" +} + +#[cfg(any(feature = "std", not(no_core_net)))] +struct FromStrVisitor { + expecting: &'static str, + ty: PhantomData, +} + +#[cfg(any(feature = "std", not(no_core_net)))] +impl FromStrVisitor { + fn new(expecting: &'static str) -> Self { + FromStrVisitor { + expecting, + ty: PhantomData, + } + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +impl<'de, T> Visitor<'de> for FromStrVisitor +where + T: str::FromStr, + T::Err: fmt::Display, +{ + type Value = T; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(self.expecting) + } + + fn visit_str(self, s: &str) -> Result + where + E: Error, + { + s.parse().map_err(Error::custom) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..2518ae68274a9888dd1c18e05d62a6191d3c86b0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/mod.rs @@ -0,0 +1,2392 @@ +//! Generic data structure deserialization framework. +//! +//! The two most important traits in this module are [`Deserialize`] and +//! [`Deserializer`]. 
+//! +//! - **A type that implements `Deserialize` is a data structure** that can be +//! deserialized from any data format supported by Serde, and conversely +//! - **A type that implements `Deserializer` is a data format** that can +//! deserialize any data structure supported by Serde. +//! +//! # The Deserialize trait +//! +//! Serde provides [`Deserialize`] implementations for many Rust primitive and +//! standard library types. The complete list is below. All of these can be +//! deserialized using Serde out of the box. +//! +//! Additionally, Serde provides a procedural macro called [`serde_derive`] to +//! automatically generate [`Deserialize`] implementations for structs and enums +//! in your program. See the [derive section of the manual] for how to use this. +//! +//! In rare cases it may be necessary to implement [`Deserialize`] manually for +//! some type in your program. See the [Implementing `Deserialize`] section of +//! the manual for more about this. +//! +//! Third-party crates may provide [`Deserialize`] implementations for types +//! that they expose. For example the [`linked-hash-map`] crate provides a +//! [`LinkedHashMap`] type that is deserializable by Serde because the +//! crate provides an implementation of [`Deserialize`] for it. +//! +//! # The Deserializer trait +//! +//! [`Deserializer`] implementations are provided by third-party crates, for +//! example [`serde_json`], [`serde_yaml`] and [`postcard`]. +//! +//! A partial list of well-maintained formats is given on the [Serde +//! website][data formats]. +//! +//! # Implementations of Deserialize provided by Serde +//! +//! This is a slightly different set of types than what is supported for +//! serialization. Some types can be serialized by Serde but not deserialized. +//! One example is `OsStr`. +//! +//! - **Primitive types**: +//! - bool +//! - i8, i16, i32, i64, i128, isize +//! - u8, u16, u32, u64, u128, usize +//! - f32, f64 +//! - char +//! - **Compound types**: +//! 
- \[T; 0\] through \[T; 32\] +//! - tuples up to size 16 +//! - **Common standard library types**: +//! - String +//! - Option\ +//! - Result\ +//! - PhantomData\ +//! - **Wrapper types**: +//! - Box\ +//! - Box\<\[T\]\> +//! - Box\ +//! - Cow\<'a, T\> +//! - Cell\ +//! - RefCell\ +//! - Mutex\ +//! - RwLock\ +//! - Rc\ *(if* features = \["rc"\] *is enabled)* +//! - Arc\ *(if* features = \["rc"\] *is enabled)* +//! - **Collection types**: +//! - BTreeMap\ +//! - BTreeSet\ +//! - BinaryHeap\ +//! - HashMap\ +//! - HashSet\ +//! - LinkedList\ +//! - VecDeque\ +//! - Vec\ +//! - **Zero-copy types**: +//! - &str +//! - &\[u8\] +//! - **FFI types**: +//! - CString +//! - Box\ +//! - OsString +//! - **Miscellaneous standard library types**: +//! - Duration +//! - SystemTime +//! - Path +//! - PathBuf +//! - Range\ +//! - RangeInclusive\ +//! - Bound\ +//! - num::NonZero* +//! - `!` *(unstable)* +//! - **Net types**: +//! - IpAddr +//! - Ipv4Addr +//! - Ipv6Addr +//! - SocketAddr +//! - SocketAddrV4 +//! - SocketAddrV6 +//! +//! [Implementing `Deserialize`]: https://serde.rs/impl-deserialize.html +//! [`Deserialize`]: crate::Deserialize +//! [`Deserializer`]: crate::Deserializer +//! [`LinkedHashMap`]: https://docs.rs/linked-hash-map/*/linked_hash_map/struct.LinkedHashMap.html +//! [`postcard`]: https://github.com/jamesmunns/postcard +//! [`linked-hash-map`]: https://crates.io/crates/linked-hash-map +//! [`serde_derive`]: https://crates.io/crates/serde_derive +//! [`serde_json`]: https://github.com/serde-rs/json +//! [`serde_yaml`]: https://github.com/dtolnay/serde-yaml +//! [derive section of the manual]: https://serde.rs/derive.html +//! 
[data formats]: https://serde.rs/#data-formats + +use crate::lib::*; + +//////////////////////////////////////////////////////////////////////////////// + +pub mod value; + +mod ignored_any; +mod impls; + +pub use self::ignored_any::IgnoredAny; +#[cfg(all(not(feature = "std"), no_core_error))] +#[doc(no_inline)] +pub use crate::std_error::Error as StdError; +#[cfg(not(any(feature = "std", no_core_error)))] +#[doc(no_inline)] +pub use core::error::Error as StdError; +#[cfg(feature = "std")] +#[doc(no_inline)] +pub use std::error::Error as StdError; + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! declare_error_trait { + (Error: Sized $(+ $($supertrait:ident)::+)*) => { + /// The `Error` trait allows `Deserialize` implementations to create descriptive + /// error messages belonging to the `Deserializer` against which they are + /// currently running. + /// + /// Every `Deserializer` declares an `Error` type that encompasses both + /// general-purpose deserialization errors as well as errors specific to the + /// particular deserialization format. For example the `Error` type of + /// `serde_json` can represent errors like an invalid JSON escape sequence or an + /// unterminated string literal, in addition to the error cases that are part of + /// this trait. + /// + /// Most deserializers should only need to provide the `Error::custom` method + /// and inherit the default behavior for the other methods. + /// + /// # Example implementation + /// + /// The [example data format] presented on the website shows an error + /// type appropriate for a basic JSON data format. + /// + /// [example data format]: https://serde.rs/data-format.html + #[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::de::Error` is not satisfied", + ) + )] + pub trait Error: Sized $(+ $($supertrait)::+)* { + /// Raised when there is general error when deserializing a type. 
+ /// + /// The message should not be capitalized and should not end with a period. + /// + /// ```edition2021 + /// # use std::str::FromStr; + /// # + /// # struct IpAddr; + /// # + /// # impl FromStr for IpAddr { + /// # type Err = String; + /// # + /// # fn from_str(_: &str) -> Result { + /// # unimplemented!() + /// # } + /// # } + /// # + /// use serde::de::{self, Deserialize, Deserializer}; + /// + /// impl<'de> Deserialize<'de> for IpAddr { + /// fn deserialize(deserializer: D) -> Result + /// where + /// D: Deserializer<'de>, + /// { + /// let s = String::deserialize(deserializer)?; + /// s.parse().map_err(de::Error::custom) + /// } + /// } + /// ``` + fn custom(msg: T) -> Self + where + T: Display; + + /// Raised when a `Deserialize` receives a type different from what it was + /// expecting. + /// + /// The `unexp` argument provides information about what type was received. + /// This is the type that was present in the input file or other source data + /// of the Deserializer. + /// + /// The `exp` argument provides information about what type was being + /// expected. This is the type that is written in the program. + /// + /// For example if we try to deserialize a String out of a JSON file + /// containing an integer, the unexpected type is the integer and the + /// expected type is the string. + #[cold] + fn invalid_type(unexp: Unexpected, exp: &dyn Expected) -> Self { + Error::custom(format_args!("invalid type: {}, expected {}", unexp, exp)) + } + + /// Raised when a `Deserialize` receives a value of the right type but that + /// is wrong for some other reason. + /// + /// The `unexp` argument provides information about what value was received. + /// This is the value that was present in the input file or other source + /// data of the Deserializer. + /// + /// The `exp` argument provides information about what value was being + /// expected. This is the type that is written in the program. 
+ /// + /// For example if we try to deserialize a String out of some binary data + /// that is not valid UTF-8, the unexpected value is the bytes and the + /// expected value is a string. + #[cold] + fn invalid_value(unexp: Unexpected, exp: &dyn Expected) -> Self { + Error::custom(format_args!("invalid value: {}, expected {}", unexp, exp)) + } + + /// Raised when deserializing a sequence or map and the input data contains + /// too many or too few elements. + /// + /// The `len` argument is the number of elements encountered. The sequence + /// or map may have expected more arguments or fewer arguments. + /// + /// The `exp` argument provides information about what data was being + /// expected. For example `exp` might say that a tuple of size 6 was + /// expected. + #[cold] + fn invalid_length(len: usize, exp: &dyn Expected) -> Self { + Error::custom(format_args!("invalid length {}, expected {}", len, exp)) + } + + /// Raised when a `Deserialize` enum type received a variant with an + /// unrecognized name. + #[cold] + fn unknown_variant(variant: &str, expected: &'static [&'static str]) -> Self { + if expected.is_empty() { + Error::custom(format_args!( + "unknown variant `{}`, there are no variants", + variant + )) + } else { + Error::custom(format_args!( + "unknown variant `{}`, expected {}", + variant, + OneOf { names: expected } + )) + } + } + + /// Raised when a `Deserialize` struct type received a field with an + /// unrecognized name. + #[cold] + fn unknown_field(field: &str, expected: &'static [&'static str]) -> Self { + if expected.is_empty() { + Error::custom(format_args!( + "unknown field `{}`, there are no fields", + field + )) + } else { + Error::custom(format_args!( + "unknown field `{}`, expected {}", + field, + OneOf { names: expected } + )) + } + } + + /// Raised when a `Deserialize` struct type expected to receive a required + /// field with a particular name but that field was not present in the + /// input. 
+ #[cold] + fn missing_field(field: &'static str) -> Self { + Error::custom(format_args!("missing field `{}`", field)) + } + + /// Raised when a `Deserialize` struct type received more than one of the + /// same field. + #[cold] + fn duplicate_field(field: &'static str) -> Self { + Error::custom(format_args!("duplicate field `{}`", field)) + } + } + } +} + +#[cfg(feature = "std")] +declare_error_trait!(Error: Sized + StdError); + +#[cfg(not(feature = "std"))] +declare_error_trait!(Error: Sized + Debug + Display); + +/// `Unexpected` represents an unexpected invocation of any one of the `Visitor` +/// trait methods. +/// +/// This is used as an argument to the `invalid_type`, `invalid_value`, and +/// `invalid_length` methods of the `Error` trait to build error messages. +/// +/// ```edition2021 +/// # use std::fmt; +/// # +/// # use serde::de::{self, Unexpected, Visitor}; +/// # +/// # struct Example; +/// # +/// # impl<'de> Visitor<'de> for Example { +/// # type Value = (); +/// # +/// # fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { +/// # write!(formatter, "definitely not a boolean") +/// # } +/// # +/// fn visit_bool(self, v: bool) -> Result +/// where +/// E: de::Error, +/// { +/// Err(de::Error::invalid_type(Unexpected::Bool(v), &self)) +/// } +/// # } +/// ``` +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum Unexpected<'a> { + /// The input contained a boolean value that was not expected. + Bool(bool), + + /// The input contained an unsigned integer `u8`, `u16`, `u32` or `u64` that + /// was not expected. + Unsigned(u64), + + /// The input contained a signed integer `i8`, `i16`, `i32` or `i64` that + /// was not expected. + Signed(i64), + + /// The input contained a floating point `f32` or `f64` that was not + /// expected. + Float(f64), + + /// The input contained a `char` that was not expected. + Char(char), + + /// The input contained a `&str` or `String` that was not expected. 
+ Str(&'a str), + + /// The input contained a `&[u8]` or `Vec` that was not expected. + Bytes(&'a [u8]), + + /// The input contained a unit `()` that was not expected. + Unit, + + /// The input contained an `Option` that was not expected. + Option, + + /// The input contained a newtype struct that was not expected. + NewtypeStruct, + + /// The input contained a sequence that was not expected. + Seq, + + /// The input contained a map that was not expected. + Map, + + /// The input contained an enum that was not expected. + Enum, + + /// The input contained a unit variant that was not expected. + UnitVariant, + + /// The input contained a newtype variant that was not expected. + NewtypeVariant, + + /// The input contained a tuple variant that was not expected. + TupleVariant, + + /// The input contained a struct variant that was not expected. + StructVariant, + + /// A message stating what uncategorized thing the input contained that was + /// not expected. + /// + /// The message should be a noun or noun phrase, not capitalized and without + /// a period. An example message is "unoriginal superhero". 
+ Other(&'a str), +} + +impl<'a> fmt::Display for Unexpected<'a> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + use self::Unexpected::*; + match *self { + Bool(b) => write!(formatter, "boolean `{}`", b), + Unsigned(i) => write!(formatter, "integer `{}`", i), + Signed(i) => write!(formatter, "integer `{}`", i), + Float(f) => write!(formatter, "floating point `{}`", WithDecimalPoint(f)), + Char(c) => write!(formatter, "character `{}`", c), + Str(s) => write!(formatter, "string {:?}", s), + Bytes(_) => formatter.write_str("byte array"), + Unit => formatter.write_str("unit value"), + Option => formatter.write_str("Option value"), + NewtypeStruct => formatter.write_str("newtype struct"), + Seq => formatter.write_str("sequence"), + Map => formatter.write_str("map"), + Enum => formatter.write_str("enum"), + UnitVariant => formatter.write_str("unit variant"), + NewtypeVariant => formatter.write_str("newtype variant"), + TupleVariant => formatter.write_str("tuple variant"), + StructVariant => formatter.write_str("struct variant"), + Other(other) => formatter.write_str(other), + } + } +} + +/// `Expected` represents an explanation of what data a `Visitor` was expecting +/// to receive. +/// +/// This is used as an argument to the `invalid_type`, `invalid_value`, and +/// `invalid_length` methods of the `Error` trait to build error messages. The +/// message should be a noun or noun phrase that completes the sentence "This +/// Visitor expects to receive ...", for example the message could be "an +/// integer between 0 and 64". The message should not be capitalized and should +/// not end with a period. +/// +/// Within the context of a `Visitor` implementation, the `Visitor` itself +/// (`&self`) is an implementation of this trait. 
+/// +/// ```edition2021 +/// # use serde::de::{self, Unexpected, Visitor}; +/// # use std::fmt; +/// # +/// # struct Example; +/// # +/// # impl<'de> Visitor<'de> for Example { +/// # type Value = (); +/// # +/// # fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { +/// # write!(formatter, "definitely not a boolean") +/// # } +/// # +/// fn visit_bool(self, v: bool) -> Result +/// where +/// E: de::Error, +/// { +/// Err(de::Error::invalid_type(Unexpected::Bool(v), &self)) +/// } +/// # } +/// ``` +/// +/// Outside of a `Visitor`, `&"..."` can be used. +/// +/// ```edition2021 +/// # use serde::de::{self, Unexpected}; +/// # +/// # fn example() -> Result<(), E> +/// # where +/// # E: de::Error, +/// # { +/// # let v = true; +/// return Err(de::Error::invalid_type( +/// Unexpected::Bool(v), +/// &"a negative integer", +/// )); +/// # } +/// ``` +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::de::Expected` is not satisfied", + ) +)] +pub trait Expected { + /// Format an explanation of what data was being expected. Same signature as + /// the `Display` and `Debug` traits. + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result; +} + +impl<'de, T> Expected for T +where + T: Visitor<'de>, +{ + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + self.expecting(formatter) + } +} + +impl Expected for &str { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(self) + } +} + +impl Display for dyn Expected + '_ { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + Expected::fmt(self, formatter) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A **data structure** that can be deserialized from any data format supported +/// by Serde. +/// +/// Serde provides `Deserialize` implementations for many Rust primitive and +/// standard library types. 
The complete list is [here][crate::de]. All of these +/// can be deserialized using Serde out of the box. +/// +/// Additionally, Serde provides a procedural macro called `serde_derive` to +/// automatically generate `Deserialize` implementations for structs and enums +/// in your program. See the [derive section of the manual][derive] for how to +/// use this. +/// +/// In rare cases it may be necessary to implement `Deserialize` manually for +/// some type in your program. See the [Implementing +/// `Deserialize`][impl-deserialize] section of the manual for more about this. +/// +/// Third-party crates may provide `Deserialize` implementations for types that +/// they expose. For example the `linked-hash-map` crate provides a +/// `LinkedHashMap` type that is deserializable by Serde because the crate +/// provides an implementation of `Deserialize` for it. +/// +/// [derive]: https://serde.rs/derive.html +/// [impl-deserialize]: https://serde.rs/impl-deserialize.html +/// +/// # Lifetime +/// +/// The `'de` lifetime of this trait is the lifetime of data that may be +/// borrowed by `Self` when deserialized. See the page [Understanding +/// deserializer lifetimes] for a more detailed explanation of these lifetimes. +/// +/// [Understanding deserializer lifetimes]: https://serde.rs/lifetimes.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + // Prevents `serde_core::de::Deserialize` appearing in the error message + // in projects with no direct dependency on serde_core. + message = "the trait bound `{Self}: serde::Deserialize<'de>` is not satisfied", + note = "for local types consider adding `#[derive(serde::Deserialize)]` to your `{Self}` type", + note = "for types from other crates check whether the crate offers a `serde` feature flag", + ) +)] +pub trait Deserialize<'de>: Sized { + /// Deserialize this value from the given Serde deserializer. 
+ /// + /// See the [Implementing `Deserialize`][impl-deserialize] section of the + /// manual for more information about how to implement this method. + /// + /// [impl-deserialize]: https://serde.rs/impl-deserialize.html + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>; + + /// Deserializes a value into `self` from the given Deserializer. + /// + /// The purpose of this method is to allow the deserializer to reuse + /// resources and avoid copies. As such, if this method returns an error, + /// `self` will be in an indeterminate state where some parts of the struct + /// have been overwritten. Although whatever state that is will be + /// memory-safe. + /// + /// This is generally useful when repeatedly deserializing values that + /// are processed one at a time, where the value of `self` doesn't matter + /// when the next deserialization occurs. + /// + /// If you manually implement this, your recursive deserializations should + /// use `deserialize_in_place`. + /// + /// This method is stable and an official public API, but hidden from the + /// documentation because it is almost never what newbies are looking for. + /// Showing it in rustdoc would cause it to be featured more prominently + /// than it deserves. + #[doc(hidden)] + fn deserialize_in_place(deserializer: D, place: &mut Self) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + // Default implementation just delegates to `deserialize` impl. + *place = tri!(Deserialize::deserialize(deserializer)); + Ok(()) + } +} + +/// A data structure that can be deserialized without borrowing any data from +/// the deserializer. +/// +/// This is primarily useful for trait bounds on functions. For example a +/// `from_str` function may be able to deserialize a data structure that borrows +/// from the input string, but a `from_reader` function may only deserialize +/// owned data. 
+/// +/// ```edition2021 +/// # use serde::de::{Deserialize, DeserializeOwned}; +/// # use std::io::{Read, Result}; +/// # +/// # trait Ignore { +/// fn from_str<'a, T>(s: &'a str) -> Result +/// where +/// T: Deserialize<'a>; +/// +/// fn from_reader(rdr: R) -> Result +/// where +/// R: Read, +/// T: DeserializeOwned; +/// # } +/// ``` +/// +/// # Lifetime +/// +/// The relationship between `Deserialize` and `DeserializeOwned` in trait +/// bounds is explained in more detail on the page [Understanding deserializer +/// lifetimes]. +/// +/// [Understanding deserializer lifetimes]: https://serde.rs/lifetimes.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::de::DeserializeOwned` is not satisfied", + ) +)] +pub trait DeserializeOwned: for<'de> Deserialize<'de> {} +impl DeserializeOwned for T where T: for<'de> Deserialize<'de> {} + +/// `DeserializeSeed` is the stateful form of the `Deserialize` trait. If you +/// ever find yourself looking for a way to pass data into a `Deserialize` impl, +/// this trait is the way to do it. +/// +/// As one example of stateful deserialization consider deserializing a JSON +/// array into an existing buffer. Using the `Deserialize` trait we could +/// deserialize a JSON array into a `Vec` but it would be a freshly allocated +/// `Vec`; there is no way for `Deserialize` to reuse a previously allocated +/// buffer. Using `DeserializeSeed` instead makes this possible as in the +/// example code below. 
+/// +/// The canonical API for stateless deserialization looks like this: +/// +/// ```edition2021 +/// # use serde::Deserialize; +/// # +/// # enum Error {} +/// # +/// fn func<'de, T: Deserialize<'de>>() -> Result +/// # { +/// # unimplemented!() +/// # } +/// ``` +/// +/// Adjusting an API like this to support stateful deserialization is a matter +/// of accepting a seed as input: +/// +/// ```edition2021 +/// # use serde::de::DeserializeSeed; +/// # +/// # enum Error {} +/// # +/// fn func_seed<'de, T: DeserializeSeed<'de>>(seed: T) -> Result +/// # { +/// # let _ = seed; +/// # unimplemented!() +/// # } +/// ``` +/// +/// In practice the majority of deserialization is stateless. An API expecting a +/// seed can be appeased by passing `std::marker::PhantomData` as a seed in the +/// case of stateless deserialization. +/// +/// # Lifetime +/// +/// The `'de` lifetime of this trait is the lifetime of data that may be +/// borrowed by `Self::Value` when deserialized. See the page [Understanding +/// deserializer lifetimes] for a more detailed explanation of these lifetimes. +/// +/// [Understanding deserializer lifetimes]: https://serde.rs/lifetimes.html +/// +/// # Example +/// +/// Suppose we have JSON that looks like `[[1, 2], [3, 4, 5], [6]]` and we need +/// to deserialize it into a flat representation like `vec![1, 2, 3, 4, 5, 6]`. +/// Allocating a brand new `Vec` for each subarray would be slow. Instead we +/// would like to allocate a single `Vec` and then deserialize each subarray +/// into it. This requires stateful deserialization using the `DeserializeSeed` +/// trait. +/// +/// ```edition2021 +/// use serde::de::{Deserialize, DeserializeSeed, Deserializer, SeqAccess, Visitor}; +/// use std::fmt; +/// use std::marker::PhantomData; +/// +/// // A DeserializeSeed implementation that uses stateful deserialization to +/// // append array elements onto the end of an existing vector. The preexisting +/// // state ("seed") in this case is the Vec. 
The `deserialize` method of +/// // `ExtendVec` will be traversing the inner arrays of the JSON input and +/// // appending each integer into the existing Vec. +/// struct ExtendVec<'a, T: 'a>(&'a mut Vec); +/// +/// impl<'de, 'a, T> DeserializeSeed<'de> for ExtendVec<'a, T> +/// where +/// T: Deserialize<'de>, +/// { +/// // The return type of the `deserialize` method. This implementation +/// // appends onto an existing vector but does not create any new data +/// // structure, so the return type is (). +/// type Value = (); +/// +/// fn deserialize(self, deserializer: D) -> Result +/// where +/// D: Deserializer<'de>, +/// { +/// // Visitor implementation that will walk an inner array of the JSON +/// // input. +/// struct ExtendVecVisitor<'a, T: 'a>(&'a mut Vec); +/// +/// impl<'de, 'a, T> Visitor<'de> for ExtendVecVisitor<'a, T> +/// where +/// T: Deserialize<'de>, +/// { +/// type Value = (); +/// +/// fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { +/// write!(formatter, "an array of integers") +/// } +/// +/// fn visit_seq(self, mut seq: A) -> Result<(), A::Error> +/// where +/// A: SeqAccess<'de>, +/// { +/// // Decrease the number of reallocations if there are many elements +/// if let Some(size_hint) = seq.size_hint() { +/// self.0.reserve(size_hint); +/// } +/// +/// // Visit each element in the inner array and push it onto +/// // the existing vector. +/// while let Some(elem) = seq.next_element()? { +/// self.0.push(elem); +/// } +/// Ok(()) +/// } +/// } +/// +/// deserializer.deserialize_seq(ExtendVecVisitor(self.0)) +/// } +/// } +/// +/// // Visitor implementation that will walk the outer array of the JSON input. +/// struct FlattenedVecVisitor(PhantomData); +/// +/// impl<'de, T> Visitor<'de> for FlattenedVecVisitor +/// where +/// T: Deserialize<'de>, +/// { +/// // This Visitor constructs a single Vec to hold the flattened +/// // contents of the inner arrays. 
+/// type Value = Vec; +/// +/// fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { +/// write!(formatter, "an array of arrays") +/// } +/// +/// fn visit_seq(self, mut seq: A) -> Result, A::Error> +/// where +/// A: SeqAccess<'de>, +/// { +/// // Create a single Vec to hold the flattened contents. +/// let mut vec = Vec::new(); +/// +/// // Each iteration through this loop is one inner array. +/// while let Some(()) = seq.next_element_seed(ExtendVec(&mut vec))? { +/// // Nothing to do; inner array has been appended into `vec`. +/// } +/// +/// // Return the finished vec. +/// Ok(vec) +/// } +/// } +/// +/// # fn example<'de, D>(deserializer: D) -> Result<(), D::Error> +/// # where +/// # D: Deserializer<'de>, +/// # { +/// let visitor = FlattenedVecVisitor(PhantomData); +/// let flattened: Vec = deserializer.deserialize_seq(visitor)?; +/// # Ok(()) +/// # } +/// ``` +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::de::DeserializeSeed<'de>` is not satisfied", + ) +)] +pub trait DeserializeSeed<'de>: Sized { + /// The type produced by using this seed. + type Value; + + /// Equivalent to the more common `Deserialize::deserialize` method, except + /// with some initial piece of data (the seed) passed in. + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>; +} + +impl<'de, T> DeserializeSeed<'de> for PhantomData +where + T: Deserialize<'de>, +{ + type Value = T; + + #[inline] + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + T::deserialize(deserializer) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A **data format** that can deserialize any data structure supported by +/// Serde. 
+/// +/// The role of this trait is to define the deserialization half of the [Serde +/// data model], which is a way to categorize every Rust data type into one of +/// 29 possible types. Each method of the `Deserializer` trait corresponds to one +/// of the types of the data model. +/// +/// Implementations of `Deserialize` map themselves into this data model by +/// passing to the `Deserializer` a `Visitor` implementation that can receive +/// these various types. +/// +/// The types that make up the Serde data model are: +/// +/// - **14 primitive types** +/// - bool +/// - i8, i16, i32, i64, i128 +/// - u8, u16, u32, u64, u128 +/// - f32, f64 +/// - char +/// - **string** +/// - UTF-8 bytes with a length and no null terminator. +/// - When serializing, all strings are handled equally. When deserializing, +/// there are three flavors of strings: transient, owned, and borrowed. +/// - **byte array** - \[u8\] +/// - Similar to strings, during deserialization byte arrays can be +/// transient, owned, or borrowed. +/// - **option** +/// - Either none or some value. +/// - **unit** +/// - The type of `()` in Rust. It represents an anonymous value containing +/// no data. +/// - **unit_struct** +/// - For example `struct Unit` or `PhantomData`. It represents a named +/// value containing no data. +/// - **unit_variant** +/// - For example the `E::A` and `E::B` in `enum E { A, B }`. +/// - **newtype_struct** +/// - For example `struct Millimeters(u8)`. +/// - **newtype_variant** +/// - For example the `E::N` in `enum E { N(u8) }`. +/// - **seq** +/// - A variably sized heterogeneous sequence of values, for example `Vec` +/// or `HashSet`. When serializing, the length may or may not be known +/// before iterating through all the data. When deserializing, the length +/// is determined by looking at the serialized data. 
+/// - **tuple** +/// - A statically sized heterogeneous sequence of values for which the +/// length will be known at deserialization time without looking at the +/// serialized data, for example `(u8,)` or `(String, u64, Vec)` or +/// `[u64; 10]`. +/// - **tuple_struct** +/// - A named tuple, for example `struct Rgb(u8, u8, u8)`. +/// - **tuple_variant** +/// - For example the `E::T` in `enum E { T(u8, u8) }`. +/// - **map** +/// - A heterogeneous key-value pairing, for example `BTreeMap`. +/// - **struct** +/// - A heterogeneous key-value pairing in which the keys are strings and +/// will be known at deserialization time without looking at the serialized +/// data, for example `struct S { r: u8, g: u8, b: u8 }`. +/// - **struct_variant** +/// - For example the `E::S` in `enum E { S { r: u8, g: u8, b: u8 } }`. +/// +/// The `Deserializer` trait supports two entry point styles which enables +/// different kinds of deserialization. +/// +/// 1. The `deserialize_any` method. Self-describing data formats like JSON are +/// able to look at the serialized data and tell what it represents. For +/// example the JSON deserializer may see an opening curly brace (`{`) and +/// know that it is seeing a map. If the data format supports +/// `Deserializer::deserialize_any`, it will drive the Visitor using whatever +/// type it sees in the input. JSON uses this approach when deserializing +/// `serde_json::Value` which is an enum that can represent any JSON +/// document. Without knowing what is in a JSON document, we can deserialize +/// it to `serde_json::Value` by going through +/// `Deserializer::deserialize_any`. +/// +/// 2. The various `deserialize_*` methods. Non-self-describing formats like +/// Postcard need to be told what is in the input in order to deserialize it. +/// The `deserialize_*` methods are hints to the deserializer for how to +/// interpret the next piece of input. 
Non-self-describing formats are not +/// able to deserialize something like `serde_json::Value` which relies on +/// `Deserializer::deserialize_any`. +/// +/// When implementing `Deserialize`, you should avoid relying on +/// `Deserializer::deserialize_any` unless you need to be told by the +/// Deserializer what type is in the input. Know that relying on +/// `Deserializer::deserialize_any` means your data type will be able to +/// deserialize from self-describing formats only, ruling out Postcard and many +/// others. +/// +/// [Serde data model]: https://serde.rs/data-model.html +/// +/// # Lifetime +/// +/// The `'de` lifetime of this trait is the lifetime of data that may be +/// borrowed from the input when deserializing. See the page [Understanding +/// deserializer lifetimes] for a more detailed explanation of these lifetimes. +/// +/// [Understanding deserializer lifetimes]: https://serde.rs/lifetimes.html +/// +/// # Example implementation +/// +/// The [example data format] presented on the website contains example code for +/// a basic JSON `Deserializer`. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::de::Deserializer<'de>` is not satisfied", + ) +)] +pub trait Deserializer<'de>: Sized { + /// The error type that can be returned if some error occurs during + /// deserialization. + type Error: Error; + + /// Require the `Deserializer` to figure out how to drive the visitor based + /// on what data type is in the input. + /// + /// When implementing `Deserialize`, you should avoid relying on + /// `Deserializer::deserialize_any` unless you need to be told by the + /// Deserializer what type is in the input. Know that relying on + /// `Deserializer::deserialize_any` means your data type will be able to + /// deserialize from self-describing formats only, ruling out Postcard and + /// many others. 
+ fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a `bool` value. + fn deserialize_bool(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting an `i8` value. + fn deserialize_i8(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting an `i16` value. + fn deserialize_i16(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting an `i32` value. + fn deserialize_i32(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting an `i64` value. + fn deserialize_i64(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting an `i128` value. + /// + /// The default behavior unconditionally returns an error. + fn deserialize_i128(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let _ = visitor; + Err(Error::custom("i128 is not supported")) + } + + /// Hint that the `Deserialize` type is expecting a `u8` value. + fn deserialize_u8(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a `u16` value. + fn deserialize_u16(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a `u32` value. + fn deserialize_u32(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a `u64` value. + fn deserialize_u64(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting an `u128` value. + /// + /// The default behavior unconditionally returns an error. 
+ fn deserialize_u128(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let _ = visitor; + Err(Error::custom("u128 is not supported")) + } + + /// Hint that the `Deserialize` type is expecting a `f32` value. + fn deserialize_f32(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a `f64` value. + fn deserialize_f64(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a `char` value. + fn deserialize_char(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a string value and does + /// not benefit from taking ownership of buffered data owned by the + /// `Deserializer`. + /// + /// If the `Visitor` would benefit from taking ownership of `String` data, + /// indicate this to the `Deserializer` by using `deserialize_string` + /// instead. + fn deserialize_str(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a string value and would + /// benefit from taking ownership of buffered data owned by the + /// `Deserializer`. + /// + /// If the `Visitor` would not benefit from taking ownership of `String` + /// data, indicate that to the `Deserializer` by using `deserialize_str` + /// instead. + fn deserialize_string(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a byte array and does not + /// benefit from taking ownership of buffered data owned by the + /// `Deserializer`. + /// + /// If the `Visitor` would benefit from taking ownership of `Vec` data, + /// indicate this to the `Deserializer` by using `deserialize_byte_buf` + /// instead. + fn deserialize_bytes(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a byte array and would + /// benefit from taking ownership of buffered data owned by the + /// `Deserializer`. 
+ /// + /// If the `Visitor` would not benefit from taking ownership of `Vec` + /// data, indicate that to the `Deserializer` by using `deserialize_bytes` + /// instead. + fn deserialize_byte_buf(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting an optional value. + /// + /// This allows deserializers that encode an optional value as a nullable + /// value to convert the null value into `None` and a regular value into + /// `Some(value)`. + fn deserialize_option(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a unit value. + fn deserialize_unit(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a unit struct with a + /// particular name. + fn deserialize_unit_struct( + self, + name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a newtype struct with a + /// particular name. + fn deserialize_newtype_struct( + self, + name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a sequence of values. + fn deserialize_seq(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a sequence of values and + /// knows how many values there are without looking at the serialized data. + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a tuple struct with a + /// particular name and number of fields. + fn deserialize_tuple_struct( + self, + name: &'static str, + len: usize, + visitor: V, + ) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a map of key-value pairs. 
+ fn deserialize_map(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting a struct with a particular + /// name and fields. + fn deserialize_struct( + self, + name: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting an enum value with a + /// particular name and possible variants. + fn deserialize_enum( + self, + name: &'static str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type is expecting the name of a struct + /// field or the discriminant of an enum variant. + fn deserialize_identifier(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Hint that the `Deserialize` type needs to deserialize a value whose type + /// doesn't matter because it is ignored. + /// + /// Deserializers for non-self-describing formats may not support this mode. + fn deserialize_ignored_any(self, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Determine whether `Deserialize` implementations should expect to + /// deserialize their human-readable form. + /// + /// Some types have a human-readable form that may be somewhat expensive to + /// construct, as well as a binary form that is compact and efficient. + /// Generally text-based formats like JSON and YAML will prefer to use the + /// human-readable one and binary formats like Postcard will prefer the + /// compact one. 
+ /// + /// ```edition2021 + /// # use std::ops::Add; + /// # use std::str::FromStr; + /// # + /// # struct Timestamp; + /// # + /// # impl Timestamp { + /// # const EPOCH: Timestamp = Timestamp; + /// # } + /// # + /// # impl FromStr for Timestamp { + /// # type Err = String; + /// # fn from_str(_: &str) -> Result { + /// # unimplemented!() + /// # } + /// # } + /// # + /// # struct Duration; + /// # + /// # impl Duration { + /// # fn seconds(_: u64) -> Self { unimplemented!() } + /// # } + /// # + /// # impl Add for Timestamp { + /// # type Output = Timestamp; + /// # fn add(self, _: Duration) -> Self::Output { + /// # unimplemented!() + /// # } + /// # } + /// # + /// use serde::de::{self, Deserialize, Deserializer}; + /// + /// impl<'de> Deserialize<'de> for Timestamp { + /// fn deserialize(deserializer: D) -> Result + /// where + /// D: Deserializer<'de>, + /// { + /// if deserializer.is_human_readable() { + /// // Deserialize from a human-readable string like "2015-05-15T17:01:00Z". + /// let s = String::deserialize(deserializer)?; + /// Timestamp::from_str(&s).map_err(de::Error::custom) + /// } else { + /// // Deserialize from a compact binary representation, seconds since + /// // the Unix epoch. + /// let n = u64::deserialize(deserializer)?; + /// Ok(Timestamp::EPOCH + Duration::seconds(n)) + /// } + /// } + /// } + /// ``` + /// + /// The default implementation of this method returns `true`. Data formats + /// may override this to `false` to request a compact form for types that + /// support one. Note that modifying this method to change a format from + /// human-readable to compact or vice versa should be regarded as a breaking + /// change, as a value serialized in human-readable mode is not required to + /// deserialize from the same data in compact mode. + #[inline] + fn is_human_readable(&self) -> bool { + true + } + + // Not public API. 
+ #[cfg(all(not(no_serde_derive), any(feature = "std", feature = "alloc")))] + #[doc(hidden)] + fn __deserialize_content_v1(self, visitor: V) -> Result + where + V: Visitor<'de, Value = crate::private::Content<'de>>, + { + self.deserialize_any(visitor) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// This trait represents a visitor that walks through a deserializer. +/// +/// # Lifetime +/// +/// The `'de` lifetime of this trait is the requirement for lifetime of data +/// that may be borrowed by `Self::Value`. See the page [Understanding +/// deserializer lifetimes] for a more detailed explanation of these lifetimes. +/// +/// [Understanding deserializer lifetimes]: https://serde.rs/lifetimes.html +/// +/// # Example +/// +/// ```edition2021 +/// # use serde::de::{self, Unexpected, Visitor}; +/// # use std::fmt; +/// # +/// /// A visitor that deserializes a long string - a string containing at least +/// /// some minimum number of bytes. +/// struct LongString { +/// min: usize, +/// } +/// +/// impl<'de> Visitor<'de> for LongString { +/// type Value = String; +/// +/// fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { +/// write!(formatter, "a string containing at least {} bytes", self.min) +/// } +/// +/// fn visit_str(self, s: &str) -> Result +/// where +/// E: de::Error, +/// { +/// if s.len() >= self.min { +/// Ok(s.to_owned()) +/// } else { +/// Err(de::Error::invalid_value(Unexpected::Str(s), &self)) +/// } +/// } +/// } +/// ``` +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::de::Visitor<'de>` is not satisfied", + ) +)] +pub trait Visitor<'de>: Sized { + /// The value produced by this visitor. + type Value; + + /// Format a message stating what data this Visitor expects to receive. + /// + /// This is used in error messages. 
The message should complete the sentence + /// "This Visitor expects to receive ...", for example the message could be + /// "an integer between 0 and 64". The message should not be capitalized and + /// should not end with a period. + /// + /// ```edition2021 + /// # use std::fmt; + /// # + /// # struct S { + /// # max: usize, + /// # } + /// # + /// # impl<'de> serde::de::Visitor<'de> for S { + /// # type Value = (); + /// # + /// fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + /// write!(formatter, "an integer between 0 and {}", self.max) + /// } + /// # } + /// ``` + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result; + + /// The input contains a boolean. + /// + /// The default implementation fails with a type error. + fn visit_bool(self, v: bool) -> Result + where + E: Error, + { + Err(Error::invalid_type(Unexpected::Bool(v), &self)) + } + + /// The input contains an `i8`. + /// + /// The default implementation forwards to [`visit_i64`]. + /// + /// [`visit_i64`]: #method.visit_i64 + fn visit_i8(self, v: i8) -> Result + where + E: Error, + { + self.visit_i64(v as i64) + } + + /// The input contains an `i16`. + /// + /// The default implementation forwards to [`visit_i64`]. + /// + /// [`visit_i64`]: #method.visit_i64 + fn visit_i16(self, v: i16) -> Result + where + E: Error, + { + self.visit_i64(v as i64) + } + + /// The input contains an `i32`. + /// + /// The default implementation forwards to [`visit_i64`]. + /// + /// [`visit_i64`]: #method.visit_i64 + fn visit_i32(self, v: i32) -> Result + where + E: Error, + { + self.visit_i64(v as i64) + } + + /// The input contains an `i64`. + /// + /// The default implementation fails with a type error. + fn visit_i64(self, v: i64) -> Result + where + E: Error, + { + Err(Error::invalid_type(Unexpected::Signed(v), &self)) + } + + /// The input contains a `i128`. + /// + /// The default implementation fails with a type error. 
+ fn visit_i128(self, v: i128) -> Result + where + E: Error, + { + let mut buf = [0u8; 58]; + let mut writer = crate::format::Buf::new(&mut buf); + fmt::Write::write_fmt(&mut writer, format_args!("integer `{}` as i128", v)).unwrap(); + Err(Error::invalid_type( + Unexpected::Other(writer.as_str()), + &self, + )) + } + + /// The input contains a `u8`. + /// + /// The default implementation forwards to [`visit_u64`]. + /// + /// [`visit_u64`]: #method.visit_u64 + fn visit_u8(self, v: u8) -> Result + where + E: Error, + { + self.visit_u64(v as u64) + } + + /// The input contains a `u16`. + /// + /// The default implementation forwards to [`visit_u64`]. + /// + /// [`visit_u64`]: #method.visit_u64 + fn visit_u16(self, v: u16) -> Result + where + E: Error, + { + self.visit_u64(v as u64) + } + + /// The input contains a `u32`. + /// + /// The default implementation forwards to [`visit_u64`]. + /// + /// [`visit_u64`]: #method.visit_u64 + fn visit_u32(self, v: u32) -> Result + where + E: Error, + { + self.visit_u64(v as u64) + } + + /// The input contains a `u64`. + /// + /// The default implementation fails with a type error. + fn visit_u64(self, v: u64) -> Result + where + E: Error, + { + Err(Error::invalid_type(Unexpected::Unsigned(v), &self)) + } + + /// The input contains a `u128`. + /// + /// The default implementation fails with a type error. + fn visit_u128(self, v: u128) -> Result + where + E: Error, + { + let mut buf = [0u8; 57]; + let mut writer = crate::format::Buf::new(&mut buf); + fmt::Write::write_fmt(&mut writer, format_args!("integer `{}` as u128", v)).unwrap(); + Err(Error::invalid_type( + Unexpected::Other(writer.as_str()), + &self, + )) + } + + /// The input contains an `f32`. + /// + /// The default implementation forwards to [`visit_f64`]. + /// + /// [`visit_f64`]: #method.visit_f64 + fn visit_f32(self, v: f32) -> Result + where + E: Error, + { + self.visit_f64(v as f64) + } + + /// The input contains an `f64`. 
+ /// + /// The default implementation fails with a type error. + fn visit_f64(self, v: f64) -> Result + where + E: Error, + { + Err(Error::invalid_type(Unexpected::Float(v), &self)) + } + + /// The input contains a `char`. + /// + /// The default implementation forwards to [`visit_str`] as a one-character + /// string. + /// + /// [`visit_str`]: #method.visit_str + #[inline] + fn visit_char(self, v: char) -> Result + where + E: Error, + { + self.visit_str(v.encode_utf8(&mut [0u8; 4])) + } + + /// The input contains a string. The lifetime of the string is ephemeral and + /// it may be destroyed after this method returns. + /// + /// This method allows the `Deserializer` to avoid a copy by retaining + /// ownership of any buffered data. `Deserialize` implementations that do + /// not benefit from taking ownership of `String` data should indicate that + /// to the deserializer by using `Deserializer::deserialize_str` rather than + /// `Deserializer::deserialize_string`. + /// + /// It is never correct to implement `visit_string` without implementing + /// `visit_str`. Implement neither, both, or just `visit_str`. + fn visit_str(self, v: &str) -> Result + where + E: Error, + { + Err(Error::invalid_type(Unexpected::Str(v), &self)) + } + + /// The input contains a string that lives at least as long as the + /// `Deserializer`. + /// + /// This enables zero-copy deserialization of strings in some formats. For + /// example JSON input containing the JSON string `"borrowed"` can be + /// deserialized with zero copying into a `&'a str` as long as the input + /// data outlives `'a`. + /// + /// The default implementation forwards to `visit_str`. + #[inline] + fn visit_borrowed_str(self, v: &'de str) -> Result + where + E: Error, + { + self.visit_str(v) + } + + /// The input contains a string and ownership of the string is being given + /// to the `Visitor`. 
+ /// + /// This method allows the `Visitor` to avoid a copy by taking ownership of + /// a string created by the `Deserializer`. `Deserialize` implementations + /// that benefit from taking ownership of `String` data should indicate that + /// to the deserializer by using `Deserializer::deserialize_string` rather + /// than `Deserializer::deserialize_str`, although not every deserializer + /// will honor such a request. + /// + /// It is never correct to implement `visit_string` without implementing + /// `visit_str`. Implement neither, both, or just `visit_str`. + /// + /// The default implementation forwards to `visit_str` and then drops the + /// `String`. + #[inline] + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + fn visit_string(self, v: String) -> Result + where + E: Error, + { + self.visit_str(&v) + } + + /// The input contains a byte array. The lifetime of the byte array is + /// ephemeral and it may be destroyed after this method returns. + /// + /// This method allows the `Deserializer` to avoid a copy by retaining + /// ownership of any buffered data. `Deserialize` implementations that do + /// not benefit from taking ownership of `Vec` data should indicate that + /// to the deserializer by using `Deserializer::deserialize_bytes` rather + /// than `Deserializer::deserialize_byte_buf`. + /// + /// It is never correct to implement `visit_byte_buf` without implementing + /// `visit_bytes`. Implement neither, both, or just `visit_bytes`. + fn visit_bytes(self, v: &[u8]) -> Result + where + E: Error, + { + Err(Error::invalid_type(Unexpected::Bytes(v), &self)) + } + + /// The input contains a byte array that lives at least as long as the + /// `Deserializer`. + /// + /// This enables zero-copy deserialization of bytes in some formats. For + /// example Postcard data containing bytes can be deserialized with zero + /// copying into a `&'a [u8]` as long as the input data outlives `'a`. 
+ /// + /// The default implementation forwards to `visit_bytes`. + #[inline] + fn visit_borrowed_bytes(self, v: &'de [u8]) -> Result + where + E: Error, + { + self.visit_bytes(v) + } + + /// The input contains a byte array and ownership of the byte array is being + /// given to the `Visitor`. + /// + /// This method allows the `Visitor` to avoid a copy by taking ownership of + /// a byte buffer created by the `Deserializer`. `Deserialize` + /// implementations that benefit from taking ownership of `Vec` data + /// should indicate that to the deserializer by using + /// `Deserializer::deserialize_byte_buf` rather than + /// `Deserializer::deserialize_bytes`, although not every deserializer will + /// honor such a request. + /// + /// It is never correct to implement `visit_byte_buf` without implementing + /// `visit_bytes`. Implement neither, both, or just `visit_bytes`. + /// + /// The default implementation forwards to `visit_bytes` and then drops the + /// `Vec`. + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + fn visit_byte_buf(self, v: Vec) -> Result + where + E: Error, + { + self.visit_bytes(&v) + } + + /// The input contains an optional that is absent. + /// + /// The default implementation fails with a type error. + fn visit_none(self) -> Result + where + E: Error, + { + Err(Error::invalid_type(Unexpected::Option, &self)) + } + + /// The input contains an optional that is present. + /// + /// The default implementation fails with a type error. + fn visit_some(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let _ = deserializer; + Err(Error::invalid_type(Unexpected::Option, &self)) + } + + /// The input contains a unit `()`. + /// + /// The default implementation fails with a type error. + fn visit_unit(self) -> Result + where + E: Error, + { + Err(Error::invalid_type(Unexpected::Unit, &self)) + } + + /// The input contains a newtype struct. 
+ /// + /// The content of the newtype struct may be read from the given + /// `Deserializer`. + /// + /// The default implementation fails with a type error. + fn visit_newtype_struct(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let _ = deserializer; + Err(Error::invalid_type(Unexpected::NewtypeStruct, &self)) + } + + /// The input contains a sequence of elements. + /// + /// The default implementation fails with a type error. + fn visit_seq(self, seq: A) -> Result + where + A: SeqAccess<'de>, + { + let _ = seq; + Err(Error::invalid_type(Unexpected::Seq, &self)) + } + + /// The input contains a key-value map. + /// + /// The default implementation fails with a type error. + fn visit_map(self, map: A) -> Result + where + A: MapAccess<'de>, + { + let _ = map; + Err(Error::invalid_type(Unexpected::Map, &self)) + } + + /// The input contains an enum. + /// + /// The default implementation fails with a type error. + fn visit_enum(self, data: A) -> Result + where + A: EnumAccess<'de>, + { + let _ = data; + Err(Error::invalid_type(Unexpected::Enum, &self)) + } + + // Used when deserializing a flattened Option field. Not public API. + #[doc(hidden)] + fn __private_visit_untagged_option(self, _: D) -> Result + where + D: Deserializer<'de>, + { + Err(()) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// Provides a `Visitor` access to each element of a sequence in the input. +/// +/// This is a trait that a `Deserializer` passes to a `Visitor` implementation, +/// which deserializes each item in a sequence. +/// +/// # Lifetime +/// +/// The `'de` lifetime of this trait is the lifetime of data that may be +/// borrowed by deserialized sequence elements. See the page [Understanding +/// deserializer lifetimes] for a more detailed explanation of these lifetimes. 
+/// +/// [Understanding deserializer lifetimes]: https://serde.rs/lifetimes.html +/// +/// # Example implementation +/// +/// The [example data format] presented on the website demonstrates an +/// implementation of `SeqAccess` for a basic JSON data format. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::de::SeqAccess<'de>` is not satisfied", + ) +)] +pub trait SeqAccess<'de> { + /// The error type that can be returned if some error occurs during + /// deserialization. + type Error: Error; + + /// This returns `Ok(Some(value))` for the next value in the sequence, or + /// `Ok(None)` if there are no more remaining items. + /// + /// `Deserialize` implementations should typically use + /// `SeqAccess::next_element` instead. + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: DeserializeSeed<'de>; + + /// This returns `Ok(Some(value))` for the next value in the sequence, or + /// `Ok(None)` if there are no more remaining items. + /// + /// This method exists as a convenience for `Deserialize` implementations. + /// `SeqAccess` implementations should not override the default behavior. + #[inline] + fn next_element(&mut self) -> Result, Self::Error> + where + T: Deserialize<'de>, + { + self.next_element_seed(PhantomData) + } + + /// Returns the number of elements remaining in the sequence, if known. 
+ #[inline] + fn size_hint(&self) -> Option { + None + } +} + +impl<'de, A> SeqAccess<'de> for &mut A +where + A: ?Sized + SeqAccess<'de>, +{ + type Error = A::Error; + + #[inline] + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: DeserializeSeed<'de>, + { + (**self).next_element_seed(seed) + } + + #[inline] + fn next_element(&mut self) -> Result, Self::Error> + where + T: Deserialize<'de>, + { + (**self).next_element() + } + + #[inline] + fn size_hint(&self) -> Option { + (**self).size_hint() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// Provides a `Visitor` access to each entry of a map in the input. +/// +/// This is a trait that a `Deserializer` passes to a `Visitor` implementation. +/// +/// # Lifetime +/// +/// The `'de` lifetime of this trait is the lifetime of data that may be +/// borrowed by deserialized map entries. See the page [Understanding +/// deserializer lifetimes] for a more detailed explanation of these lifetimes. +/// +/// [Understanding deserializer lifetimes]: https://serde.rs/lifetimes.html +/// +/// # Example implementation +/// +/// The [example data format] presented on the website demonstrates an +/// implementation of `MapAccess` for a basic JSON data format. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::de::MapAccess<'de>` is not satisfied", + ) +)] +pub trait MapAccess<'de> { + /// The error type that can be returned if some error occurs during + /// deserialization. + type Error: Error; + + /// This returns `Ok(Some(key))` for the next key in the map, or `Ok(None)` + /// if there are no more remaining entries. + /// + /// `Deserialize` implementations should typically use + /// `MapAccess::next_key` or `MapAccess::next_entry` instead. 
+ fn next_key_seed(&mut self, seed: K) -> Result, Self::Error> + where + K: DeserializeSeed<'de>; + + /// This returns a `Ok(value)` for the next value in the map. + /// + /// `Deserialize` implementations should typically use + /// `MapAccess::next_value` instead. + /// + /// # Panics + /// + /// Calling `next_value_seed` before `next_key_seed` is incorrect and is + /// allowed to panic or return bogus results. + fn next_value_seed(&mut self, seed: V) -> Result + where + V: DeserializeSeed<'de>; + + /// This returns `Ok(Some((key, value)))` for the next (key-value) pair in + /// the map, or `Ok(None)` if there are no more remaining items. + /// + /// `MapAccess` implementations should override the default behavior if a + /// more efficient implementation is possible. + /// + /// `Deserialize` implementations should typically use + /// `MapAccess::next_entry` instead. + #[inline] + fn next_entry_seed( + &mut self, + kseed: K, + vseed: V, + ) -> Result, Self::Error> + where + K: DeserializeSeed<'de>, + V: DeserializeSeed<'de>, + { + match tri!(self.next_key_seed(kseed)) { + Some(key) => { + let value = tri!(self.next_value_seed(vseed)); + Ok(Some((key, value))) + } + None => Ok(None), + } + } + + /// This returns `Ok(Some(key))` for the next key in the map, or `Ok(None)` + /// if there are no more remaining entries. + /// + /// This method exists as a convenience for `Deserialize` implementations. + /// `MapAccess` implementations should not override the default behavior. + #[inline] + fn next_key(&mut self) -> Result, Self::Error> + where + K: Deserialize<'de>, + { + self.next_key_seed(PhantomData) + } + + /// This returns a `Ok(value)` for the next value in the map. + /// + /// This method exists as a convenience for `Deserialize` implementations. + /// `MapAccess` implementations should not override the default behavior. + /// + /// # Panics + /// + /// Calling `next_value` before `next_key` is incorrect and is allowed to + /// panic or return bogus results. 
+ #[inline] + fn next_value(&mut self) -> Result + where + V: Deserialize<'de>, + { + self.next_value_seed(PhantomData) + } + + /// This returns `Ok(Some((key, value)))` for the next (key-value) pair in + /// the map, or `Ok(None)` if there are no more remaining items. + /// + /// This method exists as a convenience for `Deserialize` implementations. + /// `MapAccess` implementations should not override the default behavior. + #[inline] + fn next_entry(&mut self) -> Result, Self::Error> + where + K: Deserialize<'de>, + V: Deserialize<'de>, + { + self.next_entry_seed(PhantomData, PhantomData) + } + + /// Returns the number of entries remaining in the map, if known. + #[inline] + fn size_hint(&self) -> Option { + None + } +} + +impl<'de, A> MapAccess<'de> for &mut A +where + A: ?Sized + MapAccess<'de>, +{ + type Error = A::Error; + + #[inline] + fn next_key_seed(&mut self, seed: K) -> Result, Self::Error> + where + K: DeserializeSeed<'de>, + { + (**self).next_key_seed(seed) + } + + #[inline] + fn next_value_seed(&mut self, seed: V) -> Result + where + V: DeserializeSeed<'de>, + { + (**self).next_value_seed(seed) + } + + #[inline] + fn next_entry_seed( + &mut self, + kseed: K, + vseed: V, + ) -> Result, Self::Error> + where + K: DeserializeSeed<'de>, + V: DeserializeSeed<'de>, + { + (**self).next_entry_seed(kseed, vseed) + } + + #[inline] + fn next_entry(&mut self) -> Result, Self::Error> + where + K: Deserialize<'de>, + V: Deserialize<'de>, + { + (**self).next_entry() + } + + #[inline] + fn next_key(&mut self) -> Result, Self::Error> + where + K: Deserialize<'de>, + { + (**self).next_key() + } + + #[inline] + fn next_value(&mut self) -> Result + where + V: Deserialize<'de>, + { + (**self).next_value() + } + + #[inline] + fn size_hint(&self) -> Option { + (**self).size_hint() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// Provides a `Visitor` access to the data of an enum in the input. 
+/// +/// `EnumAccess` is created by the `Deserializer` and passed to the +/// `Visitor` in order to identify which variant of an enum to deserialize. +/// +/// # Lifetime +/// +/// The `'de` lifetime of this trait is the lifetime of data that may be +/// borrowed by the deserialized enum variant. See the page [Understanding +/// deserializer lifetimes] for a more detailed explanation of these lifetimes. +/// +/// [Understanding deserializer lifetimes]: https://serde.rs/lifetimes.html +/// +/// # Example implementation +/// +/// The [example data format] presented on the website demonstrates an +/// implementation of `EnumAccess` for a basic JSON data format. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::de::EnumAccess<'de>` is not satisfied", + ) +)] +pub trait EnumAccess<'de>: Sized { + /// The error type that can be returned if some error occurs during + /// deserialization. + type Error: Error; + /// The `Visitor` that will be used to deserialize the content of the enum + /// variant. + type Variant: VariantAccess<'de, Error = Self::Error>; + + /// `variant` is called to identify which variant to deserialize. + /// + /// `Deserialize` implementations should typically use `EnumAccess::variant` + /// instead. + fn variant_seed(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error> + where + V: DeserializeSeed<'de>; + + /// `variant` is called to identify which variant to deserialize. + /// + /// This method exists as a convenience for `Deserialize` implementations. + /// `EnumAccess` implementations should not override the default behavior. 
+ #[inline] + fn variant(self) -> Result<(V, Self::Variant), Self::Error> + where + V: Deserialize<'de>, + { + self.variant_seed(PhantomData) + } +} + +/// `VariantAccess` is a visitor that is created by the `Deserializer` and +/// passed to the `Deserialize` to deserialize the content of a particular enum +/// variant. +/// +/// # Lifetime +/// +/// The `'de` lifetime of this trait is the lifetime of data that may be +/// borrowed by the deserialized enum variant. See the page [Understanding +/// deserializer lifetimes] for a more detailed explanation of these lifetimes. +/// +/// [Understanding deserializer lifetimes]: https://serde.rs/lifetimes.html +/// +/// # Example implementation +/// +/// The [example data format] presented on the website demonstrates an +/// implementation of `VariantAccess` for a basic JSON data format. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::de::VariantAccess<'de>` is not satisfied", + ) +)] +pub trait VariantAccess<'de>: Sized { + /// The error type that can be returned if some error occurs during + /// deserialization. Must match the error type of our `EnumAccess`. + type Error: Error; + + /// Called when deserializing a variant with no values. + /// + /// If the data contains a different type of variant, the following + /// `invalid_type` error should be constructed: + /// + /// ```edition2021 + /// # use serde::de::{self, value, DeserializeSeed, Visitor, VariantAccess, Unexpected}; + /// # + /// # struct X; + /// # + /// # impl<'de> VariantAccess<'de> for X { + /// # type Error = value::Error; + /// # + /// fn unit_variant(self) -> Result<(), Self::Error> { + /// // What the data actually contained; suppose it is a tuple variant. 
+ /// let unexp = Unexpected::TupleVariant; + /// Err(de::Error::invalid_type(unexp, &"unit variant")) + /// } + /// # + /// # fn newtype_variant_seed(self, _: T) -> Result + /// # where + /// # T: DeserializeSeed<'de>, + /// # { unimplemented!() } + /// # + /// # fn tuple_variant(self, _: usize, _: V) -> Result + /// # where + /// # V: Visitor<'de>, + /// # { unimplemented!() } + /// # + /// # fn struct_variant(self, _: &[&str], _: V) -> Result + /// # where + /// # V: Visitor<'de>, + /// # { unimplemented!() } + /// # } + /// ``` + fn unit_variant(self) -> Result<(), Self::Error>; + + /// Called when deserializing a variant with a single value. + /// + /// `Deserialize` implementations should typically use + /// `VariantAccess::newtype_variant` instead. + /// + /// If the data contains a different type of variant, the following + /// `invalid_type` error should be constructed: + /// + /// ```edition2021 + /// # use serde::de::{self, value, DeserializeSeed, Visitor, VariantAccess, Unexpected}; + /// # + /// # struct X; + /// # + /// # impl<'de> VariantAccess<'de> for X { + /// # type Error = value::Error; + /// # + /// # fn unit_variant(self) -> Result<(), Self::Error> { + /// # unimplemented!() + /// # } + /// # + /// fn newtype_variant_seed(self, _seed: T) -> Result + /// where + /// T: DeserializeSeed<'de>, + /// { + /// // What the data actually contained; suppose it is a unit variant. + /// let unexp = Unexpected::UnitVariant; + /// Err(de::Error::invalid_type(unexp, &"newtype variant")) + /// } + /// # + /// # fn tuple_variant(self, _: usize, _: V) -> Result + /// # where + /// # V: Visitor<'de>, + /// # { unimplemented!() } + /// # + /// # fn struct_variant(self, _: &[&str], _: V) -> Result + /// # where + /// # V: Visitor<'de>, + /// # { unimplemented!() } + /// # } + /// ``` + fn newtype_variant_seed(self, seed: T) -> Result + where + T: DeserializeSeed<'de>; + + /// Called when deserializing a variant with a single value. 
+ /// + /// This method exists as a convenience for `Deserialize` implementations. + /// `VariantAccess` implementations should not override the default + /// behavior. + #[inline] + fn newtype_variant(self) -> Result + where + T: Deserialize<'de>, + { + self.newtype_variant_seed(PhantomData) + } + + /// Called when deserializing a tuple-like variant. + /// + /// The `len` is the number of fields expected in the tuple variant. + /// + /// If the data contains a different type of variant, the following + /// `invalid_type` error should be constructed: + /// + /// ```edition2021 + /// # use serde::de::{self, value, DeserializeSeed, Visitor, VariantAccess, Unexpected}; + /// # + /// # struct X; + /// # + /// # impl<'de> VariantAccess<'de> for X { + /// # type Error = value::Error; + /// # + /// # fn unit_variant(self) -> Result<(), Self::Error> { + /// # unimplemented!() + /// # } + /// # + /// # fn newtype_variant_seed(self, _: T) -> Result + /// # where + /// # T: DeserializeSeed<'de>, + /// # { unimplemented!() } + /// # + /// fn tuple_variant(self, _len: usize, _visitor: V) -> Result + /// where + /// V: Visitor<'de>, + /// { + /// // What the data actually contained; suppose it is a unit variant. + /// let unexp = Unexpected::UnitVariant; + /// Err(de::Error::invalid_type(unexp, &"tuple variant")) + /// } + /// # + /// # fn struct_variant(self, _: &[&str], _: V) -> Result + /// # where + /// # V: Visitor<'de>, + /// # { unimplemented!() } + /// # } + /// ``` + fn tuple_variant(self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>; + + /// Called when deserializing a struct-like variant. + /// + /// The `fields` are the names of the fields of the struct variant. 
+ /// + /// If the data contains a different type of variant, the following + /// `invalid_type` error should be constructed: + /// + /// ```edition2021 + /// # use serde::de::{self, value, DeserializeSeed, Visitor, VariantAccess, Unexpected}; + /// # + /// # struct X; + /// # + /// # impl<'de> VariantAccess<'de> for X { + /// # type Error = value::Error; + /// # + /// # fn unit_variant(self) -> Result<(), Self::Error> { + /// # unimplemented!() + /// # } + /// # + /// # fn newtype_variant_seed(self, _: T) -> Result + /// # where + /// # T: DeserializeSeed<'de>, + /// # { unimplemented!() } + /// # + /// # fn tuple_variant(self, _: usize, _: V) -> Result + /// # where + /// # V: Visitor<'de>, + /// # { unimplemented!() } + /// # + /// fn struct_variant( + /// self, + /// _fields: &'static [&'static str], + /// _visitor: V, + /// ) -> Result + /// where + /// V: Visitor<'de>, + /// { + /// // What the data actually contained; suppose it is a unit variant. + /// let unexp = Unexpected::UnitVariant; + /// Err(de::Error::invalid_type(unexp, &"struct variant")) + /// } + /// # } + /// ``` + fn struct_variant( + self, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>; +} + +//////////////////////////////////////////////////////////////////////////////// + +/// Converts an existing value into a `Deserializer` from which other values can +/// be deserialized. +/// +/// # Lifetime +/// +/// The `'de` lifetime of this trait is the lifetime of data that may be +/// borrowed from the resulting `Deserializer`. See the page [Understanding +/// deserializer lifetimes] for a more detailed explanation of these lifetimes. 
+/// +/// [Understanding deserializer lifetimes]: https://serde.rs/lifetimes.html +/// +/// # Example +/// +/// ```edition2021 +/// use serde::de::{value, Deserialize, IntoDeserializer}; +/// use serde_derive::Deserialize; +/// use std::str::FromStr; +/// +/// #[derive(Deserialize)] +/// enum Setting { +/// On, +/// Off, +/// } +/// +/// impl FromStr for Setting { +/// type Err = value::Error; +/// +/// fn from_str(s: &str) -> Result { +/// Self::deserialize(s.into_deserializer()) +/// } +/// } +/// ``` +pub trait IntoDeserializer<'de, E: Error = value::Error> { + /// The type of the deserializer being converted into. + type Deserializer: Deserializer<'de, Error = E>; + + /// Convert this value into a deserializer. + fn into_deserializer(self) -> Self::Deserializer; +} + +//////////////////////////////////////////////////////////////////////////////// + +/// Used in error messages. +/// +/// - expected `a` +/// - expected `a` or `b` +/// - expected one of `a`, `b`, `c` +/// +/// The slice of names must not be empty. 
+struct OneOf { + names: &'static [&'static str], +} + +impl Display for OneOf { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match self.names.len() { + 0 => panic!(), // special case elsewhere + 1 => write!(formatter, "`{}`", self.names[0]), + 2 => write!(formatter, "`{}` or `{}`", self.names[0], self.names[1]), + _ => { + tri!(formatter.write_str("one of ")); + for (i, alt) in self.names.iter().enumerate() { + if i > 0 { + tri!(formatter.write_str(", ")); + } + tri!(write!(formatter, "`{}`", alt)); + } + Ok(()) + } + } + } +} + +struct WithDecimalPoint(f64); + +impl Display for WithDecimalPoint { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + struct LookForDecimalPoint<'f, 'a> { + formatter: &'f mut fmt::Formatter<'a>, + has_decimal_point: bool, + } + + impl<'f, 'a> fmt::Write for LookForDecimalPoint<'f, 'a> { + fn write_str(&mut self, fragment: &str) -> fmt::Result { + self.has_decimal_point |= fragment.contains('.'); + self.formatter.write_str(fragment) + } + + fn write_char(&mut self, ch: char) -> fmt::Result { + self.has_decimal_point |= ch == '.'; + self.formatter.write_char(ch) + } + } + + if self.0.is_finite() { + let mut writer = LookForDecimalPoint { + formatter, + has_decimal_point: false, + }; + tri!(write!(writer, "{}", self.0)); + if !writer.has_decimal_point { + tri!(formatter.write_str(".0")); + } + } else { + tri!(write!(formatter, "{}", self.0)); + } + Ok(()) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/value.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/value.rs new file mode 100644 index 0000000000000000000000000000000000000000..3d5475def127eb1daaeb0a4e991b07a4ee33867d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/de/value.rs @@ -0,0 +1,1895 @@ +//! Building blocks for deserializing basic values using the `IntoDeserializer` +//! trait. +//! +//! 
```edition2021 +//! use serde::de::{value, Deserialize, IntoDeserializer}; +//! use serde_derive::Deserialize; +//! use std::str::FromStr; +//! +//! #[derive(Deserialize)] +//! enum Setting { +//! On, +//! Off, +//! } +//! +//! impl FromStr for Setting { +//! type Err = value::Error; +//! +//! fn from_str(s: &str) -> Result { +//! Self::deserialize(s.into_deserializer()) +//! } +//! } +//! ``` + +use crate::lib::*; + +use self::private::{First, Second}; +use crate::de::{self, Deserializer, Expected, IntoDeserializer, SeqAccess, Visitor}; +use crate::private::size_hint; +use crate::ser; + +//////////////////////////////////////////////////////////////////////////////// + +// For structs that contain a PhantomData. We do not want the trait +// bound `E: Clone` inferred by derive(Clone). +macro_rules! impl_copy_clone { + ($ty:ident $(<$lifetime:tt>)*) => { + impl<$($lifetime,)* E> Copy for $ty<$($lifetime,)* E> {} + + impl<$($lifetime,)* E> Clone for $ty<$($lifetime,)* E> { + fn clone(&self) -> Self { + *self + } + } + }; +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A minimal representation of all possible errors that can occur using the +/// `IntoDeserializer` trait. 
+#[derive(Clone, PartialEq)] +pub struct Error { + err: ErrorImpl, +} + +#[cfg(any(feature = "std", feature = "alloc"))] +type ErrorImpl = Box; +#[cfg(not(any(feature = "std", feature = "alloc")))] +type ErrorImpl = (); + +impl de::Error for Error { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cold] + fn custom(msg: T) -> Self + where + T: Display, + { + Error { + err: msg.to_string().into_boxed_str(), + } + } + + #[cfg(not(any(feature = "std", feature = "alloc")))] + #[cold] + fn custom(msg: T) -> Self + where + T: Display, + { + let _ = msg; + Error { err: () } + } +} + +impl ser::Error for Error { + #[cold] + fn custom(msg: T) -> Self + where + T: Display, + { + de::Error::custom(msg) + } +} + +impl Display for Error { + #[cfg(any(feature = "std", feature = "alloc"))] + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(&self.err) + } + + #[cfg(not(any(feature = "std", feature = "alloc")))] + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Serde deserialization error") + } +} + +impl Debug for Error { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let mut debug = formatter.debug_tuple("Error"); + #[cfg(any(feature = "std", feature = "alloc"))] + debug.field(&self.err); + debug.finish() + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl error::Error for Error { + fn description(&self) -> &str { + &self.err + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl<'de, E> IntoDeserializer<'de, E> for () +where + E: de::Error, +{ + type Deserializer = UnitDeserializer; + + fn into_deserializer(self) -> UnitDeserializer { + UnitDeserializer::new() + } +} + +/// A deserializer holding a `()`. 
+pub struct UnitDeserializer { + marker: PhantomData, +} + +impl_copy_clone!(UnitDeserializer); + +impl UnitDeserializer { + #[allow(missing_docs)] + pub fn new() -> Self { + UnitDeserializer { + marker: PhantomData, + } + } +} + +impl<'de, E> de::Deserializer<'de> for UnitDeserializer +where + E: de::Error, +{ + type Error = E; + + forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf unit unit_struct newtype_struct seq tuple tuple_struct + map struct enum identifier ignored_any + } + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_unit() + } + + fn deserialize_option(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_none() + } +} + +impl<'de, E> IntoDeserializer<'de, E> for UnitDeserializer +where + E: de::Error, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +impl Debug for UnitDeserializer { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.debug_struct("UnitDeserializer").finish() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A deserializer that cannot be instantiated. +#[cfg(feature = "unstable")] +#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))] +pub struct NeverDeserializer { + never: !, + marker: PhantomData, +} + +#[cfg(feature = "unstable")] +#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))] +impl<'de, E> IntoDeserializer<'de, E> for ! +where + E: de::Error, +{ + type Deserializer = NeverDeserializer; + + fn into_deserializer(self) -> Self::Deserializer { + self + } +} + +#[cfg(feature = "unstable")] +impl<'de, E> de::Deserializer<'de> for NeverDeserializer +where + E: de::Error, +{ + type Error = E; + + fn deserialize_any(self, _visitor: V) -> Result + where + V: de::Visitor<'de>, + { + self.never + } + + forward_to_deserialize_any! 
{ + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct enum identifier ignored_any + } +} + +#[cfg(feature = "unstable")] +impl<'de, E> IntoDeserializer<'de, E> for NeverDeserializer +where + E: de::Error, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! primitive_deserializer { + ($ty:ty, $doc:tt, $name:ident, $method:ident $($cast:tt)*) => { + #[doc = "A deserializer holding"] + #[doc = $doc] + pub struct $name { + value: $ty, + marker: PhantomData + } + + impl_copy_clone!($name); + + impl<'de, E> IntoDeserializer<'de, E> for $ty + where + E: de::Error, + { + type Deserializer = $name; + + fn into_deserializer(self) -> $name { + $name::new(self) + } + } + + impl $name { + #[allow(missing_docs)] + pub fn new(value: $ty) -> Self { + $name { + value, + marker: PhantomData, + } + } + } + + impl<'de, E> de::Deserializer<'de> for $name + where + E: de::Error, + { + type Error = E; + + forward_to_deserialize_any! 
{ + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str + string bytes byte_buf option unit unit_struct newtype_struct seq + tuple tuple_struct map struct enum identifier ignored_any + } + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.$method(self.value $($cast)*) + } + } + + impl<'de, E> IntoDeserializer<'de, E> for $name + where + E: de::Error, + { + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } + } + + impl Debug for $name { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_struct(stringify!($name)) + .field("value", &self.value) + .finish() + } + } + } +} + +primitive_deserializer!(bool, "a `bool`.", BoolDeserializer, visit_bool); +primitive_deserializer!(i8, "an `i8`.", I8Deserializer, visit_i8); +primitive_deserializer!(i16, "an `i16`.", I16Deserializer, visit_i16); +primitive_deserializer!(i32, "an `i32`.", I32Deserializer, visit_i32); +primitive_deserializer!(i64, "an `i64`.", I64Deserializer, visit_i64); +primitive_deserializer!(i128, "an `i128`.", I128Deserializer, visit_i128); +primitive_deserializer!(isize, "an `isize`.", IsizeDeserializer, visit_i64 as i64); +primitive_deserializer!(u8, "a `u8`.", U8Deserializer, visit_u8); +primitive_deserializer!(u16, "a `u16`.", U16Deserializer, visit_u16); +primitive_deserializer!(u64, "a `u64`.", U64Deserializer, visit_u64); +primitive_deserializer!(u128, "a `u128`.", U128Deserializer, visit_u128); +primitive_deserializer!(usize, "a `usize`.", UsizeDeserializer, visit_u64 as u64); +primitive_deserializer!(f32, "an `f32`.", F32Deserializer, visit_f32); +primitive_deserializer!(f64, "an `f64`.", F64Deserializer, visit_f64); +primitive_deserializer!(char, "a `char`.", CharDeserializer, visit_char); + +/// A deserializer holding a `u32`. 
+pub struct U32Deserializer { + value: u32, + marker: PhantomData, +} + +impl_copy_clone!(U32Deserializer); + +impl<'de, E> IntoDeserializer<'de, E> for u32 +where + E: de::Error, +{ + type Deserializer = U32Deserializer; + + fn into_deserializer(self) -> U32Deserializer { + U32Deserializer::new(self) + } +} + +impl U32Deserializer { + #[allow(missing_docs)] + pub fn new(value: u32) -> Self { + U32Deserializer { + value, + marker: PhantomData, + } + } +} + +impl<'de, E> de::Deserializer<'de> for U32Deserializer +where + E: de::Error, +{ + type Error = E; + + forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct identifier ignored_any + } + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_u32(self.value) + } + + fn deserialize_enum( + self, + name: &str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + let _ = name; + let _ = variants; + visitor.visit_enum(self) + } +} + +impl<'de, E> IntoDeserializer<'de, E> for U32Deserializer +where + E: de::Error, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +impl<'de, E> de::EnumAccess<'de> for U32Deserializer +where + E: de::Error, +{ + type Error = E; + type Variant = private::UnitOnly; + + fn variant_seed(self, seed: T) -> Result<(T::Value, Self::Variant), Self::Error> + where + T: de::DeserializeSeed<'de>, + { + seed.deserialize(self).map(private::unit_only) + } +} + +impl Debug for U32Deserializer { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_struct("U32Deserializer") + .field("value", &self.value) + .finish() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A deserializer holding a `&str`. 
+pub struct StrDeserializer<'a, E> { + value: &'a str, + marker: PhantomData, +} + +impl_copy_clone!(StrDeserializer<'de>); + +impl<'de, 'a, E> IntoDeserializer<'de, E> for &'a str +where + E: de::Error, +{ + type Deserializer = StrDeserializer<'a, E>; + + fn into_deserializer(self) -> StrDeserializer<'a, E> { + StrDeserializer::new(self) + } +} + +impl<'a, E> StrDeserializer<'a, E> { + #[allow(missing_docs)] + pub fn new(value: &'a str) -> Self { + StrDeserializer { + value, + marker: PhantomData, + } + } +} + +impl<'de, 'a, E> de::Deserializer<'de> for StrDeserializer<'a, E> +where + E: de::Error, +{ + type Error = E; + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_str(self.value) + } + + fn deserialize_enum( + self, + name: &str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + let _ = name; + let _ = variants; + visitor.visit_enum(self) + } + + forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct identifier ignored_any + } +} + +impl<'de, 'a, E> IntoDeserializer<'de, E> for StrDeserializer<'a, E> +where + E: de::Error, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +impl<'de, 'a, E> de::EnumAccess<'de> for StrDeserializer<'a, E> +where + E: de::Error, +{ + type Error = E; + type Variant = private::UnitOnly; + + fn variant_seed(self, seed: T) -> Result<(T::Value, Self::Variant), Self::Error> + where + T: de::DeserializeSeed<'de>, + { + seed.deserialize(self).map(private::unit_only) + } +} + +impl<'a, E> Debug for StrDeserializer<'a, E> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_struct("StrDeserializer") + .field("value", &self.value) + .finish() + } +} + +//////////////////////////////////////////////////////////////////////////////// 
+ +/// A deserializer holding a `&str` with a lifetime tied to another +/// deserializer. +pub struct BorrowedStrDeserializer<'de, E> { + value: &'de str, + marker: PhantomData, +} + +impl_copy_clone!(BorrowedStrDeserializer<'de>); + +impl<'de, E> BorrowedStrDeserializer<'de, E> { + /// Create a new borrowed deserializer from the given string. + pub fn new(value: &'de str) -> BorrowedStrDeserializer<'de, E> { + BorrowedStrDeserializer { + value, + marker: PhantomData, + } + } +} + +impl<'de, E> de::Deserializer<'de> for BorrowedStrDeserializer<'de, E> +where + E: de::Error, +{ + type Error = E; + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_borrowed_str(self.value) + } + + fn deserialize_enum( + self, + name: &str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + let _ = name; + let _ = variants; + visitor.visit_enum(self) + } + + forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct identifier ignored_any + } +} + +impl<'de, E> IntoDeserializer<'de, E> for BorrowedStrDeserializer<'de, E> +where + E: de::Error, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +impl<'de, E> de::EnumAccess<'de> for BorrowedStrDeserializer<'de, E> +where + E: de::Error, +{ + type Error = E; + type Variant = private::UnitOnly; + + fn variant_seed(self, seed: T) -> Result<(T::Value, Self::Variant), Self::Error> + where + T: de::DeserializeSeed<'de>, + { + seed.deserialize(self).map(private::unit_only) + } +} + +impl<'de, E> Debug for BorrowedStrDeserializer<'de, E> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_struct("BorrowedStrDeserializer") + .field("value", &self.value) + .finish() + } +} + 
+//////////////////////////////////////////////////////////////////////////////// + +/// A deserializer holding a `String`. +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +pub struct StringDeserializer { + value: String, + marker: PhantomData, +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl Clone for StringDeserializer { + fn clone(&self) -> Self { + StringDeserializer { + value: self.value.clone(), + marker: PhantomData, + } + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +impl<'de, E> IntoDeserializer<'de, E> for String +where + E: de::Error, +{ + type Deserializer = StringDeserializer; + + fn into_deserializer(self) -> StringDeserializer { + StringDeserializer::new(self) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl StringDeserializer { + #[allow(missing_docs)] + pub fn new(value: String) -> Self { + StringDeserializer { + value, + marker: PhantomData, + } + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'de, E> de::Deserializer<'de> for StringDeserializer +where + E: de::Error, +{ + type Error = E; + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_string(self.value) + } + + fn deserialize_enum( + self, + name: &str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + let _ = name; + let _ = variants; + visitor.visit_enum(self) + } + + forward_to_deserialize_any! 
{ + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct identifier ignored_any + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'de, E> IntoDeserializer<'de, E> for StringDeserializer +where + E: de::Error, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'de, E> de::EnumAccess<'de> for StringDeserializer +where + E: de::Error, +{ + type Error = E; + type Variant = private::UnitOnly; + + fn variant_seed(self, seed: T) -> Result<(T::Value, Self::Variant), Self::Error> + where + T: de::DeserializeSeed<'de>, + { + seed.deserialize(self).map(private::unit_only) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl Debug for StringDeserializer { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_struct("StringDeserializer") + .field("value", &self.value) + .finish() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A deserializer holding a `Cow`. 
+#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +pub struct CowStrDeserializer<'a, E> { + value: Cow<'a, str>, + marker: PhantomData, +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'a, E> Clone for CowStrDeserializer<'a, E> { + fn clone(&self) -> Self { + CowStrDeserializer { + value: self.value.clone(), + marker: PhantomData, + } + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +impl<'de, 'a, E> IntoDeserializer<'de, E> for Cow<'a, str> +where + E: de::Error, +{ + type Deserializer = CowStrDeserializer<'a, E>; + + fn into_deserializer(self) -> CowStrDeserializer<'a, E> { + CowStrDeserializer::new(self) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'a, E> CowStrDeserializer<'a, E> { + #[allow(missing_docs)] + pub fn new(value: Cow<'a, str>) -> Self { + CowStrDeserializer { + value, + marker: PhantomData, + } + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'de, 'a, E> de::Deserializer<'de> for CowStrDeserializer<'a, E> +where + E: de::Error, +{ + type Error = E; + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + match self.value { + Cow::Borrowed(string) => visitor.visit_str(string), + Cow::Owned(string) => visitor.visit_string(string), + } + } + + fn deserialize_enum( + self, + name: &str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + let _ = name; + let _ = variants; + visitor.visit_enum(self) + } + + forward_to_deserialize_any! 
{ + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct identifier ignored_any + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'de, 'a, E> IntoDeserializer<'de, E> for CowStrDeserializer<'a, E> +where + E: de::Error, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'de, 'a, E> de::EnumAccess<'de> for CowStrDeserializer<'a, E> +where + E: de::Error, +{ + type Error = E; + type Variant = private::UnitOnly; + + fn variant_seed(self, seed: T) -> Result<(T::Value, Self::Variant), Self::Error> + where + T: de::DeserializeSeed<'de>, + { + seed.deserialize(self).map(private::unit_only) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'a, E> Debug for CowStrDeserializer<'a, E> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_struct("CowStrDeserializer") + .field("value", &self.value) + .finish() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A deserializer holding a `&[u8]`. Always calls [`Visitor::visit_bytes`]. +pub struct BytesDeserializer<'a, E> { + value: &'a [u8], + marker: PhantomData, +} + +impl<'a, E> BytesDeserializer<'a, E> { + /// Create a new deserializer from the given bytes. 
+ pub fn new(value: &'a [u8]) -> Self { + BytesDeserializer { + value, + marker: PhantomData, + } + } +} + +impl_copy_clone!(BytesDeserializer<'a>); + +impl<'de, 'a, E> IntoDeserializer<'de, E> for &'a [u8] +where + E: de::Error, +{ + type Deserializer = BytesDeserializer<'a, E>; + + fn into_deserializer(self) -> BytesDeserializer<'a, E> { + BytesDeserializer::new(self) + } +} + +impl<'de, 'a, E> Deserializer<'de> for BytesDeserializer<'a, E> +where + E: de::Error, +{ + type Error = E; + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_bytes(self.value) + } + + forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct enum identifier ignored_any + } +} + +impl<'de, 'a, E> IntoDeserializer<'de, E> for BytesDeserializer<'a, E> +where + E: de::Error, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +impl<'a, E> Debug for BytesDeserializer<'a, E> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_struct("BytesDeserializer") + .field("value", &self.value) + .finish() + } +} + +/// A deserializer holding a `&[u8]` with a lifetime tied to another +/// deserializer. Always calls [`Visitor::visit_borrowed_bytes`]. +pub struct BorrowedBytesDeserializer<'de, E> { + value: &'de [u8], + marker: PhantomData, +} + +impl<'de, E> BorrowedBytesDeserializer<'de, E> { + /// Create a new borrowed deserializer from the given borrowed bytes. 
+ pub fn new(value: &'de [u8]) -> Self { + BorrowedBytesDeserializer { + value, + marker: PhantomData, + } + } +} + +impl_copy_clone!(BorrowedBytesDeserializer<'de>); + +impl<'de, E> Deserializer<'de> for BorrowedBytesDeserializer<'de, E> +where + E: de::Error, +{ + type Error = E; + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_borrowed_bytes(self.value) + } + + forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct enum identifier ignored_any + } +} + +impl<'de, E> IntoDeserializer<'de, E> for BorrowedBytesDeserializer<'de, E> +where + E: de::Error, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +impl<'de, E> Debug for BorrowedBytesDeserializer<'de, E> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_struct("BorrowedBytesDeserializer") + .field("value", &self.value) + .finish() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A deserializer that iterates over a sequence. +#[derive(Clone)] +pub struct SeqDeserializer { + iter: iter::Fuse, + count: usize, + marker: PhantomData, +} + +impl SeqDeserializer +where + I: Iterator, +{ + /// Construct a new `SeqDeserializer`. + pub fn new(iter: I) -> Self { + SeqDeserializer { + iter: iter.fuse(), + count: 0, + marker: PhantomData, + } + } +} + +impl SeqDeserializer +where + I: Iterator, + E: de::Error, +{ + /// Check for remaining elements after passing a `SeqDeserializer` to + /// `Visitor::visit_seq`. + pub fn end(self) -> Result<(), E> { + let remaining = self.iter.count(); + if remaining == 0 { + Ok(()) + } else { + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. 
+ Err(de::Error::invalid_length( + self.count + remaining, + &ExpectedInSeq(self.count), + )) + } + } +} + +impl<'de, I, T, E> de::Deserializer<'de> for SeqDeserializer +where + I: Iterator, + T: IntoDeserializer<'de, E>, + E: de::Error, +{ + type Error = E; + + fn deserialize_any(mut self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + let v = tri!(visitor.visit_seq(&mut self)); + tri!(self.end()); + Ok(v) + } + + forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct enum identifier ignored_any + } +} + +impl<'de, I, T, E> IntoDeserializer<'de, E> for SeqDeserializer +where + I: Iterator, + T: IntoDeserializer<'de, E>, + E: de::Error, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +impl<'de, I, T, E> de::SeqAccess<'de> for SeqDeserializer +where + I: Iterator, + T: IntoDeserializer<'de, E>, + E: de::Error, +{ + type Error = E; + + fn next_element_seed(&mut self, seed: V) -> Result, Self::Error> + where + V: de::DeserializeSeed<'de>, + { + match self.iter.next() { + Some(value) => { + self.count += 1; + seed.deserialize(value.into_deserializer()).map(Some) + } + None => Ok(None), + } + } + + fn size_hint(&self) -> Option { + size_hint::from_bounds(&self.iter) + } +} + +struct ExpectedInSeq(usize); + +impl Expected for ExpectedInSeq { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + if self.0 == 1 { + formatter.write_str("1 element in sequence") + } else { + write!(formatter, "{} elements in sequence", self.0) + } + } +} + +impl Debug for SeqDeserializer +where + I: Debug, +{ + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_struct("SeqDeserializer") + .field("iter", &self.iter) + .field("count", &self.count) + .finish() + } +} + +//////////////////////////////////////////////////////////////////////////////// + 
+#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +impl<'de, T, E> IntoDeserializer<'de, E> for Vec +where + T: IntoDeserializer<'de, E>, + E: de::Error, +{ + type Deserializer = SeqDeserializer<::IntoIter, E>; + + fn into_deserializer(self) -> Self::Deserializer { + SeqDeserializer::new(self.into_iter()) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +impl<'de, T, E> IntoDeserializer<'de, E> for BTreeSet +where + T: IntoDeserializer<'de, E> + Eq + Ord, + E: de::Error, +{ + type Deserializer = SeqDeserializer<::IntoIter, E>; + + fn into_deserializer(self) -> Self::Deserializer { + SeqDeserializer::new(self.into_iter()) + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl<'de, T, S, E> IntoDeserializer<'de, E> for HashSet +where + T: IntoDeserializer<'de, E> + Eq + Hash, + S: BuildHasher, + E: de::Error, +{ + type Deserializer = SeqDeserializer<::IntoIter, E>; + + fn into_deserializer(self) -> Self::Deserializer { + SeqDeserializer::new(self.into_iter()) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A deserializer holding a `SeqAccess`. +#[derive(Clone, Debug)] +pub struct SeqAccessDeserializer { + seq: A, +} + +impl SeqAccessDeserializer { + /// Construct a new `SeqAccessDeserializer`. + pub fn new(seq: A) -> Self { + SeqAccessDeserializer { seq } + } +} + +impl<'de, A> de::Deserializer<'de> for SeqAccessDeserializer +where + A: de::SeqAccess<'de>, +{ + type Error = A::Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_seq(self.seq) + } + + forward_to_deserialize_any! 
{ + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct enum identifier ignored_any + } +} + +impl<'de, A> IntoDeserializer<'de, A::Error> for SeqAccessDeserializer +where + A: de::SeqAccess<'de>, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A deserializer that iterates over a map. +pub struct MapDeserializer<'de, I, E> +where + I: Iterator, + I::Item: private::Pair, +{ + iter: iter::Fuse, + value: Option>, + count: usize, + lifetime: PhantomData<&'de ()>, + error: PhantomData, +} + +impl<'de, I, E> MapDeserializer<'de, I, E> +where + I: Iterator, + I::Item: private::Pair, +{ + /// Construct a new `MapDeserializer`. + pub fn new(iter: I) -> Self { + MapDeserializer { + iter: iter.fuse(), + value: None, + count: 0, + lifetime: PhantomData, + error: PhantomData, + } + } +} + +impl<'de, I, E> MapDeserializer<'de, I, E> +where + I: Iterator, + I::Item: private::Pair, + E: de::Error, +{ + /// Check for remaining elements after passing a `MapDeserializer` to + /// `Visitor::visit_map`. + pub fn end(self) -> Result<(), E> { + let remaining = self.iter.count(); + if remaining == 0 { + Ok(()) + } else { + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. 
+ Err(de::Error::invalid_length( + self.count + remaining, + &ExpectedInMap(self.count), + )) + } + } +} + +impl<'de, I, E> MapDeserializer<'de, I, E> +where + I: Iterator, + I::Item: private::Pair, +{ + fn next_pair(&mut self) -> Option<(First, Second)> { + match self.iter.next() { + Some(kv) => { + self.count += 1; + Some(private::Pair::split(kv)) + } + None => None, + } + } +} + +impl<'de, I, E> de::Deserializer<'de> for MapDeserializer<'de, I, E> +where + I: Iterator, + I::Item: private::Pair, + First: IntoDeserializer<'de, E>, + Second: IntoDeserializer<'de, E>, + E: de::Error, +{ + type Error = E; + + fn deserialize_any(mut self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + let value = tri!(visitor.visit_map(&mut self)); + tri!(self.end()); + Ok(value) + } + + fn deserialize_seq(mut self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + let value = tri!(visitor.visit_seq(&mut self)); + tri!(self.end()); + Ok(value) + } + + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + let _ = len; + self.deserialize_seq(visitor) + } + + forward_to_deserialize_any! 
{ + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct tuple_struct map + struct enum identifier ignored_any + } +} + +impl<'de, I, E> IntoDeserializer<'de, E> for MapDeserializer<'de, I, E> +where + I: Iterator, + I::Item: private::Pair, + First: IntoDeserializer<'de, E>, + Second: IntoDeserializer<'de, E>, + E: de::Error, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +impl<'de, I, E> de::MapAccess<'de> for MapDeserializer<'de, I, E> +where + I: Iterator, + I::Item: private::Pair, + First: IntoDeserializer<'de, E>, + Second: IntoDeserializer<'de, E>, + E: de::Error, +{ + type Error = E; + + fn next_key_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: de::DeserializeSeed<'de>, + { + match self.next_pair() { + Some((key, value)) => { + self.value = Some(value); + seed.deserialize(key.into_deserializer()).map(Some) + } + None => Ok(None), + } + } + + fn next_value_seed(&mut self, seed: T) -> Result + where + T: de::DeserializeSeed<'de>, + { + let value = self.value.take(); + // Panic because this indicates a bug in the program rather than an + // expected failure. 
+ let value = value.expect("MapAccess::next_value called before next_key"); + seed.deserialize(value.into_deserializer()) + } + + fn next_entry_seed( + &mut self, + kseed: TK, + vseed: TV, + ) -> Result, Self::Error> + where + TK: de::DeserializeSeed<'de>, + TV: de::DeserializeSeed<'de>, + { + match self.next_pair() { + Some((key, value)) => { + let key = tri!(kseed.deserialize(key.into_deserializer())); + let value = tri!(vseed.deserialize(value.into_deserializer())); + Ok(Some((key, value))) + } + None => Ok(None), + } + } + + fn size_hint(&self) -> Option { + size_hint::from_bounds(&self.iter) + } +} + +impl<'de, I, E> de::SeqAccess<'de> for MapDeserializer<'de, I, E> +where + I: Iterator, + I::Item: private::Pair, + First: IntoDeserializer<'de, E>, + Second: IntoDeserializer<'de, E>, + E: de::Error, +{ + type Error = E; + + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: de::DeserializeSeed<'de>, + { + match self.next_pair() { + Some((k, v)) => { + let de = PairDeserializer(k, v, PhantomData); + seed.deserialize(de).map(Some) + } + None => Ok(None), + } + } + + fn size_hint(&self) -> Option { + size_hint::from_bounds(&self.iter) + } +} + +// Cannot #[derive(Clone)] because of the bound `Second: Clone`. 
+impl<'de, I, E> Clone for MapDeserializer<'de, I, E> +where + I: Iterator + Clone, + I::Item: private::Pair, + Second: Clone, +{ + fn clone(&self) -> Self { + MapDeserializer { + iter: self.iter.clone(), + value: self.value.clone(), + count: self.count, + lifetime: self.lifetime, + error: self.error, + } + } +} + +impl<'de, I, E> Debug for MapDeserializer<'de, I, E> +where + I: Iterator + Debug, + I::Item: private::Pair, + Second: Debug, +{ + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter + .debug_struct("MapDeserializer") + .field("iter", &self.iter) + .field("value", &self.value) + .field("count", &self.count) + .finish() + } +} + +// Used in the `impl SeqAccess for MapDeserializer` to visit the map as a +// sequence of pairs. +struct PairDeserializer(A, B, PhantomData); + +impl<'de, A, B, E> de::Deserializer<'de> for PairDeserializer +where + A: IntoDeserializer<'de, E>, + B: IntoDeserializer<'de, E>, + E: de::Error, +{ + type Error = E; + + forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct tuple_struct map + struct enum identifier ignored_any + } + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + self.deserialize_seq(visitor) + } + + fn deserialize_seq(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + let mut pair_visitor = PairVisitor(Some(self.0), Some(self.1), PhantomData); + let pair = tri!(visitor.visit_seq(&mut pair_visitor)); + if pair_visitor.1.is_none() { + Ok(pair) + } else { + let remaining = pair_visitor.size_hint().unwrap(); + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. 
+ Err(de::Error::invalid_length(2, &ExpectedInSeq(2 - remaining))) + } + } + + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + if len == 2 { + self.deserialize_seq(visitor) + } else { + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. + Err(de::Error::invalid_length(2, &ExpectedInSeq(len))) + } + } +} + +struct PairVisitor(Option, Option, PhantomData); + +impl<'de, A, B, E> de::SeqAccess<'de> for PairVisitor +where + A: IntoDeserializer<'de, E>, + B: IntoDeserializer<'de, E>, + E: de::Error, +{ + type Error = E; + + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: de::DeserializeSeed<'de>, + { + if let Some(k) = self.0.take() { + seed.deserialize(k.into_deserializer()).map(Some) + } else if let Some(v) = self.1.take() { + seed.deserialize(v.into_deserializer()).map(Some) + } else { + Ok(None) + } + } + + fn size_hint(&self) -> Option { + if self.0.is_some() { + Some(2) + } else if self.1.is_some() { + Some(1) + } else { + Some(0) + } + } +} + +struct ExpectedInMap(usize); + +impl Expected for ExpectedInMap { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + if self.0 == 1 { + formatter.write_str("1 element in map") + } else { + write!(formatter, "{} elements in map", self.0) + } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +impl<'de, K, V, E> IntoDeserializer<'de, E> for BTreeMap +where + K: IntoDeserializer<'de, E> + Eq + Ord, + V: IntoDeserializer<'de, E>, + E: de::Error, +{ + type Deserializer = MapDeserializer<'de, ::IntoIter, E>; + + fn into_deserializer(self) -> Self::Deserializer { + MapDeserializer::new(self.into_iter()) + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl<'de, K, 
V, S, E> IntoDeserializer<'de, E> for HashMap +where + K: IntoDeserializer<'de, E> + Eq + Hash, + V: IntoDeserializer<'de, E>, + S: BuildHasher, + E: de::Error, +{ + type Deserializer = MapDeserializer<'de, ::IntoIter, E>; + + fn into_deserializer(self) -> Self::Deserializer { + MapDeserializer::new(self.into_iter()) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A deserializer holding a `MapAccess`. +#[derive(Clone, Debug)] +pub struct MapAccessDeserializer { + map: A, +} + +impl MapAccessDeserializer { + /// Construct a new `MapAccessDeserializer`. + pub fn new(map: A) -> Self { + MapAccessDeserializer { map } + } +} + +impl<'de, A> de::Deserializer<'de> for MapAccessDeserializer +where + A: de::MapAccess<'de>, +{ + type Error = A::Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_map(self.map) + } + + fn deserialize_enum( + self, + _name: &str, + _variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_enum(self) + } + + forward_to_deserialize_any! 
{ + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct identifier ignored_any + } +} + +impl<'de, A> IntoDeserializer<'de, A::Error> for MapAccessDeserializer +where + A: de::MapAccess<'de>, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +impl<'de, A> de::EnumAccess<'de> for MapAccessDeserializer +where + A: de::MapAccess<'de>, +{ + type Error = A::Error; + type Variant = private::MapAsEnum; + + fn variant_seed(mut self, seed: T) -> Result<(T::Value, Self::Variant), Self::Error> + where + T: de::DeserializeSeed<'de>, + { + match tri!(self.map.next_key_seed(seed)) { + Some(key) => Ok((key, private::map_as_enum(self.map))), + None => Err(de::Error::invalid_type(de::Unexpected::Map, &"enum")), + } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A deserializer holding an `EnumAccess`. +#[derive(Clone, Debug)] +pub struct EnumAccessDeserializer { + access: A, +} + +impl EnumAccessDeserializer { + /// Construct a new `EnumAccessDeserializer`. + pub fn new(access: A) -> Self { + EnumAccessDeserializer { access } + } +} + +impl<'de, A> de::Deserializer<'de> for EnumAccessDeserializer +where + A: de::EnumAccess<'de>, +{ + type Error = A::Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_enum(self.access) + } + + forward_to_deserialize_any! 
{ + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct enum identifier ignored_any + } +} + +impl<'de, A> IntoDeserializer<'de, A::Error> for EnumAccessDeserializer +where + A: de::EnumAccess<'de>, +{ + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } +} + +//////////////////////////////////////////////////////////////////////////////// + +mod private { + use crate::lib::*; + + use crate::de::{ + self, DeserializeSeed, Deserializer, MapAccess, Unexpected, VariantAccess, Visitor, + }; + + pub struct UnitOnly { + marker: PhantomData, + } + + pub fn unit_only(t: T) -> (T, UnitOnly) { + ( + t, + UnitOnly { + marker: PhantomData, + }, + ) + } + + impl<'de, E> de::VariantAccess<'de> for UnitOnly + where + E: de::Error, + { + type Error = E; + + fn unit_variant(self) -> Result<(), Self::Error> { + Ok(()) + } + + fn newtype_variant_seed(self, _seed: T) -> Result + where + T: de::DeserializeSeed<'de>, + { + Err(de::Error::invalid_type( + Unexpected::UnitVariant, + &"newtype variant", + )) + } + + fn tuple_variant(self, _len: usize, _visitor: V) -> Result + where + V: de::Visitor<'de>, + { + Err(de::Error::invalid_type( + Unexpected::UnitVariant, + &"tuple variant", + )) + } + + fn struct_variant( + self, + _fields: &'static [&'static str], + _visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + Err(de::Error::invalid_type( + Unexpected::UnitVariant, + &"struct variant", + )) + } + } + + pub struct MapAsEnum { + map: A, + } + + pub fn map_as_enum(map: A) -> MapAsEnum { + MapAsEnum { map } + } + + impl<'de, A> VariantAccess<'de> for MapAsEnum + where + A: MapAccess<'de>, + { + type Error = A::Error; + + fn unit_variant(mut self) -> Result<(), Self::Error> { + self.map.next_value() + } + + fn newtype_variant_seed(mut self, seed: T) -> Result + where + T: DeserializeSeed<'de>, + { + self.map.next_value_seed(seed) + } + + fn 
tuple_variant(mut self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.map.next_value_seed(SeedTupleVariant { len, visitor }) + } + + fn struct_variant( + mut self, + _fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.map.next_value_seed(SeedStructVariant { visitor }) + } + } + + struct SeedTupleVariant { + len: usize, + visitor: V, + } + + impl<'de, V> DeserializeSeed<'de> for SeedTupleVariant + where + V: Visitor<'de>, + { + type Value = V::Value; + + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_tuple(self.len, self.visitor) + } + } + + struct SeedStructVariant { + visitor: V, + } + + impl<'de, V> DeserializeSeed<'de> for SeedStructVariant + where + V: Visitor<'de>, + { + type Value = V::Value; + + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_map(self.visitor) + } + } + + /// Avoid having to restate the generic types on `MapDeserializer`. The + /// `Iterator::Item` contains enough information to figure out K and V. 
+ pub trait Pair { + type First; + type Second; + fn split(self) -> (Self::First, Self::Second); + } + + impl Pair for (A, B) { + type First = A; + type Second = B; + fn split(self) -> (A, B) { + self + } + } + + pub type First = ::First; + pub type Second = ::Second; +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/format.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/format.rs new file mode 100644 index 0000000000000000000000000000000000000000..9053cc0704fc94bf09e68dbbd625f5e4bb7ffd30 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/format.rs @@ -0,0 +1,30 @@ +use crate::lib::fmt::{self, Write}; +use crate::lib::str; + +pub(super) struct Buf<'a> { + bytes: &'a mut [u8], + offset: usize, +} + +impl<'a> Buf<'a> { + pub fn new(bytes: &'a mut [u8]) -> Self { + Buf { bytes, offset: 0 } + } + + pub fn as_str(&self) -> &str { + let slice = &self.bytes[..self.offset]; + unsafe { str::from_utf8_unchecked(slice) } + } +} + +impl<'a> Write for Buf<'a> { + fn write_str(&mut self, s: &str) -> fmt::Result { + if self.offset + s.len() > self.bytes.len() { + Err(fmt::Error) + } else { + self.bytes[self.offset..self.offset + s.len()].copy_from_slice(s.as_bytes()); + self.offset += s.len(); + Ok(()) + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..58d143d79f724399ea14fe4e795f9f2c1256e757 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/lib.rs @@ -0,0 +1,121 @@ +//! Serde is a framework for ***ser***ializing and ***de***serializing Rust data +//! structures efficiently and generically. +//! +//! The `serde_core` crate contains Serde's trait definitions with **no support +//! for #\[derive()\]**. +//! +//! 
In crates that derive an implementation of `Serialize` or `Deserialize`, you +//! must depend on the [`serde`] crate, not `serde_core`. +//! +//! [`serde`]: https://crates.io/crates/serde +//! +//! In crates that handwrite implementations of Serde traits, or only use them +//! as trait bounds, depending on `serde_core` is permitted. But `serde` +//! re-exports all of these traits and can be used for this use case too. If in +//! doubt, disregard `serde_core` and always use `serde`. +//! +//! Crates that depend on `serde_core` instead of `serde` are able to compile in +//! parallel with `serde_derive` even when `serde`'s "derive" feature is turned on, +//! as shown in the following build timings. +//! +//!
+//! +//! +//! +//! +//!
When serde_json depends on serde
+//! +//!
+//! +//! +//! +//! +//!
When serde_json depends on serde_core
+ +//////////////////////////////////////////////////////////////////////////////// + +// Serde types in rustdoc of other crates get linked to here. +#![doc(html_root_url = "https://docs.rs/serde_core/1.0.228")] +// Support using Serde without the standard library! +#![cfg_attr(not(feature = "std"), no_std)] +// Show which crate feature enables conditionally compiled APIs in documentation. +#![cfg_attr(docsrs, feature(doc_cfg, rustdoc_internals))] +#![cfg_attr(docsrs, allow(internal_features))] +// Unstable functionality only if the user asks for it. For tracking and +// discussion of these features please refer to this issue: +// +// https://github.com/serde-rs/serde/issues/812 +#![cfg_attr(feature = "unstable", feature(never_type))] +#![allow(unknown_lints, bare_trait_objects, deprecated)] +// Ignored clippy and clippy_pedantic lints +#![allow( + // clippy bug: https://github.com/rust-lang/rust-clippy/issues/5704 + clippy::unnested_or_patterns, + // clippy bug: https://github.com/rust-lang/rust-clippy/issues/7768 + clippy::semicolon_if_nothing_returned, + // not available in our oldest supported compiler + clippy::empty_enum, + clippy::type_repetition_in_bounds, // https://github.com/rust-lang/rust-clippy/issues/8772 + // integer and float ser/de requires these sorts of casts + clippy::cast_possible_truncation, + clippy::cast_possible_wrap, + clippy::cast_precision_loss, + clippy::cast_sign_loss, + // things are often more readable this way + clippy::cast_lossless, + clippy::module_name_repetitions, + clippy::single_match_else, + clippy::type_complexity, + clippy::use_self, + clippy::zero_prefixed_literal, + // correctly used + clippy::derive_partial_eq_without_eq, + clippy::enum_glob_use, + clippy::explicit_auto_deref, + clippy::incompatible_msrv, + clippy::let_underscore_untyped, + clippy::map_err_ignore, + clippy::new_without_default, + clippy::result_unit_err, + clippy::wildcard_imports, + // not practical + clippy::needless_pass_by_value, + 
clippy::similar_names, + clippy::too_many_lines, + // preference + clippy::doc_markdown, + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::unseparated_literal_suffix, + // false positive + clippy::needless_doctest_main, + // noisy + clippy::missing_errors_doc, + clippy::must_use_candidate, +)] +// Restrictions +#![deny(clippy::question_mark_used)] +// Rustc lints. +#![deny(missing_docs, unused_imports)] + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(feature = "alloc")] +extern crate alloc; + +#[macro_use] +mod crate_root; +#[macro_use] +mod macros; + +crate_root!(); + +#[macro_export] +#[doc(hidden)] +macro_rules! __require_serde_not_serde_core { + () => { + ::core::compile_error!( + "Serde derive requires a dependency on the serde crate, not serde_core" + ); + }; +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/macros.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/macros.rs new file mode 100644 index 0000000000000000000000000000000000000000..b956730500554bfc0c487937433baeecd937e6ce --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/macros.rs @@ -0,0 +1,230 @@ +// Super explicit first paragraph because this shows up at the top level and +// trips up people who are just looking for basic Serialize / Deserialize +// documentation. +/// Helper macro when implementing the `Deserializer` part of a new data format +/// for Serde. +/// +/// Some [`Deserializer`] implementations for self-describing formats do not +/// care what hint the [`Visitor`] gives them, they just want to blindly call +/// the [`Visitor`] method corresponding to the data they can tell is in the +/// input. This requires repetitive implementations of all the [`Deserializer`] +/// trait methods. 
+/// +/// ```edition2021 +/// # use serde::forward_to_deserialize_any; +/// # use serde::de::{value, Deserializer, Visitor}; +/// # +/// # struct MyDeserializer; +/// # +/// # impl<'de> Deserializer<'de> for MyDeserializer { +/// # type Error = value::Error; +/// # +/// # fn deserialize_any(self, _: V) -> Result +/// # where +/// # V: Visitor<'de>, +/// # { +/// # unimplemented!() +/// # } +/// # +/// #[inline] +/// fn deserialize_bool(self, visitor: V) -> Result +/// where +/// V: Visitor<'de>, +/// { +/// self.deserialize_any(visitor) +/// } +/// # +/// # forward_to_deserialize_any! { +/// # i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string +/// # bytes byte_buf option unit unit_struct newtype_struct seq tuple +/// # tuple_struct map struct enum identifier ignored_any +/// # } +/// # } +/// ``` +/// +/// The `forward_to_deserialize_any!` macro implements these simple forwarding +/// methods so that they forward directly to [`Deserializer::deserialize_any`]. +/// You can choose which methods to forward. +/// +/// ```edition2021 +/// # use serde::forward_to_deserialize_any; +/// # use serde::de::{value, Deserializer, Visitor}; +/// # +/// # struct MyDeserializer; +/// # +/// impl<'de> Deserializer<'de> for MyDeserializer { +/// # type Error = value::Error; +/// # +/// fn deserialize_any(self, visitor: V) -> Result +/// where +/// V: Visitor<'de>, +/// { +/// /* ... */ +/// # let _ = visitor; +/// # unimplemented!() +/// } +/// +/// forward_to_deserialize_any! { +/// bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string +/// bytes byte_buf option unit unit_struct newtype_struct seq tuple +/// tuple_struct map struct enum identifier ignored_any +/// } +/// } +/// ``` +/// +/// The macro assumes the convention that your `Deserializer` lifetime parameter +/// is called `'de` and that the `Visitor` type parameters on each method are +/// called `V`. 
A different type parameter and a different lifetime can be +/// specified explicitly if necessary. +/// +/// ```edition2021 +/// # use serde::forward_to_deserialize_any; +/// # use serde::de::{value, Deserializer, Visitor}; +/// # use std::marker::PhantomData; +/// # +/// # struct MyDeserializer(PhantomData); +/// # +/// # impl<'q, V> Deserializer<'q> for MyDeserializer { +/// # type Error = value::Error; +/// # +/// # fn deserialize_any(self, visitor: W) -> Result +/// # where +/// # W: Visitor<'q>, +/// # { +/// # unimplemented!() +/// # } +/// # +/// forward_to_deserialize_any! { +/// > +/// bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string +/// bytes byte_buf option unit unit_struct newtype_struct seq tuple +/// tuple_struct map struct enum identifier ignored_any +/// } +/// # } +/// ``` +/// +/// [`Deserializer`]: crate::Deserializer +/// [`Visitor`]: crate::de::Visitor +/// [`Deserializer::deserialize_any`]: crate::Deserializer::deserialize_any +#[macro_export(local_inner_macros)] +macro_rules! forward_to_deserialize_any { + (<$visitor:ident: Visitor<$lifetime:tt>> $($func:ident)*) => { + $(forward_to_deserialize_any_helper!{$func<$lifetime, $visitor>})* + }; + // This case must be after the previous one. + ($($func:ident)*) => { + $(forward_to_deserialize_any_helper!{$func<'de, V>})* + }; +} + +#[doc(hidden)] +#[macro_export] +macro_rules! forward_to_deserialize_any_method { + ($func:ident<$l:tt, $v:ident>($($arg:ident : $ty:ty),*)) => { + #[inline] + fn $func<$v>(self, $($arg: $ty,)* visitor: $v) -> $crate::__private::Result<$v::Value, >::Error> + where + $v: $crate::de::Visitor<$l>, + { + $( + let _ = $arg; + )* + self.deserialize_any(visitor) + } + }; +} + +#[doc(hidden)] +#[macro_export(local_inner_macros)] +macro_rules! 
forward_to_deserialize_any_helper { + (bool<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_bool<$l, $v>()} + }; + (i8<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_i8<$l, $v>()} + }; + (i16<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_i16<$l, $v>()} + }; + (i32<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_i32<$l, $v>()} + }; + (i64<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_i64<$l, $v>()} + }; + (i128<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_i128<$l, $v>()} + }; + (u8<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_u8<$l, $v>()} + }; + (u16<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_u16<$l, $v>()} + }; + (u32<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_u32<$l, $v>()} + }; + (u64<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_u64<$l, $v>()} + }; + (u128<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_u128<$l, $v>()} + }; + (f32<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_f32<$l, $v>()} + }; + (f64<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_f64<$l, $v>()} + }; + (char<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_char<$l, $v>()} + }; + (str<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_str<$l, $v>()} + }; + (string<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_string<$l, $v>()} + }; + (bytes<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_bytes<$l, $v>()} + }; + (byte_buf<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_byte_buf<$l, $v>()} + }; + (option<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_option<$l, $v>()} + }; + (unit<$l:tt, $v:ident>) => { + 
forward_to_deserialize_any_method!{deserialize_unit<$l, $v>()} + }; + (unit_struct<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_unit_struct<$l, $v>(name: &'static str)} + }; + (newtype_struct<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_newtype_struct<$l, $v>(name: &'static str)} + }; + (seq<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_seq<$l, $v>()} + }; + (tuple<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_tuple<$l, $v>(len: usize)} + }; + (tuple_struct<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_tuple_struct<$l, $v>(name: &'static str, len: usize)} + }; + (map<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_map<$l, $v>()} + }; + (struct<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_struct<$l, $v>(name: &'static str, fields: &'static [&'static str])} + }; + (enum<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_enum<$l, $v>(name: &'static str, variants: &'static [&'static str])} + }; + (identifier<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_identifier<$l, $v>()} + }; + (ignored_any<$l:tt, $v:ident>) => { + forward_to_deserialize_any_method!{deserialize_ignored_any<$l, $v>()} + }; +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/content.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/content.rs new file mode 100644 index 0000000000000000000000000000000000000000..f29a9b527d65a7e7db298327eb5031f5aec5eb5b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/content.rs @@ -0,0 +1,39 @@ +use crate::lib::*; + +// Used from generated code to buffer the contents of the Deserializer when +// deserializing untagged enums and internally tagged enums. +// +// Not public API. Use serde-value instead. 
+// +// Obsoleted by format-specific buffer types (https://github.com/serde-rs/serde/pull/2912). +#[doc(hidden)] +pub enum Content<'de> { + Bool(bool), + + U8(u8), + U16(u16), + U32(u32), + U64(u64), + + I8(i8), + I16(i16), + I32(i32), + I64(i64), + + F32(f32), + F64(f64), + + Char(char), + String(String), + Str(&'de str), + ByteBuf(Vec), + Bytes(&'de [u8]), + + None, + Some(Box>), + + Unit, + Newtype(Box>), + Seq(Vec>), + Map(Vec<(Content<'de>, Content<'de>)>), +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/doc.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/doc.rs new file mode 100644 index 0000000000000000000000000000000000000000..2cc07f0d931ec977ad0812c828187eb73505cfe2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/doc.rs @@ -0,0 +1,165 @@ +// Used only by Serde doc tests. Not public API. + +use crate::lib::*; + +use crate::ser; + +#[doc(hidden)] +#[derive(Debug)] +pub struct Error; + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl ser::Error for Error { + fn custom(_: T) -> Self + where + T: Display, + { + unimplemented!() + } +} + +#[cfg(feature = "std")] +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl error::Error for Error { + fn description(&self) -> &str { + unimplemented!() + } +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl Display for Error { + fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { + unimplemented!() + } +} + +#[doc(hidden)] +#[macro_export] +macro_rules! __private_serialize { + () => { + trait Serialize { + fn serialize(&self, serializer: S) -> Result + where + S: $crate::Serializer; + } + }; +} + +#[doc(hidden)] +#[macro_export(local_inner_macros)] +macro_rules! 
__serialize_unimplemented { + ($($func:ident)*) => { + $( + __serialize_unimplemented_helper!($func); + )* + }; +} + +#[doc(hidden)] +#[macro_export] +macro_rules! __serialize_unimplemented_method { + ($func:ident $(<$t:ident>)* ($($arg:ty),*) -> $ret:ident) => { + fn $func $(<$t>)* (self $(, _: $arg)*) -> $crate::__private::Result + where + $($t: ?Sized + $crate::Serialize,)* + { + unimplemented!() + } + }; +} + +#[doc(hidden)] +#[macro_export(local_inner_macros)] +macro_rules! __serialize_unimplemented_helper { + (bool) => { + __serialize_unimplemented_method!(serialize_bool(bool) -> Ok); + }; + (i8) => { + __serialize_unimplemented_method!(serialize_i8(i8) -> Ok); + }; + (i16) => { + __serialize_unimplemented_method!(serialize_i16(i16) -> Ok); + }; + (i32) => { + __serialize_unimplemented_method!(serialize_i32(i32) -> Ok); + }; + (i64) => { + __serialize_unimplemented_method!(serialize_i64(i64) -> Ok); + }; + (u8) => { + __serialize_unimplemented_method!(serialize_u8(u8) -> Ok); + }; + (u16) => { + __serialize_unimplemented_method!(serialize_u16(u16) -> Ok); + }; + (u32) => { + __serialize_unimplemented_method!(serialize_u32(u32) -> Ok); + }; + (u64) => { + __serialize_unimplemented_method!(serialize_u64(u64) -> Ok); + }; + (f32) => { + __serialize_unimplemented_method!(serialize_f32(f32) -> Ok); + }; + (f64) => { + __serialize_unimplemented_method!(serialize_f64(f64) -> Ok); + }; + (char) => { + __serialize_unimplemented_method!(serialize_char(char) -> Ok); + }; + (str) => { + __serialize_unimplemented_method!(serialize_str(&str) -> Ok); + }; + (bytes) => { + __serialize_unimplemented_method!(serialize_bytes(&[u8]) -> Ok); + }; + (none) => { + __serialize_unimplemented_method!(serialize_none() -> Ok); + }; + (some) => { + __serialize_unimplemented_method!(serialize_some(&T) -> Ok); + }; + (unit) => { + __serialize_unimplemented_method!(serialize_unit() -> Ok); + }; + (unit_struct) => { + __serialize_unimplemented_method!(serialize_unit_struct(&str) -> Ok); + }; 
+ (unit_variant) => { + __serialize_unimplemented_method!(serialize_unit_variant(&str, u32, &str) -> Ok); + }; + (newtype_struct) => { + __serialize_unimplemented_method!(serialize_newtype_struct(&str, &T) -> Ok); + }; + (newtype_variant) => { + __serialize_unimplemented_method!(serialize_newtype_variant(&str, u32, &str, &T) -> Ok); + }; + (seq) => { + type SerializeSeq = $crate::ser::Impossible; + __serialize_unimplemented_method!(serialize_seq(Option) -> SerializeSeq); + }; + (tuple) => { + type SerializeTuple = $crate::ser::Impossible; + __serialize_unimplemented_method!(serialize_tuple(usize) -> SerializeTuple); + }; + (tuple_struct) => { + type SerializeTupleStruct = $crate::ser::Impossible; + __serialize_unimplemented_method!(serialize_tuple_struct(&str, usize) -> SerializeTupleStruct); + }; + (tuple_variant) => { + type SerializeTupleVariant = $crate::ser::Impossible; + __serialize_unimplemented_method!(serialize_tuple_variant(&str, u32, &str, usize) -> SerializeTupleVariant); + }; + (map) => { + type SerializeMap = $crate::ser::Impossible; + __serialize_unimplemented_method!(serialize_map(Option) -> SerializeMap); + }; + (struct) => { + type SerializeStruct = $crate::ser::Impossible; + __serialize_unimplemented_method!(serialize_struct(&str, usize) -> SerializeStruct); + }; + (struct_variant) => { + type SerializeStructVariant = $crate::ser::Impossible; + __serialize_unimplemented_method!(serialize_struct_variant(&str, u32, &str, usize) -> SerializeStructVariant); + }; +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..dd5f93fe963b267b2eada3b6b6d94fec119c34dd --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/mod.rs @@ -0,0 +1,21 @@ +#[cfg(all(not(no_serde_derive), any(feature = "std", 
feature = "alloc")))] +mod content; +mod seed; + +// FIXME: #[cfg(doctest)] once https://github.com/rust-lang/rust/issues/67295 is fixed. +#[doc(hidden)] +pub mod doc; + +#[doc(hidden)] +pub mod size_hint; + +#[doc(hidden)] +pub mod string; + +#[cfg(all(not(no_serde_derive), any(feature = "std", feature = "alloc")))] +#[doc(hidden)] +pub use self::content::Content; +#[doc(hidden)] +pub use self::seed::InPlaceSeed; +#[doc(hidden)] +pub use crate::lib::result::Result; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/seed.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/seed.rs new file mode 100644 index 0000000000000000000000000000000000000000..bcf267cb09b59bba9133630c3bb3be26709b0e5c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/seed.rs @@ -0,0 +1,20 @@ +use crate::de::{Deserialize, DeserializeSeed, Deserializer}; + +/// A DeserializeSeed helper for implementing deserialize_in_place Visitors. +/// +/// Wraps a mutable reference and calls deserialize_in_place on it. 
+pub struct InPlaceSeed<'a, T: 'a>(pub &'a mut T); + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'a, 'de, T> DeserializeSeed<'de> for InPlaceSeed<'a, T> +where + T: Deserialize<'de>, +{ + type Value = (); + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + T::deserialize_in_place(deserializer, self.0) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/size_hint.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/size_hint.rs new file mode 100644 index 0000000000000000000000000000000000000000..783281b46c4e0a84b4cb562971b42ab921508d51 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/size_hint.rs @@ -0,0 +1,30 @@ +#[cfg(any(feature = "std", feature = "alloc"))] +use crate::lib::*; + +pub fn from_bounds(iter: &I) -> Option +where + I: Iterator, +{ + helper(iter.size_hint()) +} + +#[cfg(any(feature = "std", feature = "alloc"))] +pub fn cautious(hint: Option) -> usize { + const MAX_PREALLOC_BYTES: usize = 1024 * 1024; + + if mem::size_of::() == 0 { + 0 + } else { + cmp::min( + hint.unwrap_or(0), + MAX_PREALLOC_BYTES / mem::size_of::(), + ) + } +} + +fn helper(bounds: (usize, Option)) -> Option { + match bounds { + (lower, Some(upper)) if lower == upper => Some(upper), + _ => None, + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/string.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/string.rs new file mode 100644 index 0000000000000000000000000000000000000000..c8121a0da718908f5ce1011926a7c363ad77c7cc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/private/string.rs @@ -0,0 +1,23 @@ +use crate::lib::*; + +#[cfg(any(feature = "std", feature = "alloc"))] +#[doc(hidden)] +pub fn from_utf8_lossy(bytes: &[u8]) -> Cow<'_, 
str> { + String::from_utf8_lossy(bytes) +} + +// The generated code calls this like: +// +// let value = &_serde::__private::from_utf8_lossy(bytes); +// Err(_serde::de::Error::unknown_variant(value, VARIANTS)) +// +// so it is okay for the return type to be different from the std case as long +// as the above works. +#[cfg(not(any(feature = "std", feature = "alloc")))] +#[doc(hidden)] +pub fn from_utf8_lossy(bytes: &[u8]) -> &str { + // Three unicode replacement characters if it fails. They look like a + // white-on-black question mark. The user will recognize it as invalid + // UTF-8. + str::from_utf8(bytes).unwrap_or("\u{fffd}\u{fffd}\u{fffd}") +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/fmt.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/fmt.rs new file mode 100644 index 0000000000000000000000000000000000000000..4b1549f0897cffe4e15347f6c8a359ce97ff68bb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/fmt.rs @@ -0,0 +1,170 @@ +use crate::lib::*; +use crate::ser::{Error, Impossible, Serialize, Serializer}; + +impl Error for fmt::Error { + fn custom(_msg: T) -> Self { + fmt::Error + } +} + +macro_rules! 
fmt_primitives { + ($($f:ident: $t:ty,)*) => { + $( + fn $f(self, v: $t) -> fmt::Result { + Display::fmt(&v, self) + } + )* + }; +} + +/// ```edition2021 +/// use serde::ser::Serialize; +/// use serde_derive::Serialize; +/// use std::fmt::{self, Display}; +/// +/// #[derive(Serialize)] +/// #[serde(rename_all = "kebab-case")] +/// pub enum MessageType { +/// StartRequest, +/// EndRequest, +/// } +/// +/// impl Display for MessageType { +/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +/// self.serialize(f) +/// } +/// } +/// ``` +impl<'a> Serializer for &mut fmt::Formatter<'a> { + type Ok = (); + type Error = fmt::Error; + type SerializeSeq = Impossible<(), fmt::Error>; + type SerializeTuple = Impossible<(), fmt::Error>; + type SerializeTupleStruct = Impossible<(), fmt::Error>; + type SerializeTupleVariant = Impossible<(), fmt::Error>; + type SerializeMap = Impossible<(), fmt::Error>; + type SerializeStruct = Impossible<(), fmt::Error>; + type SerializeStructVariant = Impossible<(), fmt::Error>; + + fmt_primitives! 
{ + serialize_bool: bool, + serialize_i8: i8, + serialize_i16: i16, + serialize_i32: i32, + serialize_i64: i64, + serialize_i128: i128, + serialize_u8: u8, + serialize_u16: u16, + serialize_u32: u32, + serialize_u64: u64, + serialize_u128: u128, + serialize_f32: f32, + serialize_f64: f64, + serialize_char: char, + serialize_str: &str, + serialize_unit_struct: &'static str, + } + + fn serialize_unit_variant( + self, + _name: &'static str, + _variant_index: u32, + variant: &'static str, + ) -> fmt::Result { + Display::fmt(variant, self) + } + + fn serialize_newtype_struct(self, _name: &'static str, value: &T) -> fmt::Result + where + T: ?Sized + Serialize, + { + Serialize::serialize(value, self) + } + + fn serialize_bytes(self, _v: &[u8]) -> fmt::Result { + Err(fmt::Error) + } + + fn serialize_none(self) -> fmt::Result { + Err(fmt::Error) + } + + fn serialize_some(self, _value: &T) -> fmt::Result + where + T: ?Sized + Serialize, + { + Err(fmt::Error) + } + + fn serialize_unit(self) -> fmt::Result { + Err(fmt::Error) + } + + fn serialize_newtype_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _value: &T, + ) -> fmt::Result + where + T: ?Sized + Serialize, + { + Err(fmt::Error) + } + + fn serialize_seq(self, _len: Option) -> Result { + Err(fmt::Error) + } + + fn serialize_tuple(self, _len: usize) -> Result { + Err(fmt::Error) + } + + fn serialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + ) -> Result { + Err(fmt::Error) + } + + fn serialize_tuple_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _len: usize, + ) -> Result { + Err(fmt::Error) + } + + fn serialize_map(self, _len: Option) -> Result { + Err(fmt::Error) + } + + fn serialize_struct( + self, + _name: &'static str, + _len: usize, + ) -> Result { + Err(fmt::Error) + } + + fn serialize_struct_variant( + self, + _name: &'static str, + _variant_index: u32, + _variant: &'static str, + _len: usize, + ) -> Result { 
+ Err(fmt::Error) + } + + fn collect_str(self, value: &T) -> fmt::Result + where + T: ?Sized + Display, + { + Display::fmt(value, self) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/impls.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..a7a175db9465aa6ab4c6931e18448971a4b0b30a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/impls.rs @@ -0,0 +1,1045 @@ +use crate::lib::*; + +use crate::ser::{Error, Serialize, SerializeTuple, Serializer}; + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! primitive_impl { + ($ty:ident, $method:ident $($cast:tt)*) => { + impl Serialize for $ty { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.$method(*self $($cast)*) + } + } + } +} + +primitive_impl!(bool, serialize_bool); +primitive_impl!(isize, serialize_i64 as i64); +primitive_impl!(i8, serialize_i8); +primitive_impl!(i16, serialize_i16); +primitive_impl!(i32, serialize_i32); +primitive_impl!(i64, serialize_i64); +primitive_impl!(i128, serialize_i128); +primitive_impl!(usize, serialize_u64 as u64); +primitive_impl!(u8, serialize_u8); +primitive_impl!(u16, serialize_u16); +primitive_impl!(u32, serialize_u32); +primitive_impl!(u64, serialize_u64); +primitive_impl!(u128, serialize_u128); +primitive_impl!(f32, serialize_f32); +primitive_impl!(f64, serialize_f64); +primitive_impl!(char, serialize_char); + +//////////////////////////////////////////////////////////////////////////////// + +impl Serialize for str { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(self) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +impl 
Serialize for String { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(self) + } +} + +impl<'a> Serialize for fmt::Arguments<'a> { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_str(self) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(any(feature = "std", not(no_core_cstr)))] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Serialize for CStr { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(self.to_bytes()) + } +} + +#[cfg(any(feature = "std", all(not(no_core_cstr), feature = "alloc")))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] +impl Serialize for CString { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(self.to_bytes()) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl Serialize for Option +where + T: Serialize, +{ + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + Some(ref value) => serializer.serialize_some(value), + None => serializer.serialize_none(), + } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl Serialize for PhantomData +where + T: ?Sized, +{ + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_unit_struct("PhantomData") + } +} + +//////////////////////////////////////////////////////////////////////////////// + +// Does not require T: Serialize. +impl Serialize for [T; 0] { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + tri!(serializer.serialize_tuple(0)).end() + } +} + +macro_rules! 
array_impls { + ($($len:tt)+) => { + $( + impl Serialize for [T; $len] + where + T: Serialize, + { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut seq = tri!(serializer.serialize_tuple($len)); + for e in self { + tri!(seq.serialize_element(e)); + } + seq.end() + } + } + )+ + } +} + +array_impls! { + 01 02 03 04 05 06 07 08 09 10 + 11 12 13 14 15 16 17 18 19 20 + 21 22 23 24 25 26 27 28 29 30 + 31 32 +} + +//////////////////////////////////////////////////////////////////////////////// + +impl Serialize for [T] +where + T: Serialize, +{ + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_seq(self) + } +} + +macro_rules! seq_impl { + ( + $(#[$attr:meta])* + $ty:ident + ) => { + $(#[$attr])* + impl Serialize for $ty + where + T: Serialize, + { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_seq(self) + } + } + } +} + +seq_impl! { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + BinaryHeap +} + +seq_impl! { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + BTreeSet +} + +seq_impl! { + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + HashSet +} + +seq_impl! { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + LinkedList +} + +seq_impl! { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + Vec +} + +seq_impl! 
{ + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + VecDeque +} + +//////////////////////////////////////////////////////////////////////////////// + +impl Serialize for Range +where + Idx: Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use super::SerializeStruct; + let mut state = tri!(serializer.serialize_struct("Range", 2)); + tri!(state.serialize_field("start", &self.start)); + tri!(state.serialize_field("end", &self.end)); + state.end() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl Serialize for RangeFrom +where + Idx: Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use super::SerializeStruct; + let mut state = tri!(serializer.serialize_struct("RangeFrom", 1)); + tri!(state.serialize_field("start", &self.start)); + state.end() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl Serialize for RangeInclusive +where + Idx: Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use super::SerializeStruct; + let mut state = tri!(serializer.serialize_struct("RangeInclusive", 2)); + tri!(state.serialize_field("start", &self.start())); + tri!(state.serialize_field("end", &self.end())); + state.end() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl Serialize for RangeTo +where + Idx: Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use super::SerializeStruct; + let mut state = tri!(serializer.serialize_struct("RangeTo", 1)); + tri!(state.serialize_field("end", &self.end)); + state.end() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl Serialize for Bound +where + T: Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where 
+ S: Serializer, + { + match *self { + Bound::Unbounded => serializer.serialize_unit_variant("Bound", 0, "Unbounded"), + Bound::Included(ref value) => { + serializer.serialize_newtype_variant("Bound", 1, "Included", value) + } + Bound::Excluded(ref value) => { + serializer.serialize_newtype_variant("Bound", 2, "Excluded", value) + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl Serialize for () { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_unit() + } +} + +#[cfg(feature = "unstable")] +#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))] +impl Serialize for ! { + fn serialize(&self, _serializer: S) -> Result + where + S: Serializer, + { + *self + } +} + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! tuple_impls { + ($($len:expr => ($($n:tt $name:ident)+))+) => { + $( + #[cfg_attr(docsrs, doc(hidden))] + impl<$($name),+> Serialize for ($($name,)+) + where + $($name: Serialize,)+ + { + tuple_impl_body!($len => ($($n)+)); + } + )+ + }; +} + +macro_rules! tuple_impl_body { + ($len:expr => ($($n:tt)+)) => { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut tuple = tri!(serializer.serialize_tuple($len)); + $( + tri!(tuple.serialize_element(&self.$n)); + )+ + tuple.end() + } + }; +} + +#[cfg_attr(docsrs, doc(fake_variadic))] +#[cfg_attr( + docsrs, + doc = "This trait is implemented for tuples up to 16 items long." +)] +impl Serialize for (T,) +where + T: Serialize, +{ + tuple_impl_body!(1 => (0)); +} + +tuple_impls! 
{ + 2 => (0 T0 1 T1) + 3 => (0 T0 1 T1 2 T2) + 4 => (0 T0 1 T1 2 T2 3 T3) + 5 => (0 T0 1 T1 2 T2 3 T3 4 T4) + 6 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5) + 7 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6) + 8 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7) + 9 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8) + 10 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9) + 11 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10) + 12 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10 11 T11) + 13 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10 11 T11 12 T12) + 14 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10 11 T11 12 T12 13 T13) + 15 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10 11 T11 12 T12 13 T13 14 T14) + 16 => (0 T0 1 T1 2 T2 3 T3 4 T4 5 T5 6 T6 7 T7 8 T8 9 T9 10 T10 11 T11 12 T12 13 T13 14 T14 15 T15) +} + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! map_impl { + ( + $(#[$attr:meta])* + $ty:ident + ) => { + $(#[$attr])* + impl Serialize for $ty + where + K: Serialize, + V: Serialize, + { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_map(self) + } + } + } +} + +map_impl! { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + BTreeMap +} + +map_impl! { + #[cfg(feature = "std")] + #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + HashMap +} + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! deref_impl { + ( + $(#[$attr:meta])* + <$($desc:tt)+ + ) => { + $(#[$attr])* + impl <$($desc)+ { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + (**self).serialize(serializer) + } + } + }; +} + +deref_impl! { + <'a, T> Serialize for &'a T where T: ?Sized + Serialize +} + +deref_impl! 
{ + <'a, T> Serialize for &'a mut T where T: ?Sized + Serialize +} + +deref_impl! { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + Serialize for Box where T: ?Sized + Serialize +} + +deref_impl! { + /// This impl requires the [`"rc"`] Cargo feature of Serde. + /// + /// Serializing a data structure containing `Rc` will serialize a copy of + /// the contents of the `Rc` each time the `Rc` is referenced within the + /// data structure. Serialization will not attempt to deduplicate these + /// repeated data. + /// + /// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc + #[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))] + Serialize for Rc where T: ?Sized + Serialize +} + +deref_impl! { + /// This impl requires the [`"rc"`] Cargo feature of Serde. + /// + /// Serializing a data structure containing `Arc` will serialize a copy of + /// the contents of the `Arc` each time the `Arc` is referenced within the + /// data structure. Serialization will not attempt to deduplicate these + /// repeated data. + /// + /// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc + #[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))] + Serialize for Arc where T: ?Sized + Serialize +} + +deref_impl! { + #[cfg(any(feature = "std", feature = "alloc"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))] + <'a, T> Serialize for Cow<'a, T> where T: ?Sized + Serialize + ToOwned +} + +//////////////////////////////////////////////////////////////////////////////// + +/// This impl requires the [`"rc"`] Cargo feature of Serde. 
+/// +/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc +#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))) +)] +impl Serialize for RcWeak +where + T: ?Sized + Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.upgrade().serialize(serializer) + } +} + +/// This impl requires the [`"rc"`] Cargo feature of Serde. +/// +/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc +#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))) +)] +impl Serialize for ArcWeak +where + T: ?Sized + Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.upgrade().serialize(serializer) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! nonzero_integers { + ($($T:ident,)+) => { + $( + impl Serialize for num::$T { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.get().serialize(serializer) + } + } + )+ + } +} + +nonzero_integers! 
{ + NonZeroI8, + NonZeroI16, + NonZeroI32, + NonZeroI64, + NonZeroI128, + NonZeroIsize, + NonZeroU8, + NonZeroU16, + NonZeroU32, + NonZeroU64, + NonZeroU128, + NonZeroUsize, +} + +impl Serialize for Cell +where + T: Serialize + Copy, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.get().serialize(serializer) + } +} + +impl Serialize for RefCell +where + T: ?Sized + Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self.try_borrow() { + Ok(value) => value.serialize(serializer), + Err(_) => Err(S::Error::custom("already mutably borrowed")), + } + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Serialize for Mutex +where + T: ?Sized + Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self.lock() { + Ok(locked) => locked.serialize(serializer), + Err(_) => Err(S::Error::custom("lock poison error while serializing")), + } + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Serialize for RwLock +where + T: ?Sized + Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self.read() { + Ok(locked) => locked.serialize(serializer), + Err(_) => Err(S::Error::custom("lock poison error while serializing")), + } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(feature = "result")] +#[cfg_attr(docsrs, doc(cfg(feature = "result")))] +impl Serialize for Result +where + T: Serialize, + E: Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + Result::Ok(ref value) => serializer.serialize_newtype_variant("Result", 0, "Ok", value), + Result::Err(ref value) => { + serializer.serialize_newtype_variant("Result", 1, "Err", value) + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl 
Serialize for Duration { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use super::SerializeStruct; + let mut state = tri!(serializer.serialize_struct("Duration", 2)); + tri!(state.serialize_field("secs", &self.as_secs())); + tri!(state.serialize_field("nanos", &self.subsec_nanos())); + state.end() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Serialize for SystemTime { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use super::SerializeStruct; + let duration_since_epoch = match self.duration_since(UNIX_EPOCH) { + Ok(duration_since_epoch) => duration_since_epoch, + Err(_) => return Err(S::Error::custom("SystemTime must be later than UNIX_EPOCH")), + }; + let mut state = tri!(serializer.serialize_struct("SystemTime", 2)); + tri!(state.serialize_field("secs_since_epoch", &duration_since_epoch.as_secs())); + tri!(state.serialize_field("nanos_since_epoch", &duration_since_epoch.subsec_nanos())); + state.end() + } +} + +//////////////////////////////////////////////////////////////////////////////// + +/// Serialize a value that implements `Display` as a string, when that string is +/// statically known to never have more than a constant `MAX_LEN` bytes. +/// +/// Panics if the `Display` impl tries to write more than `MAX_LEN` bytes. +#[cfg(any(feature = "std", not(no_core_net)))] +macro_rules! 
serialize_display_bounded_length { + ($value:expr, $max:expr, $serializer:expr) => {{ + let mut buffer = [0u8; $max]; + let mut writer = crate::format::Buf::new(&mut buffer); + write!(&mut writer, "{}", $value).unwrap(); + $serializer.serialize_str(writer.as_str()) + }}; +} + +#[cfg(any(feature = "std", not(no_core_net)))] +impl Serialize for net::IpAddr { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + if serializer.is_human_readable() { + match *self { + net::IpAddr::V4(ref a) => a.serialize(serializer), + net::IpAddr::V6(ref a) => a.serialize(serializer), + } + } else { + match *self { + net::IpAddr::V4(ref a) => { + serializer.serialize_newtype_variant("IpAddr", 0, "V4", a) + } + net::IpAddr::V6(ref a) => { + serializer.serialize_newtype_variant("IpAddr", 1, "V6", a) + } + } + } + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +const DEC_DIGITS_LUT: &[u8] = b"\ + 0001020304050607080910111213141516171819\ + 2021222324252627282930313233343536373839\ + 4041424344454647484950515253545556575859\ + 6061626364656667686970717273747576777879\ + 8081828384858687888990919293949596979899"; + +#[cfg(any(feature = "std", not(no_core_net)))] +#[inline] +fn format_u8(mut n: u8, out: &mut [u8]) -> usize { + if n >= 100 { + let d1 = ((n % 100) << 1) as usize; + n /= 100; + out[0] = b'0' + n; + out[1] = DEC_DIGITS_LUT[d1]; + out[2] = DEC_DIGITS_LUT[d1 + 1]; + 3 + } else if n >= 10 { + let d1 = (n << 1) as usize; + out[0] = DEC_DIGITS_LUT[d1]; + out[1] = DEC_DIGITS_LUT[d1 + 1]; + 2 + } else { + out[0] = b'0' + n; + 1 + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +#[test] +fn test_format_u8() { + let mut i = 0u8; + + loop { + let mut buf = [0u8; 3]; + let written = format_u8(i, &mut buf); + assert_eq!(i.to_string().as_bytes(), &buf[..written]); + + match i.checked_add(1) { + Some(next) => i = next, + None => break, + } + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +impl Serialize for net::Ipv4Addr { + fn serialize(&self, 
serializer: S) -> Result + where + S: Serializer, + { + if serializer.is_human_readable() { + const MAX_LEN: usize = 15; + debug_assert_eq!(MAX_LEN, "101.102.103.104".len()); + let mut buf = [b'.'; MAX_LEN]; + let mut written = format_u8(self.octets()[0], &mut buf); + for oct in &self.octets()[1..] { + // Skip over delimiters that we initialized buf with + written += format_u8(*oct, &mut buf[written + 1..]) + 1; + } + // Safety: We've only written ASCII bytes to the buffer, so it is valid UTF-8 + let buf = unsafe { str::from_utf8_unchecked(&buf[..written]) }; + serializer.serialize_str(buf) + } else { + self.octets().serialize(serializer) + } + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +impl Serialize for net::Ipv6Addr { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + if serializer.is_human_readable() { + const MAX_LEN: usize = 39; + debug_assert_eq!(MAX_LEN, "1001:1002:1003:1004:1005:1006:1007:1008".len()); + serialize_display_bounded_length!(self, MAX_LEN, serializer) + } else { + self.octets().serialize(serializer) + } + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +impl Serialize for net::SocketAddr { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + if serializer.is_human_readable() { + match *self { + net::SocketAddr::V4(ref addr) => addr.serialize(serializer), + net::SocketAddr::V6(ref addr) => addr.serialize(serializer), + } + } else { + match *self { + net::SocketAddr::V4(ref addr) => { + serializer.serialize_newtype_variant("SocketAddr", 0, "V4", addr) + } + net::SocketAddr::V6(ref addr) => { + serializer.serialize_newtype_variant("SocketAddr", 1, "V6", addr) + } + } + } + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +impl Serialize for net::SocketAddrV4 { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + if serializer.is_human_readable() { + const MAX_LEN: usize = 21; + debug_assert_eq!(MAX_LEN, "101.102.103.104:65000".len()); + 
serialize_display_bounded_length!(self, MAX_LEN, serializer) + } else { + (self.ip(), self.port()).serialize(serializer) + } + } +} + +#[cfg(any(feature = "std", not(no_core_net)))] +impl Serialize for net::SocketAddrV6 { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + if serializer.is_human_readable() { + const MAX_LEN: usize = 58; + debug_assert_eq!( + MAX_LEN, + "[1001:1002:1003:1004:1005:1006:1007:1008%4294967295]:65000".len() + ); + serialize_display_bounded_length!(self, MAX_LEN, serializer) + } else { + (self.ip(), self.port()).serialize(serializer) + } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Serialize for Path { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self.to_str() { + Some(s) => s.serialize(serializer), + None => Err(Error::custom("path contains invalid UTF-8 characters")), + } + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl Serialize for PathBuf { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.as_path().serialize(serializer) + } +} + +#[cfg(all(feature = "std", any(unix, windows)))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "std", any(unix, windows)))))] +impl Serialize for OsStr { + #[cfg(unix)] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use std::os::unix::ffi::OsStrExt; + serializer.serialize_newtype_variant("OsString", 0, "Unix", self.as_bytes()) + } + + #[cfg(windows)] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use std::os::windows::ffi::OsStrExt; + let val = self.encode_wide().collect::>(); + serializer.serialize_newtype_variant("OsString", 1, "Windows", &val) + } +} + +#[cfg(all(feature = "std", any(unix, windows)))] +#[cfg_attr(docsrs, doc(cfg(all(feature = "std", any(unix, windows)))))] +impl Serialize 
for OsString { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.as_os_str().serialize(serializer) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +impl Serialize for Wrapping +where + T: Serialize, +{ + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.0.serialize(serializer) + } +} + +#[cfg(not(no_core_num_saturating))] +impl Serialize for Saturating +where + T: Serialize, +{ + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.0.serialize(serializer) + } +} + +impl Serialize for Reverse +where + T: Serialize, +{ + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.0.serialize(serializer) + } +} + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(all(feature = "std", not(no_std_atomic)))] +macro_rules! atomic_impl { + ($($ty:ident $size:expr)*) => { + $( + #[cfg(any(no_target_has_atomic, target_has_atomic = $size))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "std", target_has_atomic = $size))))] + impl Serialize for $ty { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Matches the atomic ordering used in libcore for the Debug impl + self.load(Ordering::Relaxed).serialize(serializer) + } + } + )* + } +} + +#[cfg(all(feature = "std", not(no_std_atomic)))] +atomic_impl! { + AtomicBool "8" + AtomicI8 "8" + AtomicI16 "16" + AtomicI32 "32" + AtomicIsize "ptr" + AtomicU8 "8" + AtomicU16 "16" + AtomicU32 "32" + AtomicUsize "ptr" +} + +#[cfg(all(feature = "std", not(no_std_atomic64)))] +atomic_impl! 
{ + AtomicI64 "64" + AtomicU64 "64" +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/impossible.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/impossible.rs new file mode 100644 index 0000000000000000000000000000000000000000..fe69ae24e14bacddbd47362ce7ab75f04667cb87 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/impossible.rs @@ -0,0 +1,216 @@ +//! This module contains `Impossible` serializer and its implementations. + +use crate::lib::*; + +use crate::ser::{ + self, Serialize, SerializeMap, SerializeSeq, SerializeStruct, SerializeStructVariant, + SerializeTuple, SerializeTupleStruct, SerializeTupleVariant, +}; + +/// Helper type for implementing a `Serializer` that does not support +/// serializing one of the compound types. +/// +/// This type cannot be instantiated, but implements every one of the traits +/// corresponding to the [`Serializer`] compound types: [`SerializeSeq`], +/// [`SerializeTuple`], [`SerializeTupleStruct`], [`SerializeTupleVariant`], +/// [`SerializeMap`], [`SerializeStruct`], and [`SerializeStructVariant`]. +/// +/// ```edition2021 +/// # use serde::ser::{Serializer, Impossible}; +/// # use serde_core::__private::doc::Error; +/// # +/// # struct MySerializer; +/// # +/// impl Serializer for MySerializer { +/// type Ok = (); +/// type Error = Error; +/// +/// type SerializeSeq = Impossible<(), Error>; +/// /* other associated types */ +/// +/// /// This data format does not support serializing sequences. +/// fn serialize_seq(self, +/// len: Option) +/// -> Result { +/// // Given Impossible cannot be instantiated, the only +/// // thing we can do here is to return an error. +/// # stringify! { +/// Err(...) +/// # }; +/// # unimplemented!() +/// } +/// +/// /* other Serializer methods */ +/// # serde_core::__serialize_unimplemented! 
{ +/// # bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str bytes none some +/// # unit unit_struct unit_variant newtype_struct newtype_variant +/// # tuple tuple_struct tuple_variant map struct struct_variant +/// # } +/// } +/// ``` +/// +/// [`Serializer`]: crate::Serializer +/// [`SerializeSeq`]: crate::ser::SerializeSeq +/// [`SerializeTuple`]: crate::ser::SerializeTuple +/// [`SerializeTupleStruct`]: crate::ser::SerializeTupleStruct +/// [`SerializeTupleVariant`]: crate::ser::SerializeTupleVariant +/// [`SerializeMap`]: crate::ser::SerializeMap +/// [`SerializeStruct`]: crate::ser::SerializeStruct +/// [`SerializeStructVariant`]: crate::ser::SerializeStructVariant +pub struct Impossible { + void: Void, + ok: PhantomData, + error: PhantomData, +} + +enum Void {} + +impl SerializeSeq for Impossible +where + Error: ser::Error, +{ + type Ok = Ok; + type Error = Error; + + fn serialize_element(&mut self, value: &T) -> Result<(), Error> + where + T: ?Sized + Serialize, + { + let _ = value; + match self.void {} + } + + fn end(self) -> Result { + match self.void {} + } +} + +impl SerializeTuple for Impossible +where + Error: ser::Error, +{ + type Ok = Ok; + type Error = Error; + + fn serialize_element(&mut self, value: &T) -> Result<(), Error> + where + T: ?Sized + Serialize, + { + let _ = value; + match self.void {} + } + + fn end(self) -> Result { + match self.void {} + } +} + +impl SerializeTupleStruct for Impossible +where + Error: ser::Error, +{ + type Ok = Ok; + type Error = Error; + + fn serialize_field(&mut self, value: &T) -> Result<(), Error> + where + T: ?Sized + Serialize, + { + let _ = value; + match self.void {} + } + + fn end(self) -> Result { + match self.void {} + } +} + +impl SerializeTupleVariant for Impossible +where + Error: ser::Error, +{ + type Ok = Ok; + type Error = Error; + + fn serialize_field(&mut self, value: &T) -> Result<(), Error> + where + T: ?Sized + Serialize, + { + let _ = value; + match self.void {} + } + + fn end(self) -> Result 
{ + match self.void {} + } +} + +impl SerializeMap for Impossible +where + Error: ser::Error, +{ + type Ok = Ok; + type Error = Error; + + fn serialize_key(&mut self, key: &T) -> Result<(), Error> + where + T: ?Sized + Serialize, + { + let _ = key; + match self.void {} + } + + fn serialize_value(&mut self, value: &T) -> Result<(), Error> + where + T: ?Sized + Serialize, + { + let _ = value; + match self.void {} + } + + fn end(self) -> Result { + match self.void {} + } +} + +impl SerializeStruct for Impossible +where + Error: ser::Error, +{ + type Ok = Ok; + type Error = Error; + + fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Error> + where + T: ?Sized + Serialize, + { + let _ = key; + let _ = value; + match self.void {} + } + + fn end(self) -> Result { + match self.void {} + } +} + +impl SerializeStructVariant for Impossible +where + Error: ser::Error, +{ + type Ok = Ok; + type Error = Error; + + fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Error> + where + T: ?Sized + Serialize, + { + let _ = key; + let _ = value; + match self.void {} + } + + fn end(self) -> Result { + match self.void {} + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..62e59d981d4ae7096876293f13245c0c80d007fb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/ser/mod.rs @@ -0,0 +1,2010 @@ +//! Generic data structure serialization framework. +//! +//! The two most important traits in this module are [`Serialize`] and +//! [`Serializer`]. +//! +//! - **A type that implements `Serialize` is a data structure** that can be +//! serialized to any data format supported by Serde, and conversely +//! - **A type that implements `Serializer` is a data format** that can +//! 
serialize any data structure supported by Serde. +//! +//! # The Serialize trait +//! +//! Serde provides [`Serialize`] implementations for many Rust primitive and +//! standard library types. The complete list is below. All of these can be +//! serialized using Serde out of the box. +//! +//! Additionally, Serde provides a procedural macro called [`serde_derive`] to +//! automatically generate [`Serialize`] implementations for structs and enums +//! in your program. See the [derive section of the manual] for how to use this. +//! +//! In rare cases it may be necessary to implement [`Serialize`] manually for +//! some type in your program. See the [Implementing `Serialize`] section of the +//! manual for more about this. +//! +//! Third-party crates may provide [`Serialize`] implementations for types that +//! they expose. For example the [`linked-hash-map`] crate provides a +//! [`LinkedHashMap`] type that is serializable by Serde because the crate +//! provides an implementation of [`Serialize`] for it. +//! +//! # The Serializer trait +//! +//! [`Serializer`] implementations are provided by third-party crates, for +//! example [`serde_json`], [`serde_yaml`] and [`postcard`]. +//! +//! A partial list of well-maintained formats is given on the [Serde +//! website][data formats]. +//! +//! # Implementations of Serialize provided by Serde +//! +//! - **Primitive types**: +//! - bool +//! - i8, i16, i32, i64, i128, isize +//! - u8, u16, u32, u64, u128, usize +//! - f32, f64 +//! - char +//! - str +//! - &T and &mut T +//! - **Compound types**: +//! - \[T\] +//! - \[T; 0\] through \[T; 32\] +//! - tuples up to size 16 +//! - **Common standard library types**: +//! - String +//! - Option\ +//! - Result\ +//! - PhantomData\ +//! - **Wrapper types**: +//! - Box\ +//! - Cow\<'a, T\> +//! - Cell\ +//! - RefCell\ +//! - Mutex\ +//! - RwLock\ +//! - Rc\ *(if* features = \["rc"\] *is enabled)* +//! - Arc\ *(if* features = \["rc"\] *is enabled)* +//! 
- **Collection types**: +//! - BTreeMap\ +//! - BTreeSet\ +//! - BinaryHeap\ +//! - HashMap\ +//! - HashSet\ +//! - LinkedList\ +//! - VecDeque\ +//! - Vec\ +//! - **FFI types**: +//! - CStr +//! - CString +//! - OsStr +//! - OsString +//! - **Miscellaneous standard library types**: +//! - Duration +//! - SystemTime +//! - Path +//! - PathBuf +//! - Range\ +//! - RangeInclusive\ +//! - Bound\ +//! - num::NonZero* +//! - `!` *(unstable)* +//! - **Net types**: +//! - IpAddr +//! - Ipv4Addr +//! - Ipv6Addr +//! - SocketAddr +//! - SocketAddrV4 +//! - SocketAddrV6 +//! +//! [Implementing `Serialize`]: https://serde.rs/impl-serialize.html +//! [`LinkedHashMap`]: https://docs.rs/linked-hash-map/*/linked_hash_map/struct.LinkedHashMap.html +//! [`Serialize`]: crate::Serialize +//! [`Serializer`]: crate::Serializer +//! [`postcard`]: https://github.com/jamesmunns/postcard +//! [`linked-hash-map`]: https://crates.io/crates/linked-hash-map +//! [`serde_derive`]: https://crates.io/crates/serde_derive +//! [`serde_json`]: https://github.com/serde-rs/json +//! [`serde_yaml`]: https://github.com/dtolnay/serde-yaml +//! [derive section of the manual]: https://serde.rs/derive.html +//! [data formats]: https://serde.rs/#data-formats + +use crate::lib::*; + +mod fmt; +mod impls; +mod impossible; + +pub use self::impossible::Impossible; + +#[cfg(all(not(feature = "std"), no_core_error))] +#[doc(no_inline)] +pub use crate::std_error::Error as StdError; +#[cfg(not(any(feature = "std", no_core_error)))] +#[doc(no_inline)] +pub use core::error::Error as StdError; +#[cfg(feature = "std")] +#[doc(no_inline)] +pub use std::error::Error as StdError; + +//////////////////////////////////////////////////////////////////////////////// + +macro_rules! declare_error_trait { + (Error: Sized $(+ $($supertrait:ident)::+)*) => { + /// Trait used by `Serialize` implementations to generically construct + /// errors belonging to the `Serializer` against which they are + /// currently running. 
+ /// + /// # Example implementation + /// + /// The [example data format] presented on the website shows an error + /// type appropriate for a basic JSON data format. + /// + /// [example data format]: https://serde.rs/data-format.html + #[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::ser::Error` is not satisfied", + ) + )] + pub trait Error: Sized $(+ $($supertrait)::+)* { + /// Used when a [`Serialize`] implementation encounters any error + /// while serializing a type. + /// + /// The message should not be capitalized and should not end with a + /// period. + /// + /// For example, a filesystem [`Path`] may refuse to serialize + /// itself if it contains invalid UTF-8 data. + /// + /// ```edition2021 + /// # struct Path; + /// # + /// # impl Path { + /// # fn to_str(&self) -> Option<&str> { + /// # unimplemented!() + /// # } + /// # } + /// # + /// use serde::ser::{self, Serialize, Serializer}; + /// + /// impl Serialize for Path { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// match self.to_str() { + /// Some(s) => serializer.serialize_str(s), + /// None => Err(ser::Error::custom("path contains invalid UTF-8 characters")), + /// } + /// } + /// } + /// ``` + /// + /// [`Path`]: std::path::Path + /// [`Serialize`]: crate::Serialize + fn custom(msg: T) -> Self + where + T: Display; + } + } +} + +#[cfg(feature = "std")] +declare_error_trait!(Error: Sized + StdError); + +#[cfg(not(feature = "std"))] +declare_error_trait!(Error: Sized + Debug + Display); + +//////////////////////////////////////////////////////////////////////////////// + +/// A **data structure** that can be serialized into any data format supported +/// by Serde. +/// +/// Serde provides `Serialize` implementations for many Rust primitive and +/// standard library types. The complete list is [here][crate::ser]. All of +/// these can be serialized using Serde out of the box. 
+/// +/// Additionally, Serde provides a procedural macro called [`serde_derive`] to +/// automatically generate `Serialize` implementations for structs and enums in +/// your program. See the [derive section of the manual] for how to use this. +/// +/// In rare cases it may be necessary to implement `Serialize` manually for some +/// type in your program. See the [Implementing `Serialize`] section of the +/// manual for more about this. +/// +/// Third-party crates may provide `Serialize` implementations for types that +/// they expose. For example the [`linked-hash-map`] crate provides a +/// [`LinkedHashMap`] type that is serializable by Serde because the crate +/// provides an implementation of `Serialize` for it. +/// +/// [Implementing `Serialize`]: https://serde.rs/impl-serialize.html +/// [`LinkedHashMap`]: https://docs.rs/linked-hash-map/*/linked_hash_map/struct.LinkedHashMap.html +/// [`linked-hash-map`]: https://crates.io/crates/linked-hash-map +/// [`serde_derive`]: https://crates.io/crates/serde_derive +/// [derive section of the manual]: https://serde.rs/derive.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + // Prevents `serde_core::ser::Serialize` appearing in the error message + // in projects with no direct dependency on serde_core. + message = "the trait bound `{Self}: serde::Serialize` is not satisfied", + note = "for local types consider adding `#[derive(serde::Serialize)]` to your `{Self}` type", + note = "for types from other crates check whether the crate offers a `serde` feature flag", + ) +)] +pub trait Serialize { + /// Serialize this value into the given Serde serializer. + /// + /// See the [Implementing `Serialize`] section of the manual for more + /// information about how to implement this method. 
+ /// + /// ```edition2021 + /// use serde::ser::{Serialize, SerializeStruct, Serializer}; + /// + /// struct Person { + /// name: String, + /// age: u8, + /// phones: Vec, + /// } + /// + /// // This is what #[derive(Serialize)] would generate. + /// impl Serialize for Person { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// let mut s = serializer.serialize_struct("Person", 3)?; + /// s.serialize_field("name", &self.name)?; + /// s.serialize_field("age", &self.age)?; + /// s.serialize_field("phones", &self.phones)?; + /// s.end() + /// } + /// } + /// ``` + /// + /// [Implementing `Serialize`]: https://serde.rs/impl-serialize.html + fn serialize(&self, serializer: S) -> Result + where + S: Serializer; +} + +//////////////////////////////////////////////////////////////////////////////// + +/// A **data format** that can serialize any data structure supported by Serde. +/// +/// The role of this trait is to define the serialization half of the [Serde +/// data model], which is a way to categorize every Rust data structure into one +/// of 29 possible types. Each method of the `Serializer` trait corresponds to +/// one of the types of the data model. +/// +/// Implementations of `Serialize` map themselves into this data model by +/// invoking exactly one of the `Serializer` methods. +/// +/// The types that make up the Serde data model are: +/// +/// - **14 primitive types** +/// - bool +/// - i8, i16, i32, i64, i128 +/// - u8, u16, u32, u64, u128 +/// - f32, f64 +/// - char +/// - **string** +/// - UTF-8 bytes with a length and no null terminator. +/// - When serializing, all strings are handled equally. When deserializing, +/// there are three flavors of strings: transient, owned, and borrowed. +/// - **byte array** - \[u8\] +/// - Similar to strings, during deserialization byte arrays can be +/// transient, owned, or borrowed. +/// - **option** +/// - Either none or some value. 
+/// - **unit** +/// - The type of `()` in Rust. It represents an anonymous value containing +/// no data. +/// - **unit_struct** +/// - For example `struct Unit` or `PhantomData`. It represents a named +/// value containing no data. +/// - **unit_variant** +/// - For example the `E::A` and `E::B` in `enum E { A, B }`. +/// - **newtype_struct** +/// - For example `struct Millimeters(u8)`. +/// - **newtype_variant** +/// - For example the `E::N` in `enum E { N(u8) }`. +/// - **seq** +/// - A variably sized heterogeneous sequence of values, for example +/// `Vec` or `HashSet`. When serializing, the length may or may not +/// be known before iterating through all the data. When deserializing, +/// the length is determined by looking at the serialized data. +/// - **tuple** +/// - A statically sized heterogeneous sequence of values for which the +/// length will be known at deserialization time without looking at the +/// serialized data, for example `(u8,)` or `(String, u64, Vec)` or +/// `[u64; 10]`. +/// - **tuple_struct** +/// - A named tuple, for example `struct Rgb(u8, u8, u8)`. +/// - **tuple_variant** +/// - For example the `E::T` in `enum E { T(u8, u8) }`. +/// - **map** +/// - A heterogeneous key-value pairing, for example `BTreeMap`. +/// - **struct** +/// - A heterogeneous key-value pairing in which the keys are strings and +/// will be known at deserialization time without looking at the +/// serialized data, for example `struct S { r: u8, g: u8, b: u8 }`. +/// - **struct_variant** +/// - For example the `E::S` in `enum E { S { r: u8, g: u8, b: u8 } }`. +/// +/// Many Serde serializers produce text or binary data as output, for example +/// JSON or Postcard. This is not a requirement of the `Serializer` trait, and +/// there are serializers that do not produce text or binary output. 
One example +/// is the `serde_json::value::Serializer` (distinct from the main `serde_json` +/// serializer) that produces a `serde_json::Value` data structure in memory as +/// output. +/// +/// [Serde data model]: https://serde.rs/data-model.html +/// +/// # Example implementation +/// +/// The [example data format] presented on the website contains example code for +/// a basic JSON `Serializer`. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::Serializer` is not satisfied", + ) +)] +pub trait Serializer: Sized { + /// The output type produced by this `Serializer` during successful + /// serialization. Most serializers that produce text or binary output + /// should set `Ok = ()` and serialize into an [`io::Write`] or buffer + /// contained within the `Serializer` instance. Serializers that build + /// in-memory data structures may be simplified by using `Ok` to propagate + /// the data structure around. + /// + /// [`io::Write`]: std::io::Write + type Ok; + + /// The error type when some error occurs during serialization. + type Error: Error; + + /// Type returned from [`serialize_seq`] for serializing the content of the + /// sequence. + /// + /// [`serialize_seq`]: #tymethod.serialize_seq + type SerializeSeq: SerializeSeq; + + /// Type returned from [`serialize_tuple`] for serializing the content of + /// the tuple. + /// + /// [`serialize_tuple`]: #tymethod.serialize_tuple + type SerializeTuple: SerializeTuple; + + /// Type returned from [`serialize_tuple_struct`] for serializing the + /// content of the tuple struct. + /// + /// [`serialize_tuple_struct`]: #tymethod.serialize_tuple_struct + type SerializeTupleStruct: SerializeTupleStruct; + + /// Type returned from [`serialize_tuple_variant`] for serializing the + /// content of the tuple variant. 
+ /// + /// [`serialize_tuple_variant`]: #tymethod.serialize_tuple_variant + type SerializeTupleVariant: SerializeTupleVariant; + + /// Type returned from [`serialize_map`] for serializing the content of the + /// map. + /// + /// [`serialize_map`]: #tymethod.serialize_map + type SerializeMap: SerializeMap; + + /// Type returned from [`serialize_struct`] for serializing the content of + /// the struct. + /// + /// [`serialize_struct`]: #tymethod.serialize_struct + type SerializeStruct: SerializeStruct; + + /// Type returned from [`serialize_struct_variant`] for serializing the + /// content of the struct variant. + /// + /// [`serialize_struct_variant`]: #tymethod.serialize_struct_variant + type SerializeStructVariant: SerializeStructVariant; + + /// Serialize a `bool` value. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for bool { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_bool(*self) + /// } + /// } + /// ``` + fn serialize_bool(self, v: bool) -> Result; + + /// Serialize an `i8` value. + /// + /// If the format does not differentiate between `i8` and `i64`, a + /// reasonable implementation would be to cast the value to `i64` and + /// forward to `serialize_i64`. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for i8 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_i8(*self) + /// } + /// } + /// ``` + fn serialize_i8(self, v: i8) -> Result; + + /// Serialize an `i16` value. + /// + /// If the format does not differentiate between `i16` and `i64`, a + /// reasonable implementation would be to cast the value to `i64` and + /// forward to `serialize_i64`. 
+ /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for i16 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_i16(*self) + /// } + /// } + /// ``` + fn serialize_i16(self, v: i16) -> Result; + + /// Serialize an `i32` value. + /// + /// If the format does not differentiate between `i32` and `i64`, a + /// reasonable implementation would be to cast the value to `i64` and + /// forward to `serialize_i64`. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for i32 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_i32(*self) + /// } + /// } + /// ``` + fn serialize_i32(self, v: i32) -> Result; + + /// Serialize an `i64` value. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for i64 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_i64(*self) + /// } + /// } + /// ``` + fn serialize_i64(self, v: i64) -> Result; + + /// Serialize an `i128` value. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for i128 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_i128(*self) + /// } + /// } + /// ``` + /// + /// The default behavior unconditionally returns an error. + fn serialize_i128(self, v: i128) -> Result { + let _ = v; + Err(Error::custom("i128 is not supported")) + } + + /// Serialize a `u8` value. 
+ /// + /// If the format does not differentiate between `u8` and `u64`, a + /// reasonable implementation would be to cast the value to `u64` and + /// forward to `serialize_u64`. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for u8 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_u8(*self) + /// } + /// } + /// ``` + fn serialize_u8(self, v: u8) -> Result; + + /// Serialize a `u16` value. + /// + /// If the format does not differentiate between `u16` and `u64`, a + /// reasonable implementation would be to cast the value to `u64` and + /// forward to `serialize_u64`. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for u16 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_u16(*self) + /// } + /// } + /// ``` + fn serialize_u16(self, v: u16) -> Result; + + /// Serialize a `u32` value. + /// + /// If the format does not differentiate between `u32` and `u64`, a + /// reasonable implementation would be to cast the value to `u64` and + /// forward to `serialize_u64`. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for u32 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_u32(*self) + /// } + /// } + /// ``` + fn serialize_u32(self, v: u32) -> Result; + + /// Serialize a `u64` value. 
+ /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for u64 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_u64(*self) + /// } + /// } + /// ``` + fn serialize_u64(self, v: u64) -> Result; + + /// Serialize a `u128` value. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for u128 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_u128(*self) + /// } + /// } + /// ``` + /// + /// The default behavior unconditionally returns an error. + fn serialize_u128(self, v: u128) -> Result { + let _ = v; + Err(Error::custom("u128 is not supported")) + } + + /// Serialize an `f32` value. + /// + /// If the format does not differentiate between `f32` and `f64`, a + /// reasonable implementation would be to cast the value to `f64` and + /// forward to `serialize_f64`. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for f32 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_f32(*self) + /// } + /// } + /// ``` + fn serialize_f32(self, v: f32) -> Result; + + /// Serialize an `f64` value. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for f64 { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_f64(*self) + /// } + /// } + /// ``` + fn serialize_f64(self, v: f64) -> Result; + + /// Serialize a character. + /// + /// If the format does not support characters, it is reasonable to serialize + /// it as a single element `str` or a `u32`. 
+ /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for char { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_char(*self) + /// } + /// } + /// ``` + fn serialize_char(self, v: char) -> Result; + + /// Serialize a `&str`. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for str { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_str(self) + /// } + /// } + /// ``` + fn serialize_str(self, v: &str) -> Result; + + /// Serialize a chunk of raw byte data. + /// + /// Enables serializers to serialize byte slices more compactly or more + /// efficiently than other types of slices. If no efficient implementation + /// is available, a reasonable implementation would be to forward to + /// `serialize_seq`. If forwarded, the implementation looks usually just + /// like this: + /// + /// ```edition2021 + /// # use serde::ser::{Serializer, SerializeSeq}; + /// # use serde_core::__private::doc::Error; + /// # + /// # struct MySerializer; + /// # + /// # impl Serializer for MySerializer { + /// # type Ok = (); + /// # type Error = Error; + /// # + /// fn serialize_bytes(self, v: &[u8]) -> Result { + /// let mut seq = self.serialize_seq(Some(v.len()))?; + /// for b in v { + /// seq.serialize_element(b)?; + /// } + /// seq.end() + /// } + /// # + /// # serde_core::__serialize_unimplemented! { + /// # bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str none some + /// # unit unit_struct unit_variant newtype_struct newtype_variant + /// # seq tuple tuple_struct tuple_variant map struct struct_variant + /// # } + /// # } + /// ``` + fn serialize_bytes(self, v: &[u8]) -> Result; + + /// Serialize a [`None`] value. 
+ /// + /// ```edition2021 + /// # use serde::{Serialize, Serializer}; + /// # + /// # enum Option { + /// # Some(T), + /// # None, + /// # } + /// # + /// # use self::Option::{Some, None}; + /// # + /// impl Serialize for Option + /// where + /// T: Serialize, + /// { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// match *self { + /// Some(ref value) => serializer.serialize_some(value), + /// None => serializer.serialize_none(), + /// } + /// } + /// } + /// # + /// # fn main() {} + /// ``` + /// + /// [`None`]: core::option::Option::None + fn serialize_none(self) -> Result; + + /// Serialize a [`Some(T)`] value. + /// + /// ```edition2021 + /// # use serde::{Serialize, Serializer}; + /// # + /// # enum Option { + /// # Some(T), + /// # None, + /// # } + /// # + /// # use self::Option::{Some, None}; + /// # + /// impl Serialize for Option + /// where + /// T: Serialize, + /// { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// match *self { + /// Some(ref value) => serializer.serialize_some(value), + /// None => serializer.serialize_none(), + /// } + /// } + /// } + /// # + /// # fn main() {} + /// ``` + /// + /// [`Some(T)`]: core::option::Option::Some + fn serialize_some(self, value: &T) -> Result + where + T: ?Sized + Serialize; + + /// Serialize a `()` value. + /// + /// ```edition2021 + /// # use serde::Serializer; + /// # + /// # serde_core::__private_serialize!(); + /// # + /// impl Serialize for () { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_unit() + /// } + /// } + /// ``` + fn serialize_unit(self) -> Result; + + /// Serialize a unit struct like `struct Unit` or `PhantomData`. + /// + /// A reasonable implementation would be to forward to `serialize_unit`. 
+ /// + /// ```edition2021 + /// use serde::{Serialize, Serializer}; + /// + /// struct Nothing; + /// + /// impl Serialize for Nothing { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_unit_struct("Nothing") + /// } + /// } + /// ``` + fn serialize_unit_struct(self, name: &'static str) -> Result; + + /// Serialize a unit variant like `E::A` in `enum E { A, B }`. + /// + /// The `name` is the name of the enum, the `variant_index` is the index of + /// this variant within the enum, and the `variant` is the name of the + /// variant. + /// + /// ```edition2021 + /// use serde::{Serialize, Serializer}; + /// + /// enum E { + /// A, + /// B, + /// } + /// + /// impl Serialize for E { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// match *self { + /// E::A => serializer.serialize_unit_variant("E", 0, "A"), + /// E::B => serializer.serialize_unit_variant("E", 1, "B"), + /// } + /// } + /// } + /// ``` + fn serialize_unit_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + ) -> Result; + + /// Serialize a newtype struct like `struct Millimeters(u8)`. + /// + /// Serializers are encouraged to treat newtype structs as insignificant + /// wrappers around the data they contain. A reasonable implementation would + /// be to forward to `value.serialize(self)`. + /// + /// ```edition2021 + /// use serde::{Serialize, Serializer}; + /// + /// struct Millimeters(u8); + /// + /// impl Serialize for Millimeters { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.serialize_newtype_struct("Millimeters", &self.0) + /// } + /// } + /// ``` + fn serialize_newtype_struct( + self, + name: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize; + + /// Serialize a newtype variant like `E::N` in `enum E { N(u8) }`. 
+ /// + /// The `name` is the name of the enum, the `variant_index` is the index of + /// this variant within the enum, and the `variant` is the name of the + /// variant. The `value` is the data contained within this newtype variant. + /// + /// ```edition2021 + /// use serde::{Serialize, Serializer}; + /// + /// enum E { + /// M(String), + /// N(u8), + /// } + /// + /// impl Serialize for E { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// match *self { + /// E::M(ref s) => serializer.serialize_newtype_variant("E", 0, "M", s), + /// E::N(n) => serializer.serialize_newtype_variant("E", 1, "N", &n), + /// } + /// } + /// } + /// ``` + fn serialize_newtype_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize; + + /// Begin to serialize a variably sized sequence. This call must be + /// followed by zero or more calls to `serialize_element`, then a call to + /// `end`. + /// + /// The argument is the number of elements in the sequence, which may or may + /// not be computable before the sequence is iterated. Some serializers only + /// support sequences whose length is known up front. 
+ /// + /// ```edition2021 + /// # use std::marker::PhantomData; + /// # + /// # struct Vec(PhantomData); + /// # + /// # impl Vec { + /// # fn len(&self) -> usize { + /// # unimplemented!() + /// # } + /// # } + /// # + /// # impl<'a, T> IntoIterator for &'a Vec { + /// # type Item = &'a T; + /// # type IntoIter = Box>; + /// # + /// # fn into_iter(self) -> Self::IntoIter { + /// # unimplemented!() + /// # } + /// # } + /// # + /// use serde::ser::{Serialize, SerializeSeq, Serializer}; + /// + /// impl Serialize for Vec + /// where + /// T: Serialize, + /// { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// let mut seq = serializer.serialize_seq(Some(self.len()))?; + /// for element in self { + /// seq.serialize_element(element)?; + /// } + /// seq.end() + /// } + /// } + /// ``` + fn serialize_seq(self, len: Option) -> Result; + + /// Begin to serialize a statically sized sequence whose length will be + /// known at deserialization time without looking at the serialized data. + /// This call must be followed by zero or more calls to `serialize_element`, + /// then a call to `end`. 
+ /// + /// ```edition2021 + /// use serde::ser::{Serialize, SerializeTuple, Serializer}; + /// + /// # mod fool { + /// # trait Serialize {} + /// impl Serialize for (A, B, C) + /// # {} + /// # } + /// # + /// # struct Tuple3(A, B, C); + /// # + /// # impl Serialize for Tuple3 + /// where + /// A: Serialize, + /// B: Serialize, + /// C: Serialize, + /// { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// let mut tup = serializer.serialize_tuple(3)?; + /// tup.serialize_element(&self.0)?; + /// tup.serialize_element(&self.1)?; + /// tup.serialize_element(&self.2)?; + /// tup.end() + /// } + /// } + /// ``` + /// + /// ```edition2021 + /// use serde::ser::{Serialize, SerializeTuple, Serializer}; + /// + /// const VRAM_SIZE: usize = 386; + /// struct Vram([u16; VRAM_SIZE]); + /// + /// impl Serialize for Vram { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// let mut seq = serializer.serialize_tuple(VRAM_SIZE)?; + /// for element in &self.0[..] { + /// seq.serialize_element(element)?; + /// } + /// seq.end() + /// } + /// } + /// ``` + fn serialize_tuple(self, len: usize) -> Result; + + /// Begin to serialize a tuple struct like `struct Rgb(u8, u8, u8)`. This + /// call must be followed by zero or more calls to `serialize_field`, then a + /// call to `end`. + /// + /// The `name` is the name of the tuple struct and the `len` is the number + /// of data fields that will be serialized. 
+ /// + /// ```edition2021 + /// use serde::ser::{Serialize, SerializeTupleStruct, Serializer}; + /// + /// struct Rgb(u8, u8, u8); + /// + /// impl Serialize for Rgb { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// let mut ts = serializer.serialize_tuple_struct("Rgb", 3)?; + /// ts.serialize_field(&self.0)?; + /// ts.serialize_field(&self.1)?; + /// ts.serialize_field(&self.2)?; + /// ts.end() + /// } + /// } + /// ``` + fn serialize_tuple_struct( + self, + name: &'static str, + len: usize, + ) -> Result; + + /// Begin to serialize a tuple variant like `E::T` in `enum E { T(u8, u8) + /// }`. This call must be followed by zero or more calls to + /// `serialize_field`, then a call to `end`. + /// + /// The `name` is the name of the enum, the `variant_index` is the index of + /// this variant within the enum, the `variant` is the name of the variant, + /// and the `len` is the number of data fields that will be serialized. + /// + /// ```edition2021 + /// use serde::ser::{Serialize, SerializeTupleVariant, Serializer}; + /// + /// enum E { + /// T(u8, u8), + /// U(String, u32, u32), + /// } + /// + /// impl Serialize for E { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// match *self { + /// E::T(ref a, ref b) => { + /// let mut tv = serializer.serialize_tuple_variant("E", 0, "T", 2)?; + /// tv.serialize_field(a)?; + /// tv.serialize_field(b)?; + /// tv.end() + /// } + /// E::U(ref a, ref b, ref c) => { + /// let mut tv = serializer.serialize_tuple_variant("E", 1, "U", 3)?; + /// tv.serialize_field(a)?; + /// tv.serialize_field(b)?; + /// tv.serialize_field(c)?; + /// tv.end() + /// } + /// } + /// } + /// } + /// ``` + fn serialize_tuple_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + len: usize, + ) -> Result; + + /// Begin to serialize a map. 
This call must be followed by zero or more + /// calls to `serialize_key` and `serialize_value`, then a call to `end`. + /// + /// The argument is the number of elements in the map, which may or may not + /// be computable before the map is iterated. Some serializers only support + /// maps whose length is known up front. + /// + /// ```edition2021 + /// # use std::marker::PhantomData; + /// # + /// # struct HashMap(PhantomData, PhantomData); + /// # + /// # impl HashMap { + /// # fn len(&self) -> usize { + /// # unimplemented!() + /// # } + /// # } + /// # + /// # impl<'a, K, V> IntoIterator for &'a HashMap { + /// # type Item = (&'a K, &'a V); + /// # type IntoIter = Box>; + /// # + /// # fn into_iter(self) -> Self::IntoIter { + /// # unimplemented!() + /// # } + /// # } + /// # + /// use serde::ser::{Serialize, SerializeMap, Serializer}; + /// + /// impl Serialize for HashMap + /// where + /// K: Serialize, + /// V: Serialize, + /// { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// let mut map = serializer.serialize_map(Some(self.len()))?; + /// for (k, v) in self { + /// map.serialize_entry(k, v)?; + /// } + /// map.end() + /// } + /// } + /// ``` + fn serialize_map(self, len: Option) -> Result; + + /// Begin to serialize a struct like `struct Rgb { r: u8, g: u8, b: u8 }`. + /// This call must be followed by zero or more calls to `serialize_field`, + /// then a call to `end`. + /// + /// The `name` is the name of the struct and the `len` is the number of + /// data fields that will be serialized. `len` does not include fields + /// which are skipped with [`SerializeStruct::skip_field`]. 
+ /// + /// ```edition2021 + /// use serde::ser::{Serialize, SerializeStruct, Serializer}; + /// + /// struct Rgb { + /// r: u8, + /// g: u8, + /// b: u8, + /// } + /// + /// impl Serialize for Rgb { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// let mut rgb = serializer.serialize_struct("Rgb", 3)?; + /// rgb.serialize_field("r", &self.r)?; + /// rgb.serialize_field("g", &self.g)?; + /// rgb.serialize_field("b", &self.b)?; + /// rgb.end() + /// } + /// } + /// ``` + fn serialize_struct( + self, + name: &'static str, + len: usize, + ) -> Result; + + /// Begin to serialize a struct variant like `E::S` in `enum E { S { r: u8, + /// g: u8, b: u8 } }`. This call must be followed by zero or more calls to + /// `serialize_field`, then a call to `end`. + /// + /// The `name` is the name of the enum, the `variant_index` is the index of + /// this variant within the enum, the `variant` is the name of the variant, + /// and the `len` is the number of data fields that will be serialized. + /// `len` does not include fields which are skipped with + /// [`SerializeStructVariant::skip_field`]. + /// + /// ```edition2021 + /// use serde::ser::{Serialize, SerializeStructVariant, Serializer}; + /// + /// enum E { + /// S { r: u8, g: u8, b: u8 }, + /// } + /// + /// impl Serialize for E { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// match *self { + /// E::S { + /// ref r, + /// ref g, + /// ref b, + /// } => { + /// let mut sv = serializer.serialize_struct_variant("E", 0, "S", 3)?; + /// sv.serialize_field("r", r)?; + /// sv.serialize_field("g", g)?; + /// sv.serialize_field("b", b)?; + /// sv.end() + /// } + /// } + /// } + /// } + /// ``` + fn serialize_struct_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + len: usize, + ) -> Result; + + /// Collect an iterator as a sequence. 
+ /// + /// The default implementation serializes each item yielded by the iterator + /// using [`serialize_seq`]. Implementors should not need to override this + /// method. + /// + /// ```edition2021 + /// use serde::{Serialize, Serializer}; + /// + /// struct SecretlyOneHigher { + /// data: Vec, + /// } + /// + /// impl Serialize for SecretlyOneHigher { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.collect_seq(self.data.iter().map(|x| x + 1)) + /// } + /// } + /// ``` + /// + /// [`serialize_seq`]: #tymethod.serialize_seq + fn collect_seq(self, iter: I) -> Result + where + I: IntoIterator, + ::Item: Serialize, + { + let mut iter = iter.into_iter(); + let mut serializer = tri!(self.serialize_seq(iterator_len_hint(&iter))); + tri!(iter.try_for_each(|item| serializer.serialize_element(&item))); + serializer.end() + } + + /// Collect an iterator as a map. + /// + /// The default implementation serializes each pair yielded by the iterator + /// using [`serialize_map`]. Implementors should not need to override this + /// method. + /// + /// ```edition2021 + /// use serde::{Serialize, Serializer}; + /// use std::collections::BTreeSet; + /// + /// struct MapToUnit { + /// keys: BTreeSet, + /// } + /// + /// // Serializes as a map in which the values are all unit. 
+ /// impl Serialize for MapToUnit { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.collect_map(self.keys.iter().map(|k| (k, ()))) + /// } + /// } + /// ``` + /// + /// [`serialize_map`]: #tymethod.serialize_map + fn collect_map(self, iter: I) -> Result + where + K: Serialize, + V: Serialize, + I: IntoIterator, + { + let mut iter = iter.into_iter(); + let mut serializer = tri!(self.serialize_map(iterator_len_hint(&iter))); + tri!(iter.try_for_each(|(key, value)| serializer.serialize_entry(&key, &value))); + serializer.end() + } + + /// Serialize a string produced by an implementation of `Display`. + /// + /// The default implementation builds a heap-allocated [`String`] and + /// delegates to [`serialize_str`]. Serializers are encouraged to provide a + /// more efficient implementation if possible. + /// + /// ```edition2021 + /// # struct DateTime; + /// # + /// # impl DateTime { + /// # fn naive_local(&self) -> () { () } + /// # fn offset(&self) -> () { () } + /// # } + /// # + /// use serde::{Serialize, Serializer}; + /// + /// impl Serialize for DateTime { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.collect_str(&format_args!("{:?}{:?}", self.naive_local(), self.offset())) + /// } + /// } + /// ``` + /// + /// [`serialize_str`]: Self::serialize_str + #[cfg(any(feature = "std", feature = "alloc"))] + fn collect_str(self, value: &T) -> Result + where + T: ?Sized + Display, + { + self.serialize_str(&value.to_string()) + } + + /// Serialize a string produced by an implementation of `Display`. + /// + /// Serializers that use `no_std` are required to provide an implementation + /// of this method. If no more sensible behavior is possible, the + /// implementation is expected to return an error. 
+ /// + /// ```edition2021 + /// # struct DateTime; + /// # + /// # impl DateTime { + /// # fn naive_local(&self) -> () { () } + /// # fn offset(&self) -> () { () } + /// # } + /// # + /// use serde::{Serialize, Serializer}; + /// + /// impl Serialize for DateTime { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// serializer.collect_str(&format_args!("{:?}{:?}", self.naive_local(), self.offset())) + /// } + /// } + /// ``` + #[cfg(not(any(feature = "std", feature = "alloc")))] + fn collect_str(self, value: &T) -> Result + where + T: ?Sized + Display; + + /// Determine whether `Serialize` implementations should serialize in + /// human-readable form. + /// + /// Some types have a human-readable form that may be somewhat expensive to + /// construct, as well as a binary form that is compact and efficient. + /// Generally text-based formats like JSON and YAML will prefer to use the + /// human-readable one and binary formats like Postcard will prefer the + /// compact one. + /// + /// ```edition2021 + /// # use std::fmt::{self, Display}; + /// # + /// # struct Timestamp; + /// # + /// # impl Timestamp { + /// # fn seconds_since_epoch(&self) -> u64 { unimplemented!() } + /// # } + /// # + /// # impl Display for Timestamp { + /// # fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + /// # unimplemented!() + /// # } + /// # } + /// # + /// use serde::{Serialize, Serializer}; + /// + /// impl Serialize for Timestamp { + /// fn serialize(&self, serializer: S) -> Result + /// where + /// S: Serializer, + /// { + /// if serializer.is_human_readable() { + /// // Serialize to a human-readable string "2015-05-15T17:01:00Z". + /// self.to_string().serialize(serializer) + /// } else { + /// // Serialize to a compact binary representation. + /// self.seconds_since_epoch().serialize(serializer) + /// } + /// } + /// } + /// ``` + /// + /// The default implementation of this method returns `true`. 
Data formats + /// may override this to `false` to request a compact form for types that + /// support one. Note that modifying this method to change a format from + /// human-readable to compact or vice versa should be regarded as a breaking + /// change, as a value serialized in human-readable mode is not required to + /// deserialize from the same data in compact mode. + #[inline] + fn is_human_readable(&self) -> bool { + true + } +} + +/// Returned from `Serializer::serialize_seq`. +/// +/// # Example use +/// +/// ```edition2021 +/// # use std::marker::PhantomData; +/// # +/// # struct Vec(PhantomData); +/// # +/// # impl Vec { +/// # fn len(&self) -> usize { +/// # unimplemented!() +/// # } +/// # } +/// # +/// # impl<'a, T> IntoIterator for &'a Vec { +/// # type Item = &'a T; +/// # type IntoIter = Box>; +/// # fn into_iter(self) -> Self::IntoIter { +/// # unimplemented!() +/// # } +/// # } +/// # +/// use serde::ser::{Serialize, SerializeSeq, Serializer}; +/// +/// impl Serialize for Vec +/// where +/// T: Serialize, +/// { +/// fn serialize(&self, serializer: S) -> Result +/// where +/// S: Serializer, +/// { +/// let mut seq = serializer.serialize_seq(Some(self.len()))?; +/// for element in self { +/// seq.serialize_element(element)?; +/// } +/// seq.end() +/// } +/// } +/// ``` +/// +/// # Example implementation +/// +/// The [example data format] presented on the website demonstrates an +/// implementation of `SerializeSeq` for a basic JSON data format. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::ser::SerializeSeq` is not satisfied", + ) +)] +pub trait SerializeSeq { + /// Must match the `Ok` type of our `Serializer`. + type Ok; + + /// Must match the `Error` type of our `Serializer`. + type Error: Error; + + /// Serialize a sequence element. 
+ fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize; + + /// Finish serializing a sequence. + fn end(self) -> Result; +} + +/// Returned from `Serializer::serialize_tuple`. +/// +/// # Example use +/// +/// ```edition2021 +/// use serde::ser::{Serialize, SerializeTuple, Serializer}; +/// +/// # mod fool { +/// # trait Serialize {} +/// impl Serialize for (A, B, C) +/// # {} +/// # } +/// # +/// # struct Tuple3(A, B, C); +/// # +/// # impl Serialize for Tuple3 +/// where +/// A: Serialize, +/// B: Serialize, +/// C: Serialize, +/// { +/// fn serialize(&self, serializer: S) -> Result +/// where +/// S: Serializer, +/// { +/// let mut tup = serializer.serialize_tuple(3)?; +/// tup.serialize_element(&self.0)?; +/// tup.serialize_element(&self.1)?; +/// tup.serialize_element(&self.2)?; +/// tup.end() +/// } +/// } +/// ``` +/// +/// ```edition2021 +/// # use std::marker::PhantomData; +/// # +/// # struct Array(PhantomData); +/// # +/// # impl Array { +/// # fn len(&self) -> usize { +/// # unimplemented!() +/// # } +/// # } +/// # +/// # impl<'a, T> IntoIterator for &'a Array { +/// # type Item = &'a T; +/// # type IntoIter = Box>; +/// # fn into_iter(self) -> Self::IntoIter { +/// # unimplemented!() +/// # } +/// # } +/// # +/// use serde::ser::{Serialize, SerializeTuple, Serializer}; +/// +/// # mod fool { +/// # trait Serialize {} +/// impl Serialize for [T; 16] +/// # {} +/// # } +/// # +/// # impl Serialize for Array +/// where +/// T: Serialize, +/// { +/// fn serialize(&self, serializer: S) -> Result +/// where +/// S: Serializer, +/// { +/// let mut seq = serializer.serialize_tuple(16)?; +/// for element in self { +/// seq.serialize_element(element)?; +/// } +/// seq.end() +/// } +/// } +/// ``` +/// +/// # Example implementation +/// +/// The [example data format] presented on the website demonstrates an +/// implementation of `SerializeTuple` for a basic JSON data format. 
+/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::ser::SerializeTuple` is not satisfied", + ) +)] +pub trait SerializeTuple { + /// Must match the `Ok` type of our `Serializer`. + type Ok; + + /// Must match the `Error` type of our `Serializer`. + type Error: Error; + + /// Serialize a tuple element. + fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize; + + /// Finish serializing a tuple. + fn end(self) -> Result; +} + +/// Returned from `Serializer::serialize_tuple_struct`. +/// +/// # Example use +/// +/// ```edition2021 +/// use serde::ser::{Serialize, SerializeTupleStruct, Serializer}; +/// +/// struct Rgb(u8, u8, u8); +/// +/// impl Serialize for Rgb { +/// fn serialize(&self, serializer: S) -> Result +/// where +/// S: Serializer, +/// { +/// let mut ts = serializer.serialize_tuple_struct("Rgb", 3)?; +/// ts.serialize_field(&self.0)?; +/// ts.serialize_field(&self.1)?; +/// ts.serialize_field(&self.2)?; +/// ts.end() +/// } +/// } +/// ``` +/// +/// # Example implementation +/// +/// The [example data format] presented on the website demonstrates an +/// implementation of `SerializeTupleStruct` for a basic JSON data format. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::ser::SerializeTupleStruct` is not satisfied", + ) +)] +pub trait SerializeTupleStruct { + /// Must match the `Ok` type of our `Serializer`. + type Ok; + + /// Must match the `Error` type of our `Serializer`. + type Error: Error; + + /// Serialize a tuple struct field. + fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize; + + /// Finish serializing a tuple struct. 
+ fn end(self) -> Result; +} + +/// Returned from `Serializer::serialize_tuple_variant`. +/// +/// # Example use +/// +/// ```edition2021 +/// use serde::ser::{Serialize, SerializeTupleVariant, Serializer}; +/// +/// enum E { +/// T(u8, u8), +/// U(String, u32, u32), +/// } +/// +/// impl Serialize for E { +/// fn serialize(&self, serializer: S) -> Result +/// where +/// S: Serializer, +/// { +/// match *self { +/// E::T(ref a, ref b) => { +/// let mut tv = serializer.serialize_tuple_variant("E", 0, "T", 2)?; +/// tv.serialize_field(a)?; +/// tv.serialize_field(b)?; +/// tv.end() +/// } +/// E::U(ref a, ref b, ref c) => { +/// let mut tv = serializer.serialize_tuple_variant("E", 1, "U", 3)?; +/// tv.serialize_field(a)?; +/// tv.serialize_field(b)?; +/// tv.serialize_field(c)?; +/// tv.end() +/// } +/// } +/// } +/// } +/// ``` +/// +/// # Example implementation +/// +/// The [example data format] presented on the website demonstrates an +/// implementation of `SerializeTupleVariant` for a basic JSON data format. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::ser::SerializeTupleVariant` is not satisfied", + ) +)] +pub trait SerializeTupleVariant { + /// Must match the `Ok` type of our `Serializer`. + type Ok; + + /// Must match the `Error` type of our `Serializer`. + type Error: Error; + + /// Serialize a tuple variant field. + fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize; + + /// Finish serializing a tuple variant. + fn end(self) -> Result; +} + +/// Returned from `Serializer::serialize_map`. 
+/// +/// # Example use +/// +/// ```edition2021 +/// # use std::marker::PhantomData; +/// # +/// # struct HashMap(PhantomData, PhantomData); +/// # +/// # impl HashMap { +/// # fn len(&self) -> usize { +/// # unimplemented!() +/// # } +/// # } +/// # +/// # impl<'a, K, V> IntoIterator for &'a HashMap { +/// # type Item = (&'a K, &'a V); +/// # type IntoIter = Box>; +/// # +/// # fn into_iter(self) -> Self::IntoIter { +/// # unimplemented!() +/// # } +/// # } +/// # +/// use serde::ser::{Serialize, SerializeMap, Serializer}; +/// +/// impl Serialize for HashMap +/// where +/// K: Serialize, +/// V: Serialize, +/// { +/// fn serialize(&self, serializer: S) -> Result +/// where +/// S: Serializer, +/// { +/// let mut map = serializer.serialize_map(Some(self.len()))?; +/// for (k, v) in self { +/// map.serialize_entry(k, v)?; +/// } +/// map.end() +/// } +/// } +/// ``` +/// +/// # Example implementation +/// +/// The [example data format] presented on the website demonstrates an +/// implementation of `SerializeMap` for a basic JSON data format. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::ser::SerializeMap` is not satisfied", + ) +)] +pub trait SerializeMap { + /// Must match the `Ok` type of our `Serializer`. + type Ok; + + /// Must match the `Error` type of our `Serializer`. + type Error: Error; + + /// Serialize a map key. + /// + /// If possible, `Serialize` implementations are encouraged to use + /// `serialize_entry` instead as it may be implemented more efficiently in + /// some formats compared to a pair of calls to `serialize_key` and + /// `serialize_value`. + fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize; + + /// Serialize a map value. 
+ /// + /// # Panics + /// + /// Calling `serialize_value` before `serialize_key` is incorrect and is + /// allowed to panic or produce bogus results. + fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize; + + /// Serialize a map entry consisting of a key and a value. + /// + /// Some [`Serialize`] types are not able to hold a key and value in memory + /// at the same time so `SerializeMap` implementations are required to + /// support [`serialize_key`] and [`serialize_value`] individually. The + /// `serialize_entry` method allows serializers to optimize for the case + /// where key and value are both available. [`Serialize`] implementations + /// are encouraged to use `serialize_entry` if possible. + /// + /// The default implementation delegates to [`serialize_key`] and + /// [`serialize_value`]. This is appropriate for serializers that do not + /// care about performance or are not able to optimize `serialize_entry` any + /// better than this. + /// + /// [`Serialize`]: crate::Serialize + /// [`serialize_key`]: Self::serialize_key + /// [`serialize_value`]: Self::serialize_value + fn serialize_entry(&mut self, key: &K, value: &V) -> Result<(), Self::Error> + where + K: ?Sized + Serialize, + V: ?Sized + Serialize, + { + tri!(self.serialize_key(key)); + self.serialize_value(value) + } + + /// Finish serializing a map. + fn end(self) -> Result; +} + +/// Returned from `Serializer::serialize_struct`. 
+/// +/// # Example use +/// +/// ```edition2021 +/// use serde::ser::{Serialize, SerializeStruct, Serializer}; +/// +/// struct Rgb { +/// r: u8, +/// g: u8, +/// b: u8, +/// } +/// +/// impl Serialize for Rgb { +/// fn serialize(&self, serializer: S) -> Result +/// where +/// S: Serializer, +/// { +/// let mut rgb = serializer.serialize_struct("Rgb", 3)?; +/// rgb.serialize_field("r", &self.r)?; +/// rgb.serialize_field("g", &self.g)?; +/// rgb.serialize_field("b", &self.b)?; +/// rgb.end() +/// } +/// } +/// ``` +/// +/// # Example implementation +/// +/// The [example data format] presented on the website demonstrates an +/// implementation of `SerializeStruct` for a basic JSON data format. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::ser::SerializeStruct` is not satisfied", + ) +)] +pub trait SerializeStruct { + /// Must match the `Ok` type of our `Serializer`. + type Ok; + + /// Must match the `Error` type of our `Serializer`. + type Error: Error; + + /// Serialize a struct field. + fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize; + + /// Indicate that a struct field has been skipped. + /// + /// The default implementation does nothing. + #[inline] + fn skip_field(&mut self, key: &'static str) -> Result<(), Self::Error> { + let _ = key; + Ok(()) + } + + /// Finish serializing a struct. + fn end(self) -> Result; +} + +/// Returned from `Serializer::serialize_struct_variant`. 
+/// +/// # Example use +/// +/// ```edition2021 +/// use serde::ser::{Serialize, SerializeStructVariant, Serializer}; +/// +/// enum E { +/// S { r: u8, g: u8, b: u8 }, +/// } +/// +/// impl Serialize for E { +/// fn serialize(&self, serializer: S) -> Result +/// where +/// S: Serializer, +/// { +/// match *self { +/// E::S { +/// ref r, +/// ref g, +/// ref b, +/// } => { +/// let mut sv = serializer.serialize_struct_variant("E", 0, "S", 3)?; +/// sv.serialize_field("r", r)?; +/// sv.serialize_field("g", g)?; +/// sv.serialize_field("b", b)?; +/// sv.end() +/// } +/// } +/// } +/// } +/// ``` +/// +/// # Example implementation +/// +/// The [example data format] presented on the website demonstrates an +/// implementation of `SerializeStructVariant` for a basic JSON data format. +/// +/// [example data format]: https://serde.rs/data-format.html +#[cfg_attr( + not(no_diagnostic_namespace), + diagnostic::on_unimplemented( + message = "the trait bound `{Self}: serde::ser::SerializeStructVariant` is not satisfied", + ) +)] +pub trait SerializeStructVariant { + /// Must match the `Ok` type of our `Serializer`. + type Ok; + + /// Must match the `Error` type of our `Serializer`. + type Error: Error; + + /// Serialize a struct variant field. + fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize; + + /// Indicate that a struct variant field has been skipped. + /// + /// The default implementation does nothing. + #[inline] + fn skip_field(&mut self, key: &'static str) -> Result<(), Self::Error> { + let _ = key; + Ok(()) + } + + /// Finish serializing a struct variant. 
+ fn end(self) -> Result; +} + +fn iterator_len_hint(iter: &I) -> Option +where + I: Iterator, +{ + match iter.size_hint() { + (lo, Some(hi)) if lo == hi => Some(lo), + _ => None, + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/std_error.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/std_error.rs new file mode 100644 index 0000000000000000000000000000000000000000..e026ace100f29f98ff3ddf6e163248430c0c9b3c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/core/std_error.rs @@ -0,0 +1,48 @@ +use crate::lib::{Debug, Display}; + +/// Either a re-export of std::error::Error or a new identical trait, depending +/// on whether Serde's "std" feature is enabled. +/// +/// Serde's error traits [`serde::ser::Error`] and [`serde::de::Error`] require +/// [`std::error::Error`] as a supertrait, but only when Serde is built with +/// "std" enabled. Data formats that don't care about no\_std support should +/// generally provide their error types with a `std::error::Error` impl +/// directly: +/// +/// ```edition2021 +/// #[derive(Debug)] +/// struct MySerError {...} +/// +/// impl serde::ser::Error for MySerError {...} +/// +/// impl std::fmt::Display for MySerError {...} +/// +/// // We don't support no_std! +/// impl std::error::Error for MySerError {} +/// ``` +/// +/// Data formats that *do* support no\_std may either have a "std" feature of +/// their own: +/// +/// ```toml +/// [features] +/// std = ["serde/std"] +/// ``` +/// +/// ```edition2021 +/// #[cfg(feature = "std")] +/// impl std::error::Error for MySerError {} +/// ``` +/// +/// ... or else provide the std Error impl unconditionally via Serde's +/// re-export: +/// +/// ```edition2021 +/// impl serde::ser::StdError for MySerError {} +/// ``` +pub trait Error: Debug + Display { + /// The underlying cause of this error, if any. 
+ fn source(&self) -> Option<&(dyn Error + 'static)> { + None + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/integer128.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/integer128.rs new file mode 100644 index 0000000000000000000000000000000000000000..c9ff9d64bd197794d671ee474ecd510b07fa6d2c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/integer128.rs @@ -0,0 +1,14 @@ +#[macro_export] +#[deprecated = " +This macro has no effect on any version of Serde released in the past 2 years. +It was used long ago in crates that needed to support Rustc older than 1.26.0, +or Emscripten targets older than 1.40.0, which did not yet have 128-bit integer +support. These days Serde requires a Rust compiler newer than that so 128-bit +integers are always supported. +"] +#[doc(hidden)] +macro_rules! serde_if_integer128 { + ($($tt:tt)*) => { + $($tt)* + }; +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ae495282bda35359b07e35fb55d891c8f7dacb82 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/lib.rs @@ -0,0 +1,285 @@ +//! # Serde +//! +//! Serde is a framework for ***ser***ializing and ***de***serializing Rust data +//! structures efficiently and generically. +//! +//! The Serde ecosystem consists of data structures that know how to serialize +//! and deserialize themselves along with data formats that know how to +//! serialize and deserialize other things. Serde provides the layer by which +//! these two groups interact with each other, allowing any supported data +//! structure to be serialized and deserialized using any supported data format. +//! +//! See the Serde website for additional documentation and +//! usage examples. +//! +//! 
## Design +//! +//! Where many other languages rely on runtime reflection for serializing data, +//! Serde is instead built on Rust's powerful trait system. A data structure +//! that knows how to serialize and deserialize itself is one that implements +//! Serde's `Serialize` and `Deserialize` traits (or uses Serde's derive +//! attribute to automatically generate implementations at compile time). This +//! avoids any overhead of reflection or runtime type information. In fact in +//! many situations the interaction between data structure and data format can +//! be completely optimized away by the Rust compiler, leaving Serde +//! serialization to perform the same speed as a handwritten serializer for the +//! specific selection of data structure and data format. +//! +//! ## Data formats +//! +//! The following is a partial list of data formats that have been implemented +//! for Serde by the community. +//! +//! - [JSON], the ubiquitous JavaScript Object Notation used by many HTTP APIs. +//! - [Postcard], a no\_std and embedded-systems friendly compact binary format. +//! - [CBOR], a Concise Binary Object Representation designed for small message +//! size without the need for version negotiation. +//! - [YAML], a self-proclaimed human-friendly configuration language that ain't +//! markup language. +//! - [MessagePack], an efficient binary format that resembles a compact JSON. +//! - [TOML], a minimal configuration format used by [Cargo]. +//! - [Pickle], a format common in the Python world. +//! - [RON], a Rusty Object Notation. +//! - [BSON], the data storage and network transfer format used by MongoDB. +//! - [Avro], a binary format used within Apache Hadoop, with support for schema +//! definition. +//! - [JSON5], a superset of JSON including some productions from ES5. +//! - [URL] query strings, in the x-www-form-urlencoded format. +//! - [Starlark], the format used for describing build targets by the Bazel and +//! Buck build systems. 
*(serialization only)* +//! - [Envy], a way to deserialize environment variables into Rust structs. +//! *(deserialization only)* +//! - [Envy Store], a way to deserialize [AWS Parameter Store] parameters into +//! Rust structs. *(deserialization only)* +//! - [S-expressions], the textual representation of code and data used by the +//! Lisp language family. +//! - [D-Bus]'s binary wire format. +//! - [FlexBuffers], the schemaless cousin of Google's FlatBuffers zero-copy +//! serialization format. +//! - [Bencode], a simple binary format used in the BitTorrent protocol. +//! - [Token streams], for processing Rust procedural macro input. +//! *(deserialization only)* +//! - [DynamoDB Items], the format used by [rusoto_dynamodb] to transfer data to +//! and from DynamoDB. +//! - [Hjson], a syntax extension to JSON designed around human reading and +//! editing. *(deserialization only)* +//! - [CSV], Comma-separated values is a tabular text file format. +//! +//! [JSON]: https://github.com/serde-rs/json +//! [Postcard]: https://github.com/jamesmunns/postcard +//! [CBOR]: https://github.com/enarx/ciborium +//! [YAML]: https://github.com/dtolnay/serde-yaml +//! [MessagePack]: https://github.com/3Hren/msgpack-rust +//! [TOML]: https://docs.rs/toml +//! [Pickle]: https://github.com/birkenfeld/serde-pickle +//! [RON]: https://github.com/ron-rs/ron +//! [BSON]: https://github.com/mongodb/bson-rust +//! [Avro]: https://docs.rs/apache-avro +//! [JSON5]: https://github.com/callum-oakley/json5-rs +//! [URL]: https://docs.rs/serde_qs +//! [Starlark]: https://github.com/dtolnay/serde-starlark +//! [Envy]: https://github.com/softprops/envy +//! [Envy Store]: https://github.com/softprops/envy-store +//! [Cargo]: https://doc.rust-lang.org/cargo/reference/manifest.html +//! [AWS Parameter Store]: https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html +//! [S-expressions]: https://github.com/rotty/lexpr-rs +//! 
[D-Bus]: https://docs.rs/zvariant +//! [FlexBuffers]: https://github.com/google/flatbuffers/tree/master/rust/flexbuffers +//! [Bencode]: https://github.com/P3KI/bendy +//! [Token streams]: https://github.com/oxidecomputer/serde_tokenstream +//! [DynamoDB Items]: https://docs.rs/serde_dynamo +//! [rusoto_dynamodb]: https://docs.rs/rusoto_dynamodb +//! [Hjson]: https://github.com/Canop/deser-hjson +//! [CSV]: https://docs.rs/csv + +//////////////////////////////////////////////////////////////////////////////// + +// Serde types in rustdoc of other crates get linked to here. +#![doc(html_root_url = "https://docs.rs/serde/1.0.228")] +// Support using Serde without the standard library! +#![cfg_attr(not(feature = "std"), no_std)] +// Show which crate feature enables conditionally compiled APIs in documentation. +#![cfg_attr(docsrs, feature(doc_cfg, rustdoc_internals))] +#![cfg_attr(docsrs, allow(internal_features))] +// Unstable functionality only if the user asks for it. For tracking and +// discussion of these features please refer to this issue: +// +// https://github.com/serde-rs/serde/issues/812 +#![cfg_attr(feature = "unstable", feature(never_type))] +#![allow( + unknown_lints, + bare_trait_objects, + deprecated, + mismatched_lifetime_syntaxes +)] +// Ignored clippy and clippy_pedantic lints +#![allow( + // clippy bug: https://github.com/rust-lang/rust-clippy/issues/5704 + clippy::unnested_or_patterns, + // clippy bug: https://github.com/rust-lang/rust-clippy/issues/7768 + clippy::semicolon_if_nothing_returned, + // not available in our oldest supported compiler + clippy::empty_enum, + clippy::type_repetition_in_bounds, // https://github.com/rust-lang/rust-clippy/issues/8772 + // integer and float ser/de requires these sorts of casts + clippy::cast_possible_truncation, + clippy::cast_possible_wrap, + clippy::cast_precision_loss, + clippy::cast_sign_loss, + // things are often more readable this way + clippy::cast_lossless, + clippy::module_name_repetitions, + 
clippy::single_match_else, + clippy::type_complexity, + clippy::use_self, + clippy::zero_prefixed_literal, + // correctly used + clippy::derive_partial_eq_without_eq, + clippy::enum_glob_use, + clippy::explicit_auto_deref, + clippy::incompatible_msrv, + clippy::let_underscore_untyped, + clippy::map_err_ignore, + clippy::new_without_default, + clippy::result_unit_err, + clippy::wildcard_imports, + // not practical + clippy::needless_pass_by_value, + clippy::similar_names, + clippy::too_many_lines, + // preference + clippy::doc_markdown, + clippy::elidable_lifetime_names, + clippy::needless_lifetimes, + clippy::unseparated_literal_suffix, + // false positive + clippy::needless_doctest_main, + // noisy + clippy::missing_errors_doc, + clippy::must_use_candidate, +)] +// Restrictions +#![deny(clippy::question_mark_used)] +// Rustc lints. +#![deny(missing_docs, unused_imports)] + +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(feature = "alloc")] +extern crate alloc; + +// Rustdoc has a lot of shortcomings related to cross-crate re-exports that make +// the rendered documentation of serde_core traits in serde more challenging to +// understand than the equivalent documentation of the same items in serde_core. +// https://github.com/rust-lang/rust/labels/A-cross-crate-reexports +// So, just for the purpose of docs.rs documentation, we inline the contents of +// serde_core into serde. This sidesteps all the cross-crate rustdoc bugs. +#[cfg(docsrs)] +#[macro_use] +#[path = "core/crate_root.rs"] +mod crate_root; + +#[cfg(docsrs)] +#[macro_use] +#[path = "core/macros.rs"] +mod macros; + +#[cfg(not(docsrs))] +macro_rules! crate_root { + () => { + /// A facade around all the types we need from the `std`, `core`, and `alloc` + /// crates. This avoids elaborate import wrangling having to happen in every + /// module. 
+ mod lib { + mod core { + #[cfg(not(feature = "std"))] + pub use core::*; + #[cfg(feature = "std")] + pub use std::*; + } + + pub use self::core::{f32, f64}; + pub use self::core::{ptr, str}; + + #[cfg(any(feature = "std", feature = "alloc"))] + pub use self::core::slice; + + pub use self::core::clone; + pub use self::core::convert; + pub use self::core::default; + pub use self::core::fmt::{self, Debug, Display, Write as FmtWrite}; + pub use self::core::marker::{self, PhantomData}; + pub use self::core::option; + pub use self::core::result; + + #[cfg(all(feature = "alloc", not(feature = "std")))] + pub use alloc::borrow::{Cow, ToOwned}; + #[cfg(feature = "std")] + pub use std::borrow::{Cow, ToOwned}; + + #[cfg(all(feature = "alloc", not(feature = "std")))] + pub use alloc::string::{String, ToString}; + #[cfg(feature = "std")] + pub use std::string::{String, ToString}; + + #[cfg(all(feature = "alloc", not(feature = "std")))] + pub use alloc::vec::Vec; + #[cfg(feature = "std")] + pub use std::vec::Vec; + + #[cfg(all(feature = "alloc", not(feature = "std")))] + pub use alloc::boxed::Box; + #[cfg(feature = "std")] + pub use std::boxed::Box; + } + + // None of this crate's error handling needs the `From::from` error conversion + // performed implicitly by the `?` operator or the standard library's `try!` + // macro. This simplified macro gives a 5.5% improvement in compile time + // compared to standard `try!`, and 9% improvement compared to `?`. + #[cfg(not(no_serde_derive))] + macro_rules! tri { + ($expr:expr) => { + match $expr { + Ok(val) => val, + Err(err) => return Err(err), + } + }; + } + + //////////////////////////////////////////////////////////////////////////////// + + pub use serde_core::{ + de, forward_to_deserialize_any, ser, Deserialize, Deserializer, Serialize, Serializer, + }; + + // Used by generated code and doc tests. Not public API. 
+ #[doc(hidden)] + mod private; + + include!(concat!(env!("OUT_DIR"), "/private.rs")); + }; +} + +crate_root!(); + +mod integer128; + +// Re-export #[derive(Serialize, Deserialize)]. +// +// The reason re-exporting is not enabled by default is that disabling it would +// be annoying for crates that provide handwritten impls or data formats. They +// would need to disable default features and then explicitly re-enable std. +#[cfg(feature = "serde_derive")] +extern crate serde_derive; + +/// Derive macro available if serde is built with `features = ["derive"]`. +#[cfg(feature = "serde_derive")] +#[cfg_attr(docsrs, doc(cfg(feature = "derive")))] +pub use serde_derive::{Deserialize, Serialize}; + +#[macro_export] +#[doc(hidden)] +macro_rules! __require_serde_not_serde_core { + () => {}; +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/private/de.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/private/de.rs new file mode 100644 index 0000000000000000000000000000000000000000..6f657f50eb3292be32a52c16feb81bd62e56e041 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/private/de.rs @@ -0,0 +1,3501 @@ +use crate::lib::*; + +use crate::de::value::{BorrowedBytesDeserializer, BytesDeserializer}; +use crate::de::{ + Deserialize, DeserializeSeed, Deserializer, EnumAccess, Error, IntoDeserializer, VariantAccess, + Visitor, +}; + +#[cfg(any(feature = "std", feature = "alloc"))] +use crate::de::{MapAccess, Unexpected}; + +#[cfg(any(feature = "std", feature = "alloc"))] +pub use self::content::{ + content_as_str, Content, ContentDeserializer, ContentRefDeserializer, ContentVisitor, + EnumDeserializer, InternallyTaggedUnitVisitor, TagContentOtherField, + TagContentOtherFieldVisitor, TagOrContentField, TagOrContentFieldVisitor, TaggedContentVisitor, + UntaggedUnitVisitor, +}; + +pub use crate::serde_core_private::InPlaceSeed; + +/// If the missing field is of type `Option` then 
treat is as `None`, +/// otherwise it is an error. +pub fn missing_field<'de, V, E>(field: &'static str) -> Result +where + V: Deserialize<'de>, + E: Error, +{ + struct MissingFieldDeserializer(&'static str, PhantomData); + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> Deserializer<'de> for MissingFieldDeserializer + where + E: Error, + { + type Error = E; + + fn deserialize_any(self, _visitor: V) -> Result + where + V: Visitor<'de>, + { + Err(Error::missing_field(self.0)) + } + + fn deserialize_option(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_none() + } + + serde_core::forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf unit unit_struct newtype_struct seq tuple + tuple_struct map struct enum identifier ignored_any + } + } + + let deserializer = MissingFieldDeserializer(field, PhantomData); + Deserialize::deserialize(deserializer) +} + +#[cfg(any(feature = "std", feature = "alloc"))] +pub fn borrow_cow_str<'de: 'a, 'a, D, R>(deserializer: D) -> Result +where + D: Deserializer<'de>, + R: From>, +{ + struct CowStrVisitor; + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'a> Visitor<'a> for CowStrVisitor { + type Value = Cow<'a, str>; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a string") + } + + fn visit_str(self, v: &str) -> Result + where + E: Error, + { + Ok(Cow::Owned(v.to_owned())) + } + + fn visit_borrowed_str(self, v: &'a str) -> Result + where + E: Error, + { + Ok(Cow::Borrowed(v)) + } + + fn visit_string(self, v: String) -> Result + where + E: Error, + { + Ok(Cow::Owned(v)) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: Error, + { + match str::from_utf8(v) { + Ok(s) => Ok(Cow::Owned(s.to_owned())), + Err(_) => Err(Error::invalid_value(Unexpected::Bytes(v), &self)), + } + } + + fn visit_borrowed_bytes(self, v: &'a [u8]) 
-> Result + where + E: Error, + { + match str::from_utf8(v) { + Ok(s) => Ok(Cow::Borrowed(s)), + Err(_) => Err(Error::invalid_value(Unexpected::Bytes(v), &self)), + } + } + + fn visit_byte_buf(self, v: Vec) -> Result + where + E: Error, + { + match String::from_utf8(v) { + Ok(s) => Ok(Cow::Owned(s)), + Err(e) => Err(Error::invalid_value( + Unexpected::Bytes(&e.into_bytes()), + &self, + )), + } + } + } + + deserializer.deserialize_str(CowStrVisitor).map(From::from) +} + +#[cfg(any(feature = "std", feature = "alloc"))] +pub fn borrow_cow_bytes<'de: 'a, 'a, D, R>(deserializer: D) -> Result +where + D: Deserializer<'de>, + R: From>, +{ + struct CowBytesVisitor; + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'a> Visitor<'a> for CowBytesVisitor { + type Value = Cow<'a, [u8]>; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a byte array") + } + + fn visit_str(self, v: &str) -> Result + where + E: Error, + { + Ok(Cow::Owned(v.as_bytes().to_vec())) + } + + fn visit_borrowed_str(self, v: &'a str) -> Result + where + E: Error, + { + Ok(Cow::Borrowed(v.as_bytes())) + } + + fn visit_string(self, v: String) -> Result + where + E: Error, + { + Ok(Cow::Owned(v.into_bytes())) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: Error, + { + Ok(Cow::Owned(v.to_vec())) + } + + fn visit_borrowed_bytes(self, v: &'a [u8]) -> Result + where + E: Error, + { + Ok(Cow::Borrowed(v)) + } + + fn visit_byte_buf(self, v: Vec) -> Result + where + E: Error, + { + Ok(Cow::Owned(v)) + } + } + + deserializer + .deserialize_bytes(CowBytesVisitor) + .map(From::from) +} + +#[cfg(any(feature = "std", feature = "alloc"))] +mod content { + // This module is private and nothing here should be used outside of + // generated code. 
+ // + // We will iterate on the implementation for a few releases and only have to + // worry about backward compatibility for the `untagged` and `tag` attributes + // rather than for this entire mechanism. + // + // This issue is tracking making some of this stuff public: + // https://github.com/serde-rs/serde/issues/741 + + use crate::lib::*; + + use crate::de::{ + self, Deserialize, DeserializeSeed, Deserializer, EnumAccess, Expected, IgnoredAny, + MapAccess, SeqAccess, Unexpected, Visitor, + }; + use crate::serde_core_private::size_hint; + pub use crate::serde_core_private::Content; + + pub fn content_as_str<'a, 'de>(content: &'a Content<'de>) -> Option<&'a str> { + match *content { + Content::Str(x) => Some(x), + Content::String(ref x) => Some(x), + Content::Bytes(x) => str::from_utf8(x).ok(), + Content::ByteBuf(ref x) => str::from_utf8(x).ok(), + _ => None, + } + } + + fn content_clone<'de>(content: &Content<'de>) -> Content<'de> { + match content { + Content::Bool(b) => Content::Bool(*b), + Content::U8(n) => Content::U8(*n), + Content::U16(n) => Content::U16(*n), + Content::U32(n) => Content::U32(*n), + Content::U64(n) => Content::U64(*n), + Content::I8(n) => Content::I8(*n), + Content::I16(n) => Content::I16(*n), + Content::I32(n) => Content::I32(*n), + Content::I64(n) => Content::I64(*n), + Content::F32(f) => Content::F32(*f), + Content::F64(f) => Content::F64(*f), + Content::Char(c) => Content::Char(*c), + Content::String(s) => Content::String(s.clone()), + Content::Str(s) => Content::Str(*s), + Content::ByteBuf(b) => Content::ByteBuf(b.clone()), + Content::Bytes(b) => Content::Bytes(b), + Content::None => Content::None, + Content::Some(content) => Content::Some(Box::new(content_clone(content))), + Content::Unit => Content::Unit, + Content::Newtype(content) => Content::Newtype(Box::new(content_clone(content))), + Content::Seq(seq) => Content::Seq(seq.iter().map(content_clone).collect()), + Content::Map(map) => Content::Map( + map.iter() + .map(|(k, v)| 
(content_clone(k), content_clone(v))) + .collect(), + ), + } + } + + #[cold] + fn content_unexpected<'a, 'de>(content: &'a Content<'de>) -> Unexpected<'a> { + match *content { + Content::Bool(b) => Unexpected::Bool(b), + Content::U8(n) => Unexpected::Unsigned(n as u64), + Content::U16(n) => Unexpected::Unsigned(n as u64), + Content::U32(n) => Unexpected::Unsigned(n as u64), + Content::U64(n) => Unexpected::Unsigned(n), + Content::I8(n) => Unexpected::Signed(n as i64), + Content::I16(n) => Unexpected::Signed(n as i64), + Content::I32(n) => Unexpected::Signed(n as i64), + Content::I64(n) => Unexpected::Signed(n), + Content::F32(f) => Unexpected::Float(f as f64), + Content::F64(f) => Unexpected::Float(f), + Content::Char(c) => Unexpected::Char(c), + Content::String(ref s) => Unexpected::Str(s), + Content::Str(s) => Unexpected::Str(s), + Content::ByteBuf(ref b) => Unexpected::Bytes(b), + Content::Bytes(b) => Unexpected::Bytes(b), + Content::None | Content::Some(_) => Unexpected::Option, + Content::Unit => Unexpected::Unit, + Content::Newtype(_) => Unexpected::NewtypeStruct, + Content::Seq(_) => Unexpected::Seq, + Content::Map(_) => Unexpected::Map, + } + } + + pub struct ContentVisitor<'de> { + value: PhantomData>, + } + + impl<'de> ContentVisitor<'de> { + pub fn new() -> Self { + ContentVisitor { value: PhantomData } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de> DeserializeSeed<'de> for ContentVisitor<'de> { + type Value = Content<'de>; + + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.__deserialize_content_v1(self) + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de> Visitor<'de> for ContentVisitor<'de> { + type Value = Content<'de>; + + fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str("any value") + } + + fn visit_bool(self, value: bool) -> Result + where + F: de::Error, + { + 
Ok(Content::Bool(value)) + } + + fn visit_i8(self, value: i8) -> Result + where + F: de::Error, + { + Ok(Content::I8(value)) + } + + fn visit_i16(self, value: i16) -> Result + where + F: de::Error, + { + Ok(Content::I16(value)) + } + + fn visit_i32(self, value: i32) -> Result + where + F: de::Error, + { + Ok(Content::I32(value)) + } + + fn visit_i64(self, value: i64) -> Result + where + F: de::Error, + { + Ok(Content::I64(value)) + } + + fn visit_u8(self, value: u8) -> Result + where + F: de::Error, + { + Ok(Content::U8(value)) + } + + fn visit_u16(self, value: u16) -> Result + where + F: de::Error, + { + Ok(Content::U16(value)) + } + + fn visit_u32(self, value: u32) -> Result + where + F: de::Error, + { + Ok(Content::U32(value)) + } + + fn visit_u64(self, value: u64) -> Result + where + F: de::Error, + { + Ok(Content::U64(value)) + } + + fn visit_f32(self, value: f32) -> Result + where + F: de::Error, + { + Ok(Content::F32(value)) + } + + fn visit_f64(self, value: f64) -> Result + where + F: de::Error, + { + Ok(Content::F64(value)) + } + + fn visit_char(self, value: char) -> Result + where + F: de::Error, + { + Ok(Content::Char(value)) + } + + fn visit_str(self, value: &str) -> Result + where + F: de::Error, + { + Ok(Content::String(value.into())) + } + + fn visit_borrowed_str(self, value: &'de str) -> Result + where + F: de::Error, + { + Ok(Content::Str(value)) + } + + fn visit_string(self, value: String) -> Result + where + F: de::Error, + { + Ok(Content::String(value)) + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + F: de::Error, + { + Ok(Content::ByteBuf(value.into())) + } + + fn visit_borrowed_bytes(self, value: &'de [u8]) -> Result + where + F: de::Error, + { + Ok(Content::Bytes(value)) + } + + fn visit_byte_buf(self, value: Vec) -> Result + where + F: de::Error, + { + Ok(Content::ByteBuf(value)) + } + + fn visit_unit(self) -> Result + where + F: de::Error, + { + Ok(Content::Unit) + } + + fn visit_none(self) -> Result + where + F: de::Error, + 
{ + Ok(Content::None) + } + + fn visit_some(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let v = tri!(ContentVisitor::new().deserialize(deserializer)); + Ok(Content::Some(Box::new(v))) + } + + fn visit_newtype_struct(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let v = tri!(ContentVisitor::new().deserialize(deserializer)); + Ok(Content::Newtype(Box::new(v))) + } + + fn visit_seq(self, mut visitor: V) -> Result + where + V: SeqAccess<'de>, + { + let mut vec = + Vec::::with_capacity(size_hint::cautious::(visitor.size_hint())); + while let Some(e) = tri!(visitor.next_element_seed(ContentVisitor::new())) { + vec.push(e); + } + Ok(Content::Seq(vec)) + } + + fn visit_map(self, mut visitor: V) -> Result + where + V: MapAccess<'de>, + { + let mut vec = + Vec::<(Content, Content)>::with_capacity( + size_hint::cautious::<(Content, Content)>(visitor.size_hint()), + ); + while let Some(kv) = + tri!(visitor.next_entry_seed(ContentVisitor::new(), ContentVisitor::new())) + { + vec.push(kv); + } + Ok(Content::Map(vec)) + } + + fn visit_enum(self, _visitor: V) -> Result + where + V: EnumAccess<'de>, + { + Err(de::Error::custom( + "untagged and internally tagged enums do not support enum input", + )) + } + } + + /// This is the type of the map keys in an internally tagged enum. + /// + /// Not public API. + pub enum TagOrContent<'de> { + Tag, + Content(Content<'de>), + } + + /// Serves as a seed for deserializing a key of internally tagged enum. + /// Cannot capture externally tagged enums, `i128` and `u128`. 
+ struct TagOrContentVisitor<'de> { + name: &'static str, + value: PhantomData>, + } + + impl<'de> TagOrContentVisitor<'de> { + fn new(name: &'static str) -> Self { + TagOrContentVisitor { + name, + value: PhantomData, + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de> DeserializeSeed<'de> for TagOrContentVisitor<'de> { + type Value = TagOrContent<'de>; + + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + // Internally tagged enums are only supported in self-describing + // formats. + deserializer.deserialize_any(self) + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de> Visitor<'de> for TagOrContentVisitor<'de> { + type Value = TagOrContent<'de>; + + fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "a type tag `{}` or any other value", self.name) + } + + fn visit_bool(self, value: bool) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_bool(value) + .map(TagOrContent::Content) + } + + fn visit_i8(self, value: i8) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_i8(value) + .map(TagOrContent::Content) + } + + fn visit_i16(self, value: i16) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_i16(value) + .map(TagOrContent::Content) + } + + fn visit_i32(self, value: i32) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_i32(value) + .map(TagOrContent::Content) + } + + fn visit_i64(self, value: i64) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_i64(value) + .map(TagOrContent::Content) + } + + fn visit_u8(self, value: u8) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_u8(value) + .map(TagOrContent::Content) + } + + fn visit_u16(self, value: u16) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_u16(value) + .map(TagOrContent::Content) + } + + fn visit_u32(self, 
value: u32) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_u32(value) + .map(TagOrContent::Content) + } + + fn visit_u64(self, value: u64) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_u64(value) + .map(TagOrContent::Content) + } + + fn visit_f32(self, value: f32) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_f32(value) + .map(TagOrContent::Content) + } + + fn visit_f64(self, value: f64) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_f64(value) + .map(TagOrContent::Content) + } + + fn visit_char(self, value: char) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_char(value) + .map(TagOrContent::Content) + } + + fn visit_str(self, value: &str) -> Result + where + F: de::Error, + { + if value == self.name { + Ok(TagOrContent::Tag) + } else { + ContentVisitor::new() + .visit_str(value) + .map(TagOrContent::Content) + } + } + + fn visit_borrowed_str(self, value: &'de str) -> Result + where + F: de::Error, + { + if value == self.name { + Ok(TagOrContent::Tag) + } else { + ContentVisitor::new() + .visit_borrowed_str(value) + .map(TagOrContent::Content) + } + } + + fn visit_string(self, value: String) -> Result + where + F: de::Error, + { + if value == self.name { + Ok(TagOrContent::Tag) + } else { + ContentVisitor::new() + .visit_string(value) + .map(TagOrContent::Content) + } + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + F: de::Error, + { + if value == self.name.as_bytes() { + Ok(TagOrContent::Tag) + } else { + ContentVisitor::new() + .visit_bytes(value) + .map(TagOrContent::Content) + } + } + + fn visit_borrowed_bytes(self, value: &'de [u8]) -> Result + where + F: de::Error, + { + if value == self.name.as_bytes() { + Ok(TagOrContent::Tag) + } else { + ContentVisitor::new() + .visit_borrowed_bytes(value) + .map(TagOrContent::Content) + } + } + + fn visit_byte_buf(self, value: Vec) -> Result + where + F: de::Error, + { + if value == 
self.name.as_bytes() { + Ok(TagOrContent::Tag) + } else { + ContentVisitor::new() + .visit_byte_buf(value) + .map(TagOrContent::Content) + } + } + + fn visit_unit(self) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_unit() + .map(TagOrContent::Content) + } + + fn visit_none(self) -> Result + where + F: de::Error, + { + ContentVisitor::new() + .visit_none() + .map(TagOrContent::Content) + } + + fn visit_some(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + ContentVisitor::new() + .visit_some(deserializer) + .map(TagOrContent::Content) + } + + fn visit_newtype_struct(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + ContentVisitor::new() + .visit_newtype_struct(deserializer) + .map(TagOrContent::Content) + } + + fn visit_seq(self, visitor: V) -> Result + where + V: SeqAccess<'de>, + { + ContentVisitor::new() + .visit_seq(visitor) + .map(TagOrContent::Content) + } + + fn visit_map(self, visitor: V) -> Result + where + V: MapAccess<'de>, + { + ContentVisitor::new() + .visit_map(visitor) + .map(TagOrContent::Content) + } + + fn visit_enum(self, visitor: V) -> Result + where + V: EnumAccess<'de>, + { + ContentVisitor::new() + .visit_enum(visitor) + .map(TagOrContent::Content) + } + } + + /// Used by generated code to deserialize an internally tagged enum. + /// + /// Captures map or sequence from the original deserializer and searches + /// a tag in it (in case of sequence, tag is the first element of sequence). + /// + /// Not public API. + pub struct TaggedContentVisitor { + tag_name: &'static str, + expecting: &'static str, + value: PhantomData, + } + + impl TaggedContentVisitor { + /// Visitor for the content of an internally tagged enum with the given + /// tag name. 
+ pub fn new(name: &'static str, expecting: &'static str) -> Self { + TaggedContentVisitor { + tag_name: name, + expecting, + value: PhantomData, + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, T> Visitor<'de> for TaggedContentVisitor + where + T: Deserialize<'de>, + { + type Value = (T, Content<'de>); + + fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str(self.expecting) + } + + fn visit_seq(self, mut seq: S) -> Result + where + S: SeqAccess<'de>, + { + let tag = match tri!(seq.next_element()) { + Some(tag) => tag, + None => { + return Err(de::Error::missing_field(self.tag_name)); + } + }; + let rest = de::value::SeqAccessDeserializer::new(seq); + Ok((tag, tri!(ContentVisitor::new().deserialize(rest)))) + } + + fn visit_map(self, mut map: M) -> Result + where + M: MapAccess<'de>, + { + let mut tag = None; + let mut vec = Vec::<(Content, Content)>::with_capacity(size_hint::cautious::<( + Content, + Content, + )>(map.size_hint())); + while let Some(k) = tri!(map.next_key_seed(TagOrContentVisitor::new(self.tag_name))) { + match k { + TagOrContent::Tag => { + if tag.is_some() { + return Err(de::Error::duplicate_field(self.tag_name)); + } + tag = Some(tri!(map.next_value())); + } + TagOrContent::Content(k) => { + let v = tri!(map.next_value_seed(ContentVisitor::new())); + vec.push((k, v)); + } + } + } + match tag { + None => Err(de::Error::missing_field(self.tag_name)), + Some(tag) => Ok((tag, Content::Map(vec))), + } + } + } + + /// Used by generated code to deserialize an adjacently tagged enum. + /// + /// Not public API. + pub enum TagOrContentField { + Tag, + Content, + } + + /// Not public API. 
+ pub struct TagOrContentFieldVisitor { + /// Name of the tag field of the adjacently tagged enum + pub tag: &'static str, + /// Name of the content field of the adjacently tagged enum + pub content: &'static str, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de> DeserializeSeed<'de> for TagOrContentFieldVisitor { + type Value = TagOrContentField; + + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_identifier(self) + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de> Visitor<'de> for TagOrContentFieldVisitor { + type Value = TagOrContentField; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{:?} or {:?}", self.tag, self.content) + } + + fn visit_u64(self, field_index: u64) -> Result + where + E: de::Error, + { + match field_index { + 0 => Ok(TagOrContentField::Tag), + 1 => Ok(TagOrContentField::Content), + _ => Err(de::Error::invalid_value( + Unexpected::Unsigned(field_index), + &self, + )), + } + } + + fn visit_str(self, field: &str) -> Result + where + E: de::Error, + { + if field == self.tag { + Ok(TagOrContentField::Tag) + } else if field == self.content { + Ok(TagOrContentField::Content) + } else { + Err(de::Error::invalid_value(Unexpected::Str(field), &self)) + } + } + + fn visit_bytes(self, field: &[u8]) -> Result + where + E: de::Error, + { + if field == self.tag.as_bytes() { + Ok(TagOrContentField::Tag) + } else if field == self.content.as_bytes() { + Ok(TagOrContentField::Content) + } else { + Err(de::Error::invalid_value(Unexpected::Bytes(field), &self)) + } + } + } + + /// Used by generated code to deserialize an adjacently tagged enum when + /// ignoring unrelated fields is allowed. + /// + /// Not public API. + pub enum TagContentOtherField { + Tag, + Content, + Other, + } + + /// Not public API. 
+ pub struct TagContentOtherFieldVisitor { + /// Name of the tag field of the adjacently tagged enum + pub tag: &'static str, + /// Name of the content field of the adjacently tagged enum + pub content: &'static str, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de> DeserializeSeed<'de> for TagContentOtherFieldVisitor { + type Value = TagContentOtherField; + + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_identifier(self) + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de> Visitor<'de> for TagContentOtherFieldVisitor { + type Value = TagContentOtherField; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!( + formatter, + "{:?}, {:?}, or other ignored fields", + self.tag, self.content + ) + } + + fn visit_u64(self, field_index: u64) -> Result + where + E: de::Error, + { + match field_index { + 0 => Ok(TagContentOtherField::Tag), + 1 => Ok(TagContentOtherField::Content), + _ => Ok(TagContentOtherField::Other), + } + } + + fn visit_str(self, field: &str) -> Result + where + E: de::Error, + { + self.visit_bytes(field.as_bytes()) + } + + fn visit_bytes(self, field: &[u8]) -> Result + where + E: de::Error, + { + if field == self.tag.as_bytes() { + Ok(TagContentOtherField::Tag) + } else if field == self.content.as_bytes() { + Ok(TagContentOtherField::Content) + } else { + Ok(TagContentOtherField::Other) + } + } + } + + /// Not public API + pub struct ContentDeserializer<'de, E> { + content: Content<'de>, + err: PhantomData, + } + + impl<'de, E> ContentDeserializer<'de, E> + where + E: de::Error, + { + #[cold] + fn invalid_type(self, exp: &dyn Expected) -> E { + de::Error::invalid_type(content_unexpected(&self.content), exp) + } + + fn deserialize_integer(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::U8(v) => visitor.visit_u8(v), + Content::U16(v) 
=> visitor.visit_u16(v), + Content::U32(v) => visitor.visit_u32(v), + Content::U64(v) => visitor.visit_u64(v), + Content::I8(v) => visitor.visit_i8(v), + Content::I16(v) => visitor.visit_i16(v), + Content::I32(v) => visitor.visit_i32(v), + Content::I64(v) => visitor.visit_i64(v), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_float(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::F32(v) => visitor.visit_f32(v), + Content::F64(v) => visitor.visit_f64(v), + Content::U8(v) => visitor.visit_u8(v), + Content::U16(v) => visitor.visit_u16(v), + Content::U32(v) => visitor.visit_u32(v), + Content::U64(v) => visitor.visit_u64(v), + Content::I8(v) => visitor.visit_i8(v), + Content::I16(v) => visitor.visit_i16(v), + Content::I32(v) => visitor.visit_i32(v), + Content::I64(v) => visitor.visit_i64(v), + _ => Err(self.invalid_type(&visitor)), + } + } + } + + fn visit_content_seq<'de, V, E>(content: Vec>, visitor: V) -> Result + where + V: Visitor<'de>, + E: de::Error, + { + let mut seq_visitor = SeqDeserializer::new(content); + let value = tri!(visitor.visit_seq(&mut seq_visitor)); + tri!(seq_visitor.end()); + Ok(value) + } + + fn visit_content_map<'de, V, E>( + content: Vec<(Content<'de>, Content<'de>)>, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + E: de::Error, + { + let mut map_visitor = MapDeserializer::new(content); + let value = tri!(visitor.visit_map(&mut map_visitor)); + tri!(map_visitor.end()); + Ok(value) + } + + /// Used when deserializing an internally tagged enum because the content + /// will be used exactly once. 
+ #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> Deserializer<'de> for ContentDeserializer<'de, E> + where + E: de::Error, + { + type Error = E; + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::Bool(v) => visitor.visit_bool(v), + Content::U8(v) => visitor.visit_u8(v), + Content::U16(v) => visitor.visit_u16(v), + Content::U32(v) => visitor.visit_u32(v), + Content::U64(v) => visitor.visit_u64(v), + Content::I8(v) => visitor.visit_i8(v), + Content::I16(v) => visitor.visit_i16(v), + Content::I32(v) => visitor.visit_i32(v), + Content::I64(v) => visitor.visit_i64(v), + Content::F32(v) => visitor.visit_f32(v), + Content::F64(v) => visitor.visit_f64(v), + Content::Char(v) => visitor.visit_char(v), + Content::String(v) => visitor.visit_string(v), + Content::Str(v) => visitor.visit_borrowed_str(v), + Content::ByteBuf(v) => visitor.visit_byte_buf(v), + Content::Bytes(v) => visitor.visit_borrowed_bytes(v), + Content::Unit => visitor.visit_unit(), + Content::None => visitor.visit_none(), + Content::Some(v) => visitor.visit_some(ContentDeserializer::new(*v)), + Content::Newtype(v) => visitor.visit_newtype_struct(ContentDeserializer::new(*v)), + Content::Seq(v) => visit_content_seq(v, visitor), + Content::Map(v) => visit_content_map(v, visitor), + } + } + + fn deserialize_bool(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::Bool(v) => visitor.visit_bool(v), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_i8(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_i16(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_i32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_i64(self, visitor: V) -> Result + where + V: 
Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_u8(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_u16(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_u32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_u64(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_f32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_float(visitor) + } + + fn deserialize_f64(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_float(visitor) + } + + fn deserialize_char(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::Char(v) => visitor.visit_char(v), + Content::String(v) => visitor.visit_string(v), + Content::Str(v) => visitor.visit_borrowed_str(v), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_str(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_string(visitor) + } + + fn deserialize_string(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::String(v) => visitor.visit_string(v), + Content::Str(v) => visitor.visit_borrowed_str(v), + Content::ByteBuf(v) => visitor.visit_byte_buf(v), + Content::Bytes(v) => visitor.visit_borrowed_bytes(v), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_bytes(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_byte_buf(visitor) + } + + fn deserialize_byte_buf(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::String(v) => visitor.visit_string(v), + Content::Str(v) => visitor.visit_borrowed_str(v), + Content::ByteBuf(v) => visitor.visit_byte_buf(v), + Content::Bytes(v) => 
visitor.visit_borrowed_bytes(v), + Content::Seq(v) => visit_content_seq(v, visitor), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_option(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::None => visitor.visit_none(), + Content::Some(v) => visitor.visit_some(ContentDeserializer::new(*v)), + Content::Unit => visitor.visit_unit(), + _ => visitor.visit_some(self), + } + } + + fn deserialize_unit(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::Unit => visitor.visit_unit(), + + // Allow deserializing newtype variant containing unit. + // + // #[derive(Deserialize)] + // #[serde(tag = "result")] + // enum Response { + // Success(T), + // } + // + // We want {"result":"Success"} to deserialize into Response<()>. + Content::Map(ref v) if v.is_empty() => visitor.visit_unit(), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_unit_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + match self.content { + // As a special case, allow deserializing untagged newtype + // variant containing unit struct. + // + // #[derive(Deserialize)] + // struct Info; + // + // #[derive(Deserialize)] + // #[serde(tag = "topic")] + // enum Message { + // Info(Info), + // } + // + // We want {"topic":"Info"} to deserialize even though + // ordinarily unit structs do not deserialize from empty map/seq. 
+ Content::Map(ref v) if v.is_empty() => visitor.visit_unit(), + Content::Seq(ref v) if v.is_empty() => visitor.visit_unit(), + _ => self.deserialize_any(visitor), + } + } + + fn deserialize_newtype_struct( + self, + _name: &str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::Newtype(v) => visitor.visit_newtype_struct(ContentDeserializer::new(*v)), + _ => visitor.visit_newtype_struct(self), + } + } + + fn deserialize_seq(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::Seq(v) => visit_content_seq(v, visitor), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_tuple(self, _len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_seq(visitor) + } + + fn deserialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.deserialize_seq(visitor) + } + + fn deserialize_map(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::Map(v) => visit_content_map(v, visitor), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_struct( + self, + _name: &'static str, + _fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::Seq(v) => visit_content_seq(v, visitor), + Content::Map(v) => visit_content_map(v, visitor), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_enum( + self, + _name: &str, + _variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + let (variant, value) = match self.content { + Content::Map(value) => { + let mut iter = value.into_iter(); + let (variant, value) = match iter.next() { + Some(v) => v, + None => { + return Err(de::Error::invalid_value( + de::Unexpected::Map, + &"map with a single key", + )); + } + }; + // enums are encoded in json as maps with a single key:value pair + 
if iter.next().is_some() { + return Err(de::Error::invalid_value( + de::Unexpected::Map, + &"map with a single key", + )); + } + (variant, Some(value)) + } + s @ Content::String(_) | s @ Content::Str(_) => (s, None), + other => { + return Err(de::Error::invalid_type( + content_unexpected(&other), + &"string or map", + )); + } + }; + + visitor.visit_enum(EnumDeserializer::new(variant, value)) + } + + fn deserialize_identifier(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match self.content { + Content::String(v) => visitor.visit_string(v), + Content::Str(v) => visitor.visit_borrowed_str(v), + Content::ByteBuf(v) => visitor.visit_byte_buf(v), + Content::Bytes(v) => visitor.visit_borrowed_bytes(v), + Content::U8(v) => visitor.visit_u8(v), + Content::U64(v) => visitor.visit_u64(v), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_ignored_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + drop(self); + visitor.visit_unit() + } + + fn __deserialize_content_v1(self, visitor: V) -> Result + where + V: Visitor<'de, Value = Content<'de>>, + { + let _ = visitor; + Ok(self.content) + } + } + + impl<'de, E> ContentDeserializer<'de, E> { + /// private API, don't use + pub fn new(content: Content<'de>) -> Self { + ContentDeserializer { + content, + err: PhantomData, + } + } + } + + struct SeqDeserializer<'de, E> { + iter: > as IntoIterator>::IntoIter, + count: usize, + marker: PhantomData, + } + + impl<'de, E> SeqDeserializer<'de, E> { + fn new(content: Vec>) -> Self { + SeqDeserializer { + iter: content.into_iter(), + count: 0, + marker: PhantomData, + } + } + } + + impl<'de, E> SeqDeserializer<'de, E> + where + E: de::Error, + { + fn end(self) -> Result<(), E> { + let remaining = self.iter.count(); + if remaining == 0 { + Ok(()) + } else { + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. 
+ Err(de::Error::invalid_length( + self.count + remaining, + &ExpectedInSeq(self.count), + )) + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> Deserializer<'de> for SeqDeserializer<'de, E> + where + E: de::Error, + { + type Error = E; + + fn deserialize_any(mut self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let v = tri!(visitor.visit_seq(&mut self)); + tri!(self.end()); + Ok(v) + } + + serde_core::forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct enum identifier ignored_any + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> SeqAccess<'de> for SeqDeserializer<'de, E> + where + E: de::Error, + { + type Error = E; + + fn next_element_seed(&mut self, seed: V) -> Result, Self::Error> + where + V: DeserializeSeed<'de>, + { + match self.iter.next() { + Some(value) => { + self.count += 1; + seed.deserialize(ContentDeserializer::new(value)).map(Some) + } + None => Ok(None), + } + } + + fn size_hint(&self) -> Option { + size_hint::from_bounds(&self.iter) + } + } + + struct ExpectedInSeq(usize); + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl Expected for ExpectedInSeq { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + if self.0 == 1 { + formatter.write_str("1 element in sequence") + } else { + write!(formatter, "{} elements in sequence", self.0) + } + } + } + + struct MapDeserializer<'de, E> { + iter: , Content<'de>)> as IntoIterator>::IntoIter, + value: Option>, + count: usize, + error: PhantomData, + } + + impl<'de, E> MapDeserializer<'de, E> { + fn new(content: Vec<(Content<'de>, Content<'de>)>) -> Self { + MapDeserializer { + iter: content.into_iter(), + value: None, + count: 0, + error: PhantomData, + } + } + } + + impl<'de, E> MapDeserializer<'de, E> + where + E: 
de::Error, + { + fn end(self) -> Result<(), E> { + let remaining = self.iter.count(); + if remaining == 0 { + Ok(()) + } else { + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. + Err(de::Error::invalid_length( + self.count + remaining, + &ExpectedInMap(self.count), + )) + } + } + } + + impl<'de, E> MapDeserializer<'de, E> { + fn next_pair(&mut self) -> Option<(Content<'de>, Content<'de>)> { + match self.iter.next() { + Some((k, v)) => { + self.count += 1; + Some((k, v)) + } + None => None, + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> Deserializer<'de> for MapDeserializer<'de, E> + where + E: de::Error, + { + type Error = E; + + fn deserialize_any(mut self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let value = tri!(visitor.visit_map(&mut self)); + tri!(self.end()); + Ok(value) + } + + fn deserialize_seq(mut self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let value = tri!(visitor.visit_seq(&mut self)); + tri!(self.end()); + Ok(value) + } + + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + let _ = len; + self.deserialize_seq(visitor) + } + + serde_core::forward_to_deserialize_any! 
{ + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct tuple_struct map + struct enum identifier ignored_any + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> MapAccess<'de> for MapDeserializer<'de, E> + where + E: de::Error, + { + type Error = E; + + fn next_key_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: DeserializeSeed<'de>, + { + match self.next_pair() { + Some((key, value)) => { + self.value = Some(value); + seed.deserialize(ContentDeserializer::new(key)).map(Some) + } + None => Ok(None), + } + } + + fn next_value_seed(&mut self, seed: T) -> Result + where + T: DeserializeSeed<'de>, + { + let value = self.value.take(); + // Panic because this indicates a bug in the program rather than an + // expected failure. + let value = value.expect("MapAccess::next_value called before next_key"); + seed.deserialize(ContentDeserializer::new(value)) + } + + fn next_entry_seed( + &mut self, + kseed: TK, + vseed: TV, + ) -> Result, Self::Error> + where + TK: DeserializeSeed<'de>, + TV: DeserializeSeed<'de>, + { + match self.next_pair() { + Some((key, value)) => { + let key = tri!(kseed.deserialize(ContentDeserializer::new(key))); + let value = tri!(vseed.deserialize(ContentDeserializer::new(value))); + Ok(Some((key, value))) + } + None => Ok(None), + } + } + + fn size_hint(&self) -> Option { + size_hint::from_bounds(&self.iter) + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> SeqAccess<'de> for MapDeserializer<'de, E> + where + E: de::Error, + { + type Error = E; + + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: de::DeserializeSeed<'de>, + { + match self.next_pair() { + Some((k, v)) => { + let de = PairDeserializer(k, v, PhantomData); + seed.deserialize(de).map(Some) + } + None => Ok(None), + } + } + + fn size_hint(&self) -> Option { + 
size_hint::from_bounds(&self.iter) + } + } + + struct PairDeserializer<'de, E>(Content<'de>, Content<'de>, PhantomData); + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> Deserializer<'de> for PairDeserializer<'de, E> + where + E: de::Error, + { + type Error = E; + + serde_core::forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct tuple_struct map + struct enum identifier ignored_any + } + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_seq(visitor) + } + + fn deserialize_seq(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let mut pair_visitor = PairVisitor(Some(self.0), Some(self.1), PhantomData); + let pair = tri!(visitor.visit_seq(&mut pair_visitor)); + if pair_visitor.1.is_none() { + Ok(pair) + } else { + let remaining = pair_visitor.size_hint().unwrap(); + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. + Err(de::Error::invalid_length(2, &ExpectedInSeq(2 - remaining))) + } + } + + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + if len == 2 { + self.deserialize_seq(visitor) + } else { + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. 
+ Err(de::Error::invalid_length(2, &ExpectedInSeq(len))) + } + } + } + + struct PairVisitor<'de, E>(Option>, Option>, PhantomData); + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> SeqAccess<'de> for PairVisitor<'de, E> + where + E: de::Error, + { + type Error = E; + + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: DeserializeSeed<'de>, + { + if let Some(k) = self.0.take() { + seed.deserialize(ContentDeserializer::new(k)).map(Some) + } else if let Some(v) = self.1.take() { + seed.deserialize(ContentDeserializer::new(v)).map(Some) + } else { + Ok(None) + } + } + + fn size_hint(&self) -> Option { + if self.0.is_some() { + Some(2) + } else if self.1.is_some() { + Some(1) + } else { + Some(0) + } + } + } + + struct ExpectedInMap(usize); + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl Expected for ExpectedInMap { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + if self.0 == 1 { + formatter.write_str("1 element in map") + } else { + write!(formatter, "{} elements in map", self.0) + } + } + } + + pub struct EnumDeserializer<'de, E> + where + E: de::Error, + { + variant: Content<'de>, + value: Option>, + err: PhantomData, + } + + impl<'de, E> EnumDeserializer<'de, E> + where + E: de::Error, + { + pub fn new(variant: Content<'de>, value: Option>) -> EnumDeserializer<'de, E> { + EnumDeserializer { + variant, + value, + err: PhantomData, + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> de::EnumAccess<'de> for EnumDeserializer<'de, E> + where + E: de::Error, + { + type Error = E; + type Variant = VariantDeserializer<'de, Self::Error>; + + fn variant_seed(self, seed: V) -> Result<(V::Value, Self::Variant), E> + where + V: de::DeserializeSeed<'de>, + { + let visitor = VariantDeserializer { + value: self.value, + err: PhantomData, + }; + seed.deserialize(ContentDeserializer::new(self.variant)) + 
.map(|v| (v, visitor)) + } + } + + pub struct VariantDeserializer<'de, E> + where + E: de::Error, + { + value: Option>, + err: PhantomData, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> de::VariantAccess<'de> for VariantDeserializer<'de, E> + where + E: de::Error, + { + type Error = E; + + fn unit_variant(self) -> Result<(), E> { + match self.value { + Some(value) => de::Deserialize::deserialize(ContentDeserializer::new(value)), + None => Ok(()), + } + } + + fn newtype_variant_seed(self, seed: T) -> Result + where + T: de::DeserializeSeed<'de>, + { + match self.value { + Some(value) => seed.deserialize(ContentDeserializer::new(value)), + None => Err(de::Error::invalid_type( + de::Unexpected::UnitVariant, + &"newtype variant", + )), + } + } + + fn tuple_variant(self, _len: usize, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + match self.value { + Some(Content::Seq(v)) => { + de::Deserializer::deserialize_any(SeqDeserializer::new(v), visitor) + } + Some(other) => Err(de::Error::invalid_type( + content_unexpected(&other), + &"tuple variant", + )), + None => Err(de::Error::invalid_type( + de::Unexpected::UnitVariant, + &"tuple variant", + )), + } + } + + fn struct_variant( + self, + _fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + match self.value { + Some(Content::Map(v)) => { + de::Deserializer::deserialize_any(MapDeserializer::new(v), visitor) + } + Some(Content::Seq(v)) => { + de::Deserializer::deserialize_any(SeqDeserializer::new(v), visitor) + } + Some(other) => Err(de::Error::invalid_type( + content_unexpected(&other), + &"struct variant", + )), + None => Err(de::Error::invalid_type( + de::Unexpected::UnitVariant, + &"struct variant", + )), + } + } + } + + /// Not public API. 
+ pub struct ContentRefDeserializer<'a, 'de: 'a, E> { + content: &'a Content<'de>, + err: PhantomData, + } + + impl<'a, 'de, E> ContentRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + #[cold] + fn invalid_type(self, exp: &dyn Expected) -> E { + de::Error::invalid_type(content_unexpected(self.content), exp) + } + + fn deserialize_integer(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::U8(v) => visitor.visit_u8(v), + Content::U16(v) => visitor.visit_u16(v), + Content::U32(v) => visitor.visit_u32(v), + Content::U64(v) => visitor.visit_u64(v), + Content::I8(v) => visitor.visit_i8(v), + Content::I16(v) => visitor.visit_i16(v), + Content::I32(v) => visitor.visit_i32(v), + Content::I64(v) => visitor.visit_i64(v), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_float(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::F32(v) => visitor.visit_f32(v), + Content::F64(v) => visitor.visit_f64(v), + Content::U8(v) => visitor.visit_u8(v), + Content::U16(v) => visitor.visit_u16(v), + Content::U32(v) => visitor.visit_u32(v), + Content::U64(v) => visitor.visit_u64(v), + Content::I8(v) => visitor.visit_i8(v), + Content::I16(v) => visitor.visit_i16(v), + Content::I32(v) => visitor.visit_i32(v), + Content::I64(v) => visitor.visit_i64(v), + _ => Err(self.invalid_type(&visitor)), + } + } + } + + fn visit_content_seq_ref<'a, 'de, V, E>( + content: &'a [Content<'de>], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + E: de::Error, + { + let mut seq_visitor = SeqRefDeserializer::new(content); + let value = tri!(visitor.visit_seq(&mut seq_visitor)); + tri!(seq_visitor.end()); + Ok(value) + } + + fn visit_content_map_ref<'a, 'de, V, E>( + content: &'a [(Content<'de>, Content<'de>)], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + E: de::Error, + { + let mut map_visitor = MapRefDeserializer::new(content); + let value = tri!(visitor.visit_map(&mut map_visitor)); + 
tri!(map_visitor.end()); + Ok(value) + } + + /// Used when deserializing an untagged enum because the content may need + /// to be used more than once. + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, 'a, E> Deserializer<'de> for ContentRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + type Error = E; + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::Bool(v) => visitor.visit_bool(v), + Content::U8(v) => visitor.visit_u8(v), + Content::U16(v) => visitor.visit_u16(v), + Content::U32(v) => visitor.visit_u32(v), + Content::U64(v) => visitor.visit_u64(v), + Content::I8(v) => visitor.visit_i8(v), + Content::I16(v) => visitor.visit_i16(v), + Content::I32(v) => visitor.visit_i32(v), + Content::I64(v) => visitor.visit_i64(v), + Content::F32(v) => visitor.visit_f32(v), + Content::F64(v) => visitor.visit_f64(v), + Content::Char(v) => visitor.visit_char(v), + Content::String(ref v) => visitor.visit_str(v), + Content::Str(v) => visitor.visit_borrowed_str(v), + Content::ByteBuf(ref v) => visitor.visit_bytes(v), + Content::Bytes(v) => visitor.visit_borrowed_bytes(v), + Content::Unit => visitor.visit_unit(), + Content::None => visitor.visit_none(), + Content::Some(ref v) => visitor.visit_some(ContentRefDeserializer::new(v)), + Content::Newtype(ref v) => { + visitor.visit_newtype_struct(ContentRefDeserializer::new(v)) + } + Content::Seq(ref v) => visit_content_seq_ref(v, visitor), + Content::Map(ref v) => visit_content_map_ref(v, visitor), + } + } + + fn deserialize_bool(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::Bool(v) => visitor.visit_bool(v), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_i8(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_i16(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + 
self.deserialize_integer(visitor) + } + + fn deserialize_i32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_i64(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_u8(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_u16(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_u32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_u64(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_integer(visitor) + } + + fn deserialize_f32(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_float(visitor) + } + + fn deserialize_f64(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_float(visitor) + } + + fn deserialize_char(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::Char(v) => visitor.visit_char(v), + Content::String(ref v) => visitor.visit_str(v), + Content::Str(v) => visitor.visit_borrowed_str(v), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_str(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::String(ref v) => visitor.visit_str(v), + Content::Str(v) => visitor.visit_borrowed_str(v), + Content::ByteBuf(ref v) => visitor.visit_bytes(v), + Content::Bytes(v) => visitor.visit_borrowed_bytes(v), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_string(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_str(visitor) + } + + fn deserialize_bytes(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::String(ref v) => visitor.visit_str(v), + Content::Str(v) => 
visitor.visit_borrowed_str(v), + Content::ByteBuf(ref v) => visitor.visit_bytes(v), + Content::Bytes(v) => visitor.visit_borrowed_bytes(v), + Content::Seq(ref v) => visit_content_seq_ref(v, visitor), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_byte_buf(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_bytes(visitor) + } + + fn deserialize_option(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + // Covered by tests/test_enum_untagged.rs + // with_optional_field::* + match *self.content { + Content::None => visitor.visit_none(), + Content::Some(ref v) => visitor.visit_some(ContentRefDeserializer::new(v)), + Content::Unit => visitor.visit_unit(), + // This case is to support data formats which do not encode an + // indication whether a value is optional. An example of such a + // format is JSON, and a counterexample is RON. When requesting + // `deserialize_any` in JSON, the data format never performs + // `Visitor::visit_some` but we still must be able to + // deserialize the resulting Content into data structures with + // optional fields. + _ => visitor.visit_some(self), + } + } + + fn deserialize_unit(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::Unit => visitor.visit_unit(), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_unit_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.deserialize_unit(visitor) + } + + fn deserialize_newtype_struct(self, _name: &str, visitor: V) -> Result + where + V: Visitor<'de>, + { + // Covered by tests/test_enum_untagged.rs + // newtype_struct + match *self.content { + Content::Newtype(ref v) => { + visitor.visit_newtype_struct(ContentRefDeserializer::new(v)) + } + // This case is to support data formats that encode newtype + // structs and their underlying data the same, with no + // indication whether a newtype wrapper was present. 
For example + // JSON does this, while RON does not. In RON a newtype's name + // is included in the serialized representation and it knows to + // call `Visitor::visit_newtype_struct` from `deserialize_any`. + // JSON's `deserialize_any` never calls `visit_newtype_struct` + // but in this code we still must be able to deserialize the + // resulting Content into newtypes. + _ => visitor.visit_newtype_struct(self), + } + } + + fn deserialize_seq(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::Seq(ref v) => visit_content_seq_ref(v, visitor), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_tuple(self, _len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_seq(visitor) + } + + fn deserialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.deserialize_seq(visitor) + } + + fn deserialize_map(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::Map(ref v) => visit_content_map_ref(v, visitor), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_struct( + self, + _name: &'static str, + _fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::Seq(ref v) => visit_content_seq_ref(v, visitor), + Content::Map(ref v) => visit_content_map_ref(v, visitor), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_enum( + self, + _name: &str, + _variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + let (variant, value) = match *self.content { + Content::Map(ref value) => { + let mut iter = value.iter(); + let (variant, value) = match iter.next() { + Some(v) => v, + None => { + return Err(de::Error::invalid_value( + de::Unexpected::Map, + &"map with a single key", + )); + } + }; + // enums are encoded in json as maps with a single 
key:value pair + if iter.next().is_some() { + return Err(de::Error::invalid_value( + de::Unexpected::Map, + &"map with a single key", + )); + } + (variant, Some(value)) + } + ref s @ Content::String(_) | ref s @ Content::Str(_) => (s, None), + ref other => { + return Err(de::Error::invalid_type( + content_unexpected(other), + &"string or map", + )); + } + }; + + visitor.visit_enum(EnumRefDeserializer { + variant, + value, + err: PhantomData, + }) + } + + fn deserialize_identifier(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match *self.content { + Content::String(ref v) => visitor.visit_str(v), + Content::Str(v) => visitor.visit_borrowed_str(v), + Content::ByteBuf(ref v) => visitor.visit_bytes(v), + Content::Bytes(v) => visitor.visit_borrowed_bytes(v), + Content::U8(v) => visitor.visit_u8(v), + Content::U64(v) => visitor.visit_u64(v), + _ => Err(self.invalid_type(&visitor)), + } + } + + fn deserialize_ignored_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_unit() + } + + fn __deserialize_content_v1(self, visitor: V) -> Result + where + V: Visitor<'de, Value = Content<'de>>, + { + let _ = visitor; + Ok(content_clone(self.content)) + } + } + + impl<'a, 'de, E> ContentRefDeserializer<'a, 'de, E> { + /// private API, don't use + pub fn new(content: &'a Content<'de>) -> Self { + ContentRefDeserializer { + content, + err: PhantomData, + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'a, 'de: 'a, E> Copy for ContentRefDeserializer<'a, 'de, E> {} + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'a, 'de: 'a, E> Clone for ContentRefDeserializer<'a, 'de, E> { + fn clone(&self) -> Self { + *self + } + } + + struct SeqRefDeserializer<'a, 'de, E> { + iter: <&'a [Content<'de>] as IntoIterator>::IntoIter, + count: usize, + marker: PhantomData, + } + + impl<'a, 'de, E> SeqRefDeserializer<'a, 'de, E> { + fn new(content: &'a [Content<'de>]) -> Self { + 
SeqRefDeserializer { + iter: content.iter(), + count: 0, + marker: PhantomData, + } + } + } + + impl<'a, 'de, E> SeqRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + fn end(self) -> Result<(), E> { + let remaining = self.iter.count(); + if remaining == 0 { + Ok(()) + } else { + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. + Err(de::Error::invalid_length( + self.count + remaining, + &ExpectedInSeq(self.count), + )) + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'a, 'de, E> Deserializer<'de> for SeqRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + type Error = E; + + fn deserialize_any(mut self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let v = tri!(visitor.visit_seq(&mut self)); + tri!(self.end()); + Ok(v) + } + + serde_core::forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct enum identifier ignored_any + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'a, 'de, E> SeqAccess<'de> for SeqRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + type Error = E; + + fn next_element_seed(&mut self, seed: V) -> Result, Self::Error> + where + V: DeserializeSeed<'de>, + { + match self.iter.next() { + Some(value) => { + self.count += 1; + seed.deserialize(ContentRefDeserializer::new(value)) + .map(Some) + } + None => Ok(None), + } + } + + fn size_hint(&self) -> Option { + size_hint::from_bounds(&self.iter) + } + } + + struct MapRefDeserializer<'a, 'de, E> { + iter: <&'a [(Content<'de>, Content<'de>)] as IntoIterator>::IntoIter, + value: Option<&'a Content<'de>>, + count: usize, + error: PhantomData, + } + + impl<'a, 'de, E> MapRefDeserializer<'a, 'de, E> { + fn new(content: &'a [(Content<'de>, Content<'de>)]) -> Self { + 
MapRefDeserializer { + iter: content.iter(), + value: None, + count: 0, + error: PhantomData, + } + } + } + + impl<'a, 'de, E> MapRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + fn end(self) -> Result<(), E> { + let remaining = self.iter.count(); + if remaining == 0 { + Ok(()) + } else { + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. + Err(de::Error::invalid_length( + self.count + remaining, + &ExpectedInMap(self.count), + )) + } + } + } + + impl<'a, 'de, E> MapRefDeserializer<'a, 'de, E> { + fn next_pair(&mut self) -> Option<(&'a Content<'de>, &'a Content<'de>)> { + match self.iter.next() { + Some((k, v)) => { + self.count += 1; + Some((k, v)) + } + None => None, + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'a, 'de, E> Deserializer<'de> for MapRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + type Error = E; + + fn deserialize_any(mut self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let value = tri!(visitor.visit_map(&mut self)); + tri!(self.end()); + Ok(value) + } + + fn deserialize_seq(mut self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let value = tri!(visitor.visit_seq(&mut self)); + tri!(self.end()); + Ok(value) + } + + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + let _ = len; + self.deserialize_seq(visitor) + } + + serde_core::forward_to_deserialize_any! 
{ + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct tuple_struct map + struct enum identifier ignored_any + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'a, 'de, E> MapAccess<'de> for MapRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + type Error = E; + + fn next_key_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: DeserializeSeed<'de>, + { + match self.next_pair() { + Some((key, value)) => { + self.value = Some(value); + seed.deserialize(ContentRefDeserializer::new(key)).map(Some) + } + None => Ok(None), + } + } + + fn next_value_seed(&mut self, seed: T) -> Result + where + T: DeserializeSeed<'de>, + { + let value = self.value.take(); + // Panic because this indicates a bug in the program rather than an + // expected failure. + let value = value.expect("MapAccess::next_value called before next_key"); + seed.deserialize(ContentRefDeserializer::new(value)) + } + + fn next_entry_seed( + &mut self, + kseed: TK, + vseed: TV, + ) -> Result, Self::Error> + where + TK: DeserializeSeed<'de>, + TV: DeserializeSeed<'de>, + { + match self.next_pair() { + Some((key, value)) => { + let key = tri!(kseed.deserialize(ContentRefDeserializer::new(key))); + let value = tri!(vseed.deserialize(ContentRefDeserializer::new(value))); + Ok(Some((key, value))) + } + None => Ok(None), + } + } + + fn size_hint(&self) -> Option { + size_hint::from_bounds(&self.iter) + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'a, 'de, E> SeqAccess<'de> for MapRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + type Error = E; + + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: de::DeserializeSeed<'de>, + { + match self.next_pair() { + Some((k, v)) => { + let de = PairRefDeserializer(k, v, PhantomData); + seed.deserialize(de).map(Some) + } + None => Ok(None), + } + } + + fn 
size_hint(&self) -> Option { + size_hint::from_bounds(&self.iter) + } + } + + struct PairRefDeserializer<'a, 'de, E>(&'a Content<'de>, &'a Content<'de>, PhantomData); + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'a, 'de, E> Deserializer<'de> for PairRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + type Error = E; + + serde_core::forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct tuple_struct map + struct enum identifier ignored_any + } + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_seq(visitor) + } + + fn deserialize_seq(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let mut pair_visitor = PairRefVisitor(Some(self.0), Some(self.1), PhantomData); + let pair = tri!(visitor.visit_seq(&mut pair_visitor)); + if pair_visitor.1.is_none() { + Ok(pair) + } else { + let remaining = pair_visitor.size_hint().unwrap(); + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. + Err(de::Error::invalid_length(2, &ExpectedInSeq(2 - remaining))) + } + } + + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + if len == 2 { + self.deserialize_seq(visitor) + } else { + // First argument is the number of elements in the data, second + // argument is the number of elements expected by the Deserialize. 
+ Err(de::Error::invalid_length(2, &ExpectedInSeq(len))) + } + } + } + + struct PairRefVisitor<'a, 'de, E>( + Option<&'a Content<'de>>, + Option<&'a Content<'de>>, + PhantomData, + ); + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'a, 'de, E> SeqAccess<'de> for PairRefVisitor<'a, 'de, E> + where + E: de::Error, + { + type Error = E; + + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: DeserializeSeed<'de>, + { + if let Some(k) = self.0.take() { + seed.deserialize(ContentRefDeserializer::new(k)).map(Some) + } else if let Some(v) = self.1.take() { + seed.deserialize(ContentRefDeserializer::new(v)).map(Some) + } else { + Ok(None) + } + } + + fn size_hint(&self) -> Option { + if self.0.is_some() { + Some(2) + } else if self.1.is_some() { + Some(1) + } else { + Some(0) + } + } + } + + struct EnumRefDeserializer<'a, 'de: 'a, E> + where + E: de::Error, + { + variant: &'a Content<'de>, + value: Option<&'a Content<'de>>, + err: PhantomData, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, 'a, E> de::EnumAccess<'de> for EnumRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + type Error = E; + type Variant = VariantRefDeserializer<'a, 'de, Self::Error>; + + fn variant_seed(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error> + where + V: de::DeserializeSeed<'de>, + { + let visitor = VariantRefDeserializer { + value: self.value, + err: PhantomData, + }; + seed.deserialize(ContentRefDeserializer::new(self.variant)) + .map(|v| (v, visitor)) + } + } + + struct VariantRefDeserializer<'a, 'de: 'a, E> + where + E: de::Error, + { + value: Option<&'a Content<'de>>, + err: PhantomData, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, 'a, E> de::VariantAccess<'de> for VariantRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + type Error = E; + + fn unit_variant(self) -> Result<(), E> { + match self.value { + 
Some(value) => de::Deserialize::deserialize(ContentRefDeserializer::new(value)), + // Covered by tests/test_annotations.rs + // test_partially_untagged_adjacently_tagged_enum + // Covered by tests/test_enum_untagged.rs + // newtype_enum::unit + None => Ok(()), + } + } + + fn newtype_variant_seed(self, seed: T) -> Result + where + T: de::DeserializeSeed<'de>, + { + match self.value { + // Covered by tests/test_annotations.rs + // test_partially_untagged_enum_desugared + // test_partially_untagged_enum_generic + // Covered by tests/test_enum_untagged.rs + // newtype_enum::newtype + Some(value) => seed.deserialize(ContentRefDeserializer::new(value)), + None => Err(de::Error::invalid_type( + de::Unexpected::UnitVariant, + &"newtype variant", + )), + } + } + + fn tuple_variant(self, _len: usize, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + match self.value { + // Covered by tests/test_annotations.rs + // test_partially_untagged_enum + // test_partially_untagged_enum_desugared + // Covered by tests/test_enum_untagged.rs + // newtype_enum::tuple0 + // newtype_enum::tuple2 + Some(Content::Seq(v)) => visit_content_seq_ref(v, visitor), + Some(other) => Err(de::Error::invalid_type( + content_unexpected(other), + &"tuple variant", + )), + None => Err(de::Error::invalid_type( + de::Unexpected::UnitVariant, + &"tuple variant", + )), + } + } + + fn struct_variant( + self, + _fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + match self.value { + // Covered by tests/test_enum_untagged.rs + // newtype_enum::struct_from_map + Some(Content::Map(v)) => visit_content_map_ref(v, visitor), + // Covered by tests/test_enum_untagged.rs + // newtype_enum::struct_from_seq + // newtype_enum::empty_struct_from_seq + Some(Content::Seq(v)) => visit_content_seq_ref(v, visitor), + Some(other) => Err(de::Error::invalid_type( + content_unexpected(other), + &"struct variant", + )), + None => Err(de::Error::invalid_type( + 
de::Unexpected::UnitVariant, + &"struct variant", + )), + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, E> de::IntoDeserializer<'de, E> for ContentDeserializer<'de, E> + where + E: de::Error, + { + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, 'a, E> de::IntoDeserializer<'de, E> for ContentRefDeserializer<'a, 'de, E> + where + E: de::Error, + { + type Deserializer = Self; + + fn into_deserializer(self) -> Self { + self + } + } + + /// Visitor for deserializing an internally tagged unit variant. + /// + /// Not public API. + pub struct InternallyTaggedUnitVisitor<'a> { + type_name: &'a str, + variant_name: &'a str, + } + + impl<'a> InternallyTaggedUnitVisitor<'a> { + /// Not public API. + pub fn new(type_name: &'a str, variant_name: &'a str) -> Self { + InternallyTaggedUnitVisitor { + type_name, + variant_name, + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, 'a> Visitor<'de> for InternallyTaggedUnitVisitor<'a> { + type Value = (); + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!( + formatter, + "unit variant {}::{}", + self.type_name, self.variant_name + ) + } + + fn visit_seq(self, _: S) -> Result<(), S::Error> + where + S: SeqAccess<'de>, + { + Ok(()) + } + + fn visit_map(self, mut access: M) -> Result<(), M::Error> + where + M: MapAccess<'de>, + { + while tri!(access.next_entry::()).is_some() {} + Ok(()) + } + } + + /// Visitor for deserializing an untagged unit variant. + /// + /// Not public API. + pub struct UntaggedUnitVisitor<'a> { + type_name: &'a str, + variant_name: &'a str, + } + + impl<'a> UntaggedUnitVisitor<'a> { + /// Not public API. 
+ pub fn new(type_name: &'a str, variant_name: &'a str) -> Self { + UntaggedUnitVisitor { + type_name, + variant_name, + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl<'de, 'a> Visitor<'de> for UntaggedUnitVisitor<'a> { + type Value = (); + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!( + formatter, + "unit variant {}::{}", + self.type_name, self.variant_name + ) + } + + fn visit_unit(self) -> Result<(), E> + where + E: de::Error, + { + Ok(()) + } + + fn visit_none(self) -> Result<(), E> + where + E: de::Error, + { + Ok(()) + } + } +} + +//////////////////////////////////////////////////////////////////////////////// + +// Like `IntoDeserializer` but also implemented for `&[u8]`. This is used for +// the newtype fallthrough case of `field_identifier`. +// +// #[derive(Deserialize)] +// #[serde(field_identifier)] +// enum F { +// A, +// B, +// Other(String), // deserialized using IdentifierDeserializer +// } +pub trait IdentifierDeserializer<'de, E: Error> { + type Deserializer: Deserializer<'de, Error = E>; + + fn from(self) -> Self::Deserializer; +} + +pub struct Borrowed<'de, T: 'de + ?Sized>(pub &'de T); + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'de, E> IdentifierDeserializer<'de, E> for u64 +where + E: Error, +{ + type Deserializer = >::Deserializer; + + fn from(self) -> Self::Deserializer { + self.into_deserializer() + } +} + +pub struct StrDeserializer<'a, E> { + value: &'a str, + marker: PhantomData, +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'de, 'a, E> Deserializer<'de> for StrDeserializer<'a, E> +where + E: Error, +{ + type Error = E; + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_str(self.value) + } + + serde_core::forward_to_deserialize_any! 
{ + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct enum identifier ignored_any + } +} + +pub struct BorrowedStrDeserializer<'de, E> { + value: &'de str, + marker: PhantomData, +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'de, E> Deserializer<'de> for BorrowedStrDeserializer<'de, E> +where + E: Error, +{ + type Error = E; + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_borrowed_str(self.value) + } + + serde_core::forward_to_deserialize_any! { + bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string + bytes byte_buf option unit unit_struct newtype_struct seq tuple + tuple_struct map struct enum identifier ignored_any + } +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'a, E> IdentifierDeserializer<'a, E> for &'a str +where + E: Error, +{ + type Deserializer = StrDeserializer<'a, E>; + + fn from(self) -> Self::Deserializer { + StrDeserializer { + value: self, + marker: PhantomData, + } + } +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'de, E> IdentifierDeserializer<'de, E> for Borrowed<'de, str> +where + E: Error, +{ + type Deserializer = BorrowedStrDeserializer<'de, E>; + + fn from(self) -> Self::Deserializer { + BorrowedStrDeserializer { + value: self.0, + marker: PhantomData, + } + } +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'a, E> IdentifierDeserializer<'a, E> for &'a [u8] +where + E: Error, +{ + type Deserializer = BytesDeserializer<'a, E>; + + fn from(self) -> Self::Deserializer { + BytesDeserializer::new(self) + } +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'de, E> IdentifierDeserializer<'de, E> for Borrowed<'de, [u8]> +where + E: Error, +{ + type Deserializer = BorrowedBytesDeserializer<'de, 
E>; + + fn from(self) -> Self::Deserializer { + BorrowedBytesDeserializer::new(self.0) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +pub struct FlatMapDeserializer<'a, 'de: 'a, E>( + pub &'a mut Vec, Content<'de>)>>, + pub PhantomData, +); + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'a, 'de, E> FlatMapDeserializer<'a, 'de, E> +where + E: Error, +{ + fn deserialize_other() -> Result { + Err(Error::custom("can only flatten structs and maps")) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +macro_rules! forward_to_deserialize_other { + ($($func:ident ($($arg:ty),*))*) => { + $( + fn $func(self, $(_: $arg,)* _visitor: V) -> Result + where + V: Visitor<'de>, + { + Self::deserialize_other() + } + )* + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'a, 'de, E> Deserializer<'de> for FlatMapDeserializer<'a, 'de, E> +where + E: Error, +{ + type Error = E; + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_map(visitor) + } + + fn deserialize_enum( + self, + name: &'static str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + for entry in self.0 { + if let Some((key, value)) = flat_map_take_entry(entry, variants) { + return visitor.visit_enum(EnumDeserializer::new(key, Some(value))); + } + } + + Err(Error::custom(format_args!( + "no variant of enum {} found in flattened data", + name + ))) + } + + fn deserialize_map(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_map(FlatMapAccess { + iter: self.0.iter(), + pending_content: None, + _marker: PhantomData, + }) + } + + fn deserialize_struct( + self, + _: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + visitor.visit_map(FlatStructAccess { + iter: self.0.iter_mut(), + pending_content: None, + fields, + _marker: PhantomData, 
+ }) + } + + fn deserialize_newtype_struct(self, _name: &str, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_newtype_struct(self) + } + + fn deserialize_option(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match visitor.__private_visit_untagged_option(self) { + Ok(value) => Ok(value), + Err(()) => Self::deserialize_other(), + } + } + + fn deserialize_unit(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_unit() + } + + fn deserialize_unit_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + visitor.visit_unit() + } + + fn deserialize_ignored_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + visitor.visit_unit() + } + + forward_to_deserialize_other! { + deserialize_bool() + deserialize_i8() + deserialize_i16() + deserialize_i32() + deserialize_i64() + deserialize_u8() + deserialize_u16() + deserialize_u32() + deserialize_u64() + deserialize_f32() + deserialize_f64() + deserialize_char() + deserialize_str() + deserialize_string() + deserialize_bytes() + deserialize_byte_buf() + deserialize_seq() + deserialize_tuple(usize) + deserialize_tuple_struct(&'static str, usize) + deserialize_identifier() + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +struct FlatMapAccess<'a, 'de: 'a, E> { + iter: slice::Iter<'a, Option<(Content<'de>, Content<'de>)>>, + pending_content: Option<&'a Content<'de>>, + _marker: PhantomData, +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'a, 'de, E> MapAccess<'de> for FlatMapAccess<'a, 'de, E> +where + E: Error, +{ + type Error = E; + + fn next_key_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: DeserializeSeed<'de>, + { + for item in &mut self.iter { + // Items in the vector are nulled out when used by a struct. + if let Some((ref key, ref content)) = *item { + // Do not take(), instead borrow this entry. 
The internally tagged + // enum does its own buffering so we can't tell whether this entry + // is going to be consumed. Borrowing here leaves the entry + // available for later flattened fields. + self.pending_content = Some(content); + return seed.deserialize(ContentRefDeserializer::new(key)).map(Some); + } + } + Ok(None) + } + + fn next_value_seed(&mut self, seed: T) -> Result + where + T: DeserializeSeed<'de>, + { + match self.pending_content.take() { + Some(value) => seed.deserialize(ContentRefDeserializer::new(value)), + None => Err(Error::custom("value is missing")), + } + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +struct FlatStructAccess<'a, 'de: 'a, E> { + iter: slice::IterMut<'a, Option<(Content<'de>, Content<'de>)>>, + pending_content: Option>, + fields: &'static [&'static str], + _marker: PhantomData, +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'a, 'de, E> MapAccess<'de> for FlatStructAccess<'a, 'de, E> +where + E: Error, +{ + type Error = E; + + fn next_key_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: DeserializeSeed<'de>, + { + for entry in self.iter.by_ref() { + if let Some((key, content)) = flat_map_take_entry(entry, self.fields) { + self.pending_content = Some(content); + return seed.deserialize(ContentDeserializer::new(key)).map(Some); + } + } + Ok(None) + } + + fn next_value_seed(&mut self, seed: T) -> Result + where + T: DeserializeSeed<'de>, + { + match self.pending_content.take() { + Some(value) => seed.deserialize(ContentDeserializer::new(value)), + None => Err(Error::custom("value is missing")), + } + } +} + +/// Claims one key-value pair from a FlatMapDeserializer's field buffer if the +/// field name matches any of the recognized ones. 
+#[cfg(any(feature = "std", feature = "alloc"))] +fn flat_map_take_entry<'de>( + entry: &mut Option<(Content<'de>, Content<'de>)>, + recognized: &[&str], +) -> Option<(Content<'de>, Content<'de>)> { + // Entries in the FlatMapDeserializer buffer are nulled out as they get + // claimed for deserialization. We only use an entry if it is still present + // and if the field is one recognized by the current data structure. + let is_recognized = match entry { + None => false, + Some((k, _v)) => content_as_str(k).map_or(false, |name| recognized.contains(&name)), + }; + + if is_recognized { + entry.take() + } else { + None + } +} + +pub struct AdjacentlyTaggedEnumVariantSeed { + pub enum_name: &'static str, + pub variants: &'static [&'static str], + pub fields_enum: PhantomData, +} + +pub struct AdjacentlyTaggedEnumVariantVisitor { + enum_name: &'static str, + fields_enum: PhantomData, +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'de, F> Visitor<'de> for AdjacentlyTaggedEnumVariantVisitor +where + F: Deserialize<'de>, +{ + type Value = F; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "variant of enum {}", self.enum_name) + } + + fn visit_enum
(self, data: A) -> Result + where + A: EnumAccess<'de>, + { + let (variant, variant_access) = tri!(data.variant()); + tri!(variant_access.unit_variant()); + Ok(variant) + } +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'de, F> DeserializeSeed<'de> for AdjacentlyTaggedEnumVariantSeed +where + F: Deserialize<'de>, +{ + type Value = F; + + fn deserialize(self, deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_enum( + self.enum_name, + self.variants, + AdjacentlyTaggedEnumVariantVisitor { + enum_name: self.enum_name, + fields_enum: PhantomData, + }, + ) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/private/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/private/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..20bfa4eb886af71985fafbc5c5d0f8b8b425d382 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/private/mod.rs @@ -0,0 +1,18 @@ +#[cfg(not(no_serde_derive))] +pub mod de; +#[cfg(not(no_serde_derive))] +pub mod ser; + +pub use crate::lib::clone::Clone; +pub use crate::lib::convert::{From, Into, TryFrom}; +pub use crate::lib::default::Default; +pub use crate::lib::fmt::{self, Formatter}; +pub use crate::lib::marker::PhantomData; +pub use crate::lib::option::Option::{self, None, Some}; +pub use crate::lib::ptr; +pub use crate::lib::result::Result::{self, Err, Ok}; + +pub use crate::serde_core_private::string::from_utf8_lossy; + +#[cfg(any(feature = "alloc", feature = "std"))] +pub use crate::lib::{ToString, Vec}; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/private/ser.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/private/ser.rs new file mode 100644 index 0000000000000000000000000000000000000000..411e2b41ee8c9da262e6fafc1c7532e3c8030fef --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/serde-1.0.228/src/private/ser.rs @@ -0,0 +1,1382 @@ +use crate::lib::*; + +use crate::ser::{self, Impossible, Serialize, SerializeMap, SerializeStruct, Serializer}; + +#[cfg(any(feature = "std", feature = "alloc"))] +use self::content::{ + Content, ContentSerializer, SerializeStructVariantAsMapValue, SerializeTupleVariantAsMapValue, +}; + +/// Used to check that serde(getter) attributes return the expected type. +/// Not public API. +pub fn constrain(t: &T) -> &T { + t +} + +/// Not public API. +pub fn serialize_tagged_newtype( + serializer: S, + type_ident: &'static str, + variant_ident: &'static str, + tag: &'static str, + variant_name: &'static str, + value: &T, +) -> Result +where + S: Serializer, + T: Serialize, +{ + value.serialize(TaggedSerializer { + type_ident, + variant_ident, + tag, + variant_name, + delegate: serializer, + }) +} + +struct TaggedSerializer { + type_ident: &'static str, + variant_ident: &'static str, + tag: &'static str, + variant_name: &'static str, + delegate: S, +} + +enum Unsupported { + Boolean, + Integer, + Float, + Char, + String, + ByteArray, + Optional, + Sequence, + Tuple, + TupleStruct, + #[cfg(not(any(feature = "std", feature = "alloc")))] + Enum, +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl Display for Unsupported { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match *self { + Unsupported::Boolean => formatter.write_str("a boolean"), + Unsupported::Integer => formatter.write_str("an integer"), + Unsupported::Float => formatter.write_str("a float"), + Unsupported::Char => formatter.write_str("a char"), + Unsupported::String => formatter.write_str("a string"), + Unsupported::ByteArray => formatter.write_str("a byte array"), + Unsupported::Optional => formatter.write_str("an optional"), + Unsupported::Sequence => formatter.write_str("a sequence"), + Unsupported::Tuple => formatter.write_str("a tuple"), + 
Unsupported::TupleStruct => formatter.write_str("a tuple struct"), + #[cfg(not(any(feature = "std", feature = "alloc")))] + Unsupported::Enum => formatter.write_str("an enum"), + } + } +} + +impl TaggedSerializer +where + S: Serializer, +{ + fn bad_type(self, what: Unsupported) -> S::Error { + ser::Error::custom(format_args!( + "cannot serialize tagged newtype variant {}::{} containing {}", + self.type_ident, self.variant_ident, what + )) + } +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl Serializer for TaggedSerializer +where + S: Serializer, +{ + type Ok = S::Ok; + type Error = S::Error; + + type SerializeSeq = Impossible; + type SerializeTuple = Impossible; + type SerializeTupleStruct = Impossible; + type SerializeMap = S::SerializeMap; + type SerializeStruct = S::SerializeStruct; + + #[cfg(not(any(feature = "std", feature = "alloc")))] + type SerializeTupleVariant = Impossible; + #[cfg(any(feature = "std", feature = "alloc"))] + type SerializeTupleVariant = SerializeTupleVariantAsMapValue; + + #[cfg(not(any(feature = "std", feature = "alloc")))] + type SerializeStructVariant = Impossible; + #[cfg(any(feature = "std", feature = "alloc"))] + type SerializeStructVariant = SerializeStructVariantAsMapValue; + + fn serialize_bool(self, _: bool) -> Result { + Err(self.bad_type(Unsupported::Boolean)) + } + + fn serialize_i8(self, _: i8) -> Result { + Err(self.bad_type(Unsupported::Integer)) + } + + fn serialize_i16(self, _: i16) -> Result { + Err(self.bad_type(Unsupported::Integer)) + } + + fn serialize_i32(self, _: i32) -> Result { + Err(self.bad_type(Unsupported::Integer)) + } + + fn serialize_i64(self, _: i64) -> Result { + Err(self.bad_type(Unsupported::Integer)) + } + + fn serialize_u8(self, _: u8) -> Result { + Err(self.bad_type(Unsupported::Integer)) + } + + fn serialize_u16(self, _: u16) -> Result { + Err(self.bad_type(Unsupported::Integer)) + } + + fn serialize_u32(self, _: u32) -> Result { + 
Err(self.bad_type(Unsupported::Integer)) + } + + fn serialize_u64(self, _: u64) -> Result { + Err(self.bad_type(Unsupported::Integer)) + } + + fn serialize_f32(self, _: f32) -> Result { + Err(self.bad_type(Unsupported::Float)) + } + + fn serialize_f64(self, _: f64) -> Result { + Err(self.bad_type(Unsupported::Float)) + } + + fn serialize_char(self, _: char) -> Result { + Err(self.bad_type(Unsupported::Char)) + } + + fn serialize_str(self, _: &str) -> Result { + Err(self.bad_type(Unsupported::String)) + } + + fn serialize_bytes(self, _: &[u8]) -> Result { + Err(self.bad_type(Unsupported::ByteArray)) + } + + fn serialize_none(self) -> Result { + Err(self.bad_type(Unsupported::Optional)) + } + + fn serialize_some(self, _: &T) -> Result + where + T: ?Sized + Serialize, + { + Err(self.bad_type(Unsupported::Optional)) + } + + fn serialize_unit(self) -> Result { + let mut map = tri!(self.delegate.serialize_map(Some(1))); + tri!(map.serialize_entry(self.tag, self.variant_name)); + map.end() + } + + fn serialize_unit_struct(self, _: &'static str) -> Result { + let mut map = tri!(self.delegate.serialize_map(Some(1))); + tri!(map.serialize_entry(self.tag, self.variant_name)); + map.end() + } + + fn serialize_unit_variant( + self, + _: &'static str, + _: u32, + inner_variant: &'static str, + ) -> Result { + let mut map = tri!(self.delegate.serialize_map(Some(2))); + tri!(map.serialize_entry(self.tag, self.variant_name)); + tri!(map.serialize_entry(inner_variant, &())); + map.end() + } + + fn serialize_newtype_struct( + self, + _: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize, + { + value.serialize(self) + } + + fn serialize_newtype_variant( + self, + _: &'static str, + _: u32, + inner_variant: &'static str, + inner_value: &T, + ) -> Result + where + T: ?Sized + Serialize, + { + let mut map = tri!(self.delegate.serialize_map(Some(2))); + tri!(map.serialize_entry(self.tag, self.variant_name)); + tri!(map.serialize_entry(inner_variant, inner_value)); + 
map.end() + } + + fn serialize_seq(self, _: Option) -> Result { + Err(self.bad_type(Unsupported::Sequence)) + } + + fn serialize_tuple(self, _: usize) -> Result { + Err(self.bad_type(Unsupported::Tuple)) + } + + fn serialize_tuple_struct( + self, + _: &'static str, + _: usize, + ) -> Result { + Err(self.bad_type(Unsupported::TupleStruct)) + } + + #[cfg(not(any(feature = "std", feature = "alloc")))] + fn serialize_tuple_variant( + self, + _: &'static str, + _: u32, + _: &'static str, + _: usize, + ) -> Result { + // Lack of push-based serialization means we need to buffer the content + // of the tuple variant, so it requires std. + Err(self.bad_type(Unsupported::Enum)) + } + + #[cfg(any(feature = "std", feature = "alloc"))] + fn serialize_tuple_variant( + self, + _: &'static str, + _: u32, + inner_variant: &'static str, + len: usize, + ) -> Result { + let mut map = tri!(self.delegate.serialize_map(Some(2))); + tri!(map.serialize_entry(self.tag, self.variant_name)); + tri!(map.serialize_key(inner_variant)); + Ok(SerializeTupleVariantAsMapValue::new( + map, + inner_variant, + len, + )) + } + + fn serialize_map(self, len: Option) -> Result { + let mut map = tri!(self.delegate.serialize_map(len.map(|len| len + 1))); + tri!(map.serialize_entry(self.tag, self.variant_name)); + Ok(map) + } + + fn serialize_struct( + self, + name: &'static str, + len: usize, + ) -> Result { + let mut state = tri!(self.delegate.serialize_struct(name, len + 1)); + tri!(state.serialize_field(self.tag, self.variant_name)); + Ok(state) + } + + #[cfg(not(any(feature = "std", feature = "alloc")))] + fn serialize_struct_variant( + self, + _: &'static str, + _: u32, + _: &'static str, + _: usize, + ) -> Result { + // Lack of push-based serialization means we need to buffer the content + // of the struct variant, so it requires std. 
+ Err(self.bad_type(Unsupported::Enum)) + } + + #[cfg(any(feature = "std", feature = "alloc"))] + fn serialize_struct_variant( + self, + _: &'static str, + _: u32, + inner_variant: &'static str, + len: usize, + ) -> Result { + let mut map = tri!(self.delegate.serialize_map(Some(2))); + tri!(map.serialize_entry(self.tag, self.variant_name)); + tri!(map.serialize_key(inner_variant)); + Ok(SerializeStructVariantAsMapValue::new( + map, + inner_variant, + len, + )) + } + + #[cfg(not(any(feature = "std", feature = "alloc")))] + fn collect_str(self, _: &T) -> Result + where + T: ?Sized + Display, + { + Err(self.bad_type(Unsupported::String)) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +mod content { + use crate::lib::*; + + use crate::ser::{self, Serialize, Serializer}; + + pub struct SerializeTupleVariantAsMapValue { + map: M, + name: &'static str, + fields: Vec, + } + + impl SerializeTupleVariantAsMapValue { + pub fn new(map: M, name: &'static str, len: usize) -> Self { + SerializeTupleVariantAsMapValue { + map, + name, + fields: Vec::with_capacity(len), + } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl ser::SerializeTupleVariant for SerializeTupleVariantAsMapValue + where + M: ser::SerializeMap, + { + type Ok = M::Ok; + type Error = M::Error; + + fn serialize_field(&mut self, value: &T) -> Result<(), M::Error> + where + T: ?Sized + Serialize, + { + let value = tri!(value.serialize(ContentSerializer::::new())); + self.fields.push(value); + Ok(()) + } + + fn end(mut self) -> Result { + tri!(self + .map + .serialize_value(&Content::TupleStruct(self.name, self.fields))); + self.map.end() + } + } + + pub struct SerializeStructVariantAsMapValue { + map: M, + name: &'static str, + fields: Vec<(&'static str, Content)>, + } + + impl SerializeStructVariantAsMapValue { + pub fn new(map: M, name: &'static str, len: usize) -> Self { + SerializeStructVariantAsMapValue { + map, + name, + fields: Vec::with_capacity(len), + } + 
} + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl ser::SerializeStructVariant for SerializeStructVariantAsMapValue + where + M: ser::SerializeMap, + { + type Ok = M::Ok; + type Error = M::Error; + + fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), M::Error> + where + T: ?Sized + Serialize, + { + let value = tri!(value.serialize(ContentSerializer::::new())); + self.fields.push((key, value)); + Ok(()) + } + + fn end(mut self) -> Result { + tri!(self + .map + .serialize_value(&Content::Struct(self.name, self.fields))); + self.map.end() + } + } + + pub enum Content { + Bool(bool), + + U8(u8), + U16(u16), + U32(u32), + U64(u64), + + I8(i8), + I16(i16), + I32(i32), + I64(i64), + + F32(f32), + F64(f64), + + Char(char), + String(String), + Bytes(Vec), + + None, + Some(Box), + + Unit, + UnitStruct(&'static str), + UnitVariant(&'static str, u32, &'static str), + NewtypeStruct(&'static str, Box), + NewtypeVariant(&'static str, u32, &'static str, Box), + + Seq(Vec), + Tuple(Vec), + TupleStruct(&'static str, Vec), + TupleVariant(&'static str, u32, &'static str, Vec), + Map(Vec<(Content, Content)>), + Struct(&'static str, Vec<(&'static str, Content)>), + StructVariant( + &'static str, + u32, + &'static str, + Vec<(&'static str, Content)>, + ), + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl Serialize for Content { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match *self { + Content::Bool(b) => serializer.serialize_bool(b), + Content::U8(u) => serializer.serialize_u8(u), + Content::U16(u) => serializer.serialize_u16(u), + Content::U32(u) => serializer.serialize_u32(u), + Content::U64(u) => serializer.serialize_u64(u), + Content::I8(i) => serializer.serialize_i8(i), + Content::I16(i) => serializer.serialize_i16(i), + Content::I32(i) => serializer.serialize_i32(i), + Content::I64(i) => serializer.serialize_i64(i), + Content::F32(f) => 
serializer.serialize_f32(f), + Content::F64(f) => serializer.serialize_f64(f), + Content::Char(c) => serializer.serialize_char(c), + Content::String(ref s) => serializer.serialize_str(s), + Content::Bytes(ref b) => serializer.serialize_bytes(b), + Content::None => serializer.serialize_none(), + Content::Some(ref c) => serializer.serialize_some(&**c), + Content::Unit => serializer.serialize_unit(), + Content::UnitStruct(n) => serializer.serialize_unit_struct(n), + Content::UnitVariant(n, i, v) => serializer.serialize_unit_variant(n, i, v), + Content::NewtypeStruct(n, ref c) => serializer.serialize_newtype_struct(n, &**c), + Content::NewtypeVariant(n, i, v, ref c) => { + serializer.serialize_newtype_variant(n, i, v, &**c) + } + Content::Seq(ref elements) => elements.serialize(serializer), + Content::Tuple(ref elements) => { + use crate::ser::SerializeTuple; + let mut tuple = tri!(serializer.serialize_tuple(elements.len())); + for e in elements { + tri!(tuple.serialize_element(e)); + } + tuple.end() + } + Content::TupleStruct(n, ref fields) => { + use crate::ser::SerializeTupleStruct; + let mut ts = tri!(serializer.serialize_tuple_struct(n, fields.len())); + for f in fields { + tri!(ts.serialize_field(f)); + } + ts.end() + } + Content::TupleVariant(n, i, v, ref fields) => { + use crate::ser::SerializeTupleVariant; + let mut tv = tri!(serializer.serialize_tuple_variant(n, i, v, fields.len())); + for f in fields { + tri!(tv.serialize_field(f)); + } + tv.end() + } + Content::Map(ref entries) => { + use crate::ser::SerializeMap; + let mut map = tri!(serializer.serialize_map(Some(entries.len()))); + for (k, v) in entries { + tri!(map.serialize_entry(k, v)); + } + map.end() + } + Content::Struct(n, ref fields) => { + use crate::ser::SerializeStruct; + let mut s = tri!(serializer.serialize_struct(n, fields.len())); + for &(k, ref v) in fields { + tri!(s.serialize_field(k, v)); + } + s.end() + } + Content::StructVariant(n, i, v, ref fields) => { + use 
crate::ser::SerializeStructVariant; + let mut sv = tri!(serializer.serialize_struct_variant(n, i, v, fields.len())); + for &(k, ref v) in fields { + tri!(sv.serialize_field(k, v)); + } + sv.end() + } + } + } + } + + pub struct ContentSerializer { + error: PhantomData, + } + + impl ContentSerializer { + pub fn new() -> Self { + ContentSerializer { error: PhantomData } + } + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl Serializer for ContentSerializer + where + E: ser::Error, + { + type Ok = Content; + type Error = E; + + type SerializeSeq = SerializeSeq; + type SerializeTuple = SerializeTuple; + type SerializeTupleStruct = SerializeTupleStruct; + type SerializeTupleVariant = SerializeTupleVariant; + type SerializeMap = SerializeMap; + type SerializeStruct = SerializeStruct; + type SerializeStructVariant = SerializeStructVariant; + + fn serialize_bool(self, v: bool) -> Result { + Ok(Content::Bool(v)) + } + + fn serialize_i8(self, v: i8) -> Result { + Ok(Content::I8(v)) + } + + fn serialize_i16(self, v: i16) -> Result { + Ok(Content::I16(v)) + } + + fn serialize_i32(self, v: i32) -> Result { + Ok(Content::I32(v)) + } + + fn serialize_i64(self, v: i64) -> Result { + Ok(Content::I64(v)) + } + + fn serialize_u8(self, v: u8) -> Result { + Ok(Content::U8(v)) + } + + fn serialize_u16(self, v: u16) -> Result { + Ok(Content::U16(v)) + } + + fn serialize_u32(self, v: u32) -> Result { + Ok(Content::U32(v)) + } + + fn serialize_u64(self, v: u64) -> Result { + Ok(Content::U64(v)) + } + + fn serialize_f32(self, v: f32) -> Result { + Ok(Content::F32(v)) + } + + fn serialize_f64(self, v: f64) -> Result { + Ok(Content::F64(v)) + } + + fn serialize_char(self, v: char) -> Result { + Ok(Content::Char(v)) + } + + fn serialize_str(self, value: &str) -> Result { + Ok(Content::String(value.to_owned())) + } + + fn serialize_bytes(self, value: &[u8]) -> Result { + Ok(Content::Bytes(value.to_owned())) + } + + fn serialize_none(self) -> Result { + 
Ok(Content::None) + } + + fn serialize_some(self, value: &T) -> Result + where + T: ?Sized + Serialize, + { + Ok(Content::Some(Box::new(tri!(value.serialize(self))))) + } + + fn serialize_unit(self) -> Result { + Ok(Content::Unit) + } + + fn serialize_unit_struct(self, name: &'static str) -> Result { + Ok(Content::UnitStruct(name)) + } + + fn serialize_unit_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + ) -> Result { + Ok(Content::UnitVariant(name, variant_index, variant)) + } + + fn serialize_newtype_struct(self, name: &'static str, value: &T) -> Result + where + T: ?Sized + Serialize, + { + Ok(Content::NewtypeStruct( + name, + Box::new(tri!(value.serialize(self))), + )) + } + + fn serialize_newtype_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize, + { + Ok(Content::NewtypeVariant( + name, + variant_index, + variant, + Box::new(tri!(value.serialize(self))), + )) + } + + fn serialize_seq(self, len: Option) -> Result { + Ok(SerializeSeq { + elements: Vec::with_capacity(len.unwrap_or(0)), + error: PhantomData, + }) + } + + fn serialize_tuple(self, len: usize) -> Result { + Ok(SerializeTuple { + elements: Vec::with_capacity(len), + error: PhantomData, + }) + } + + fn serialize_tuple_struct( + self, + name: &'static str, + len: usize, + ) -> Result { + Ok(SerializeTupleStruct { + name, + fields: Vec::with_capacity(len), + error: PhantomData, + }) + } + + fn serialize_tuple_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + len: usize, + ) -> Result { + Ok(SerializeTupleVariant { + name, + variant_index, + variant, + fields: Vec::with_capacity(len), + error: PhantomData, + }) + } + + fn serialize_map(self, len: Option) -> Result { + Ok(SerializeMap { + entries: Vec::with_capacity(len.unwrap_or(0)), + key: None, + error: PhantomData, + }) + } + + fn serialize_struct( + self, + name: &'static str, + 
len: usize, + ) -> Result { + Ok(SerializeStruct { + name, + fields: Vec::with_capacity(len), + error: PhantomData, + }) + } + + fn serialize_struct_variant( + self, + name: &'static str, + variant_index: u32, + variant: &'static str, + len: usize, + ) -> Result { + Ok(SerializeStructVariant { + name, + variant_index, + variant, + fields: Vec::with_capacity(len), + error: PhantomData, + }) + } + } + + pub struct SerializeSeq { + elements: Vec, + error: PhantomData, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl ser::SerializeSeq for SerializeSeq + where + E: ser::Error, + { + type Ok = Content; + type Error = E; + + fn serialize_element(&mut self, value: &T) -> Result<(), E> + where + T: ?Sized + Serialize, + { + let value = tri!(value.serialize(ContentSerializer::::new())); + self.elements.push(value); + Ok(()) + } + + fn end(self) -> Result { + Ok(Content::Seq(self.elements)) + } + } + + pub struct SerializeTuple { + elements: Vec, + error: PhantomData, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl ser::SerializeTuple for SerializeTuple + where + E: ser::Error, + { + type Ok = Content; + type Error = E; + + fn serialize_element(&mut self, value: &T) -> Result<(), E> + where + T: ?Sized + Serialize, + { + let value = tri!(value.serialize(ContentSerializer::::new())); + self.elements.push(value); + Ok(()) + } + + fn end(self) -> Result { + Ok(Content::Tuple(self.elements)) + } + } + + pub struct SerializeTupleStruct { + name: &'static str, + fields: Vec, + error: PhantomData, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl ser::SerializeTupleStruct for SerializeTupleStruct + where + E: ser::Error, + { + type Ok = Content; + type Error = E; + + fn serialize_field(&mut self, value: &T) -> Result<(), E> + where + T: ?Sized + Serialize, + { + let value = tri!(value.serialize(ContentSerializer::::new())); + self.fields.push(value); + Ok(()) + } + + fn 
end(self) -> Result { + Ok(Content::TupleStruct(self.name, self.fields)) + } + } + + pub struct SerializeTupleVariant { + name: &'static str, + variant_index: u32, + variant: &'static str, + fields: Vec, + error: PhantomData, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl ser::SerializeTupleVariant for SerializeTupleVariant + where + E: ser::Error, + { + type Ok = Content; + type Error = E; + + fn serialize_field(&mut self, value: &T) -> Result<(), E> + where + T: ?Sized + Serialize, + { + let value = tri!(value.serialize(ContentSerializer::::new())); + self.fields.push(value); + Ok(()) + } + + fn end(self) -> Result { + Ok(Content::TupleVariant( + self.name, + self.variant_index, + self.variant, + self.fields, + )) + } + } + + pub struct SerializeMap { + entries: Vec<(Content, Content)>, + key: Option, + error: PhantomData, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl ser::SerializeMap for SerializeMap + where + E: ser::Error, + { + type Ok = Content; + type Error = E; + + fn serialize_key(&mut self, key: &T) -> Result<(), E> + where + T: ?Sized + Serialize, + { + let key = tri!(key.serialize(ContentSerializer::::new())); + self.key = Some(key); + Ok(()) + } + + fn serialize_value(&mut self, value: &T) -> Result<(), E> + where + T: ?Sized + Serialize, + { + let key = self + .key + .take() + .expect("serialize_value called before serialize_key"); + let value = tri!(value.serialize(ContentSerializer::::new())); + self.entries.push((key, value)); + Ok(()) + } + + fn end(self) -> Result { + Ok(Content::Map(self.entries)) + } + + fn serialize_entry(&mut self, key: &K, value: &V) -> Result<(), E> + where + K: ?Sized + Serialize, + V: ?Sized + Serialize, + { + let key = tri!(key.serialize(ContentSerializer::::new())); + let value = tri!(value.serialize(ContentSerializer::::new())); + self.entries.push((key, value)); + Ok(()) + } + } + + pub struct SerializeStruct { + name: &'static str, + 
fields: Vec<(&'static str, Content)>, + error: PhantomData, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl ser::SerializeStruct for SerializeStruct + where + E: ser::Error, + { + type Ok = Content; + type Error = E; + + fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), E> + where + T: ?Sized + Serialize, + { + let value = tri!(value.serialize(ContentSerializer::::new())); + self.fields.push((key, value)); + Ok(()) + } + + fn end(self) -> Result { + Ok(Content::Struct(self.name, self.fields)) + } + } + + pub struct SerializeStructVariant { + name: &'static str, + variant_index: u32, + variant: &'static str, + fields: Vec<(&'static str, Content)>, + error: PhantomData, + } + + #[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] + impl ser::SerializeStructVariant for SerializeStructVariant + where + E: ser::Error, + { + type Ok = Content; + type Error = E; + + fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), E> + where + T: ?Sized + Serialize, + { + let value = tri!(value.serialize(ContentSerializer::::new())); + self.fields.push((key, value)); + Ok(()) + } + + fn end(self) -> Result { + Ok(Content::StructVariant( + self.name, + self.variant_index, + self.variant, + self.fields, + )) + } + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +pub struct FlatMapSerializer<'a, M: 'a>(pub &'a mut M); + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'a, M> FlatMapSerializer<'a, M> +where + M: SerializeMap + 'a, +{ + fn bad_type(what: Unsupported) -> M::Error { + ser::Error::custom(format_args!( + "can only flatten structs and maps (got {})", + what + )) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'a, M> Serializer for FlatMapSerializer<'a, M> +where + M: SerializeMap + 'a, +{ + type Ok = (); + type Error = M::Error; + + type SerializeSeq = Impossible; + type 
SerializeTuple = Impossible; + type SerializeTupleStruct = Impossible; + type SerializeMap = FlatMapSerializeMap<'a, M>; + type SerializeStruct = FlatMapSerializeStruct<'a, M>; + type SerializeTupleVariant = FlatMapSerializeTupleVariantAsMapValue<'a, M>; + type SerializeStructVariant = FlatMapSerializeStructVariantAsMapValue<'a, M>; + + fn serialize_bool(self, _: bool) -> Result { + Err(Self::bad_type(Unsupported::Boolean)) + } + + fn serialize_i8(self, _: i8) -> Result { + Err(Self::bad_type(Unsupported::Integer)) + } + + fn serialize_i16(self, _: i16) -> Result { + Err(Self::bad_type(Unsupported::Integer)) + } + + fn serialize_i32(self, _: i32) -> Result { + Err(Self::bad_type(Unsupported::Integer)) + } + + fn serialize_i64(self, _: i64) -> Result { + Err(Self::bad_type(Unsupported::Integer)) + } + + fn serialize_u8(self, _: u8) -> Result { + Err(Self::bad_type(Unsupported::Integer)) + } + + fn serialize_u16(self, _: u16) -> Result { + Err(Self::bad_type(Unsupported::Integer)) + } + + fn serialize_u32(self, _: u32) -> Result { + Err(Self::bad_type(Unsupported::Integer)) + } + + fn serialize_u64(self, _: u64) -> Result { + Err(Self::bad_type(Unsupported::Integer)) + } + + fn serialize_f32(self, _: f32) -> Result { + Err(Self::bad_type(Unsupported::Float)) + } + + fn serialize_f64(self, _: f64) -> Result { + Err(Self::bad_type(Unsupported::Float)) + } + + fn serialize_char(self, _: char) -> Result { + Err(Self::bad_type(Unsupported::Char)) + } + + fn serialize_str(self, _: &str) -> Result { + Err(Self::bad_type(Unsupported::String)) + } + + fn serialize_bytes(self, _: &[u8]) -> Result { + Err(Self::bad_type(Unsupported::ByteArray)) + } + + fn serialize_none(self) -> Result { + Ok(()) + } + + fn serialize_some(self, value: &T) -> Result + where + T: ?Sized + Serialize, + { + value.serialize(self) + } + + fn serialize_unit(self) -> Result { + Ok(()) + } + + fn serialize_unit_struct(self, _: &'static str) -> Result { + Ok(()) + } + + fn serialize_unit_variant( + self, 
+ _: &'static str, + _: u32, + variant: &'static str, + ) -> Result { + self.0.serialize_entry(variant, &()) + } + + fn serialize_newtype_struct( + self, + _: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize, + { + value.serialize(self) + } + + fn serialize_newtype_variant( + self, + _: &'static str, + _: u32, + variant: &'static str, + value: &T, + ) -> Result + where + T: ?Sized + Serialize, + { + self.0.serialize_entry(variant, value) + } + + fn serialize_seq(self, _: Option) -> Result { + Err(Self::bad_type(Unsupported::Sequence)) + } + + fn serialize_tuple(self, _: usize) -> Result { + Err(Self::bad_type(Unsupported::Tuple)) + } + + fn serialize_tuple_struct( + self, + _: &'static str, + _: usize, + ) -> Result { + Err(Self::bad_type(Unsupported::TupleStruct)) + } + + fn serialize_tuple_variant( + self, + _: &'static str, + _: u32, + variant: &'static str, + _: usize, + ) -> Result { + tri!(self.0.serialize_key(variant)); + Ok(FlatMapSerializeTupleVariantAsMapValue::new(self.0)) + } + + fn serialize_map(self, _: Option) -> Result { + Ok(FlatMapSerializeMap(self.0)) + } + + fn serialize_struct( + self, + _: &'static str, + _: usize, + ) -> Result { + Ok(FlatMapSerializeStruct(self.0)) + } + + fn serialize_struct_variant( + self, + _: &'static str, + _: u32, + inner_variant: &'static str, + _: usize, + ) -> Result { + tri!(self.0.serialize_key(inner_variant)); + Ok(FlatMapSerializeStructVariantAsMapValue::new( + self.0, + inner_variant, + )) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +pub struct FlatMapSerializeMap<'a, M: 'a>(&'a mut M); + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'a, M> ser::SerializeMap for FlatMapSerializeMap<'a, M> +where + M: SerializeMap + 'a, +{ + type Ok = (); + type Error = M::Error; + + fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + self.0.serialize_key(key) 
+ } + + fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + self.0.serialize_value(value) + } + + fn serialize_entry(&mut self, key: &K, value: &V) -> Result<(), Self::Error> + where + K: ?Sized + Serialize, + V: ?Sized + Serialize, + { + self.0.serialize_entry(key, value) + } + + fn end(self) -> Result<(), Self::Error> { + Ok(()) + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +pub struct FlatMapSerializeStruct<'a, M: 'a>(&'a mut M); + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'a, M> ser::SerializeStruct for FlatMapSerializeStruct<'a, M> +where + M: SerializeMap + 'a, +{ + type Ok = (); + type Error = M::Error; + + fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + self.0.serialize_entry(key, value) + } + + fn end(self) -> Result<(), Self::Error> { + Ok(()) + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +#[cfg(any(feature = "std", feature = "alloc"))] +pub struct FlatMapSerializeTupleVariantAsMapValue<'a, M: 'a> { + map: &'a mut M, + fields: Vec, +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'a, M> FlatMapSerializeTupleVariantAsMapValue<'a, M> +where + M: SerializeMap + 'a, +{ + fn new(map: &'a mut M) -> Self { + FlatMapSerializeTupleVariantAsMapValue { + map, + fields: Vec::new(), + } + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'a, M> ser::SerializeTupleVariant for FlatMapSerializeTupleVariantAsMapValue<'a, M> +where + M: SerializeMap + 'a, +{ + type Ok = (); + type Error = M::Error; + + fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let value = tri!(value.serialize(ContentSerializer::::new())); + 
self.fields.push(value); + Ok(()) + } + + fn end(self) -> Result<(), Self::Error> { + tri!(self.map.serialize_value(&Content::Seq(self.fields))); + Ok(()) + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +#[cfg(any(feature = "std", feature = "alloc"))] +pub struct FlatMapSerializeStructVariantAsMapValue<'a, M: 'a> { + map: &'a mut M, + name: &'static str, + fields: Vec<(&'static str, Content)>, +} + +#[cfg(any(feature = "std", feature = "alloc"))] +impl<'a, M> FlatMapSerializeStructVariantAsMapValue<'a, M> +where + M: SerializeMap + 'a, +{ + fn new(map: &'a mut M, name: &'static str) -> FlatMapSerializeStructVariantAsMapValue<'a, M> { + FlatMapSerializeStructVariantAsMapValue { + map, + name, + fields: Vec::new(), + } + } +} + +#[cfg(any(feature = "std", feature = "alloc"))] +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl<'a, M> ser::SerializeStructVariant for FlatMapSerializeStructVariantAsMapValue<'a, M> +where + M: SerializeMap + 'a, +{ + type Ok = (); + type Error = M::Error; + + fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error> + where + T: ?Sized + Serialize, + { + let value = tri!(value.serialize(ContentSerializer::::new())); + self.fields.push((key, value)); + Ok(()) + } + + fn end(self) -> Result<(), Self::Error> { + tri!(self + .map + .serialize_value(&Content::Struct(self.name, self.fields))); + Ok(()) + } +} + +pub struct AdjacentlyTaggedEnumVariant { + pub enum_name: &'static str, + pub variant_index: u32, + pub variant_name: &'static str, +} + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl Serialize for AdjacentlyTaggedEnumVariant { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_unit_variant(self.enum_name, self.variant_index, self.variant_name) + } +} + +// Error when Serialize for a non_exhaustive remote enum encounters a variant +// 
that is not recognized. +pub struct CannotSerializeVariant(pub T); + +#[cfg_attr(not(no_diagnostic_namespace), diagnostic::do_not_recommend)] +impl Display for CannotSerializeVariant +where + T: Debug, +{ + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "enum variant cannot be serialized: {:?}", self.0) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..3a68bc1a6add8675b03c68aa8ce98b90cdfc1ef0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "bfa790b8e445dc67b7ab94d75adb1a92d6296c9a" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..cd26cef0ad88c258f98c7d645fa426f3b6844bfb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/Cargo.toml @@ -0,0 +1,147 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. 
+# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.31" +name = "syn" +version = "1.0.109" +authors = ["David Tolnay "] +include = [ + "/benches/**", + "/build.rs", + "/Cargo.toml", + "/LICENSE-APACHE", + "/LICENSE-MIT", + "/README.md", + "/src/**", + "/tests/**", +] +description = "Parser for Rust source code" +documentation = "https://docs.rs/syn" +readme = "README.md" +keywords = [ + "macros", + "syn", +] +categories = [ + "development-tools::procedural-macro-helpers", + "parser-implementations", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/syn" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "doc_cfg", +] +targets = ["x86_64-unknown-linux-gnu"] + +[package.metadata.playground] +features = [ + "full", + "visit", + "visit-mut", + "fold", + "extra-traits", +] + +[lib] +doc-scrape-examples = false + +[[bench]] +name = "rust" +harness = false +required-features = [ + "full", + "parsing", +] + +[[bench]] +name = "file" +required-features = [ + "full", + "parsing", +] + +[dependencies.proc-macro2] +version = "1.0.46" +default-features = false + +[dependencies.quote] +version = "1.0" +optional = true +default-features = false + +[dependencies.unicode-ident] +version = "1.0" + +[dev-dependencies.anyhow] +version = "1.0" + +[dev-dependencies.automod] +version = "1.0" + +[dev-dependencies.flate2] +version = "1.0" + +[dev-dependencies.insta] +version = "1.0" + +[dev-dependencies.rayon] +version = "1.0" + +[dev-dependencies.ref-cast] +version = "1.0" + +[dev-dependencies.regex] +version = "1.0" + +[dev-dependencies.reqwest] +version = "0.11" +features = ["blocking"] + +[dev-dependencies.syn-test-suite] +version = "0" + +[dev-dependencies.tar] +version = "0.4.16" + +[dev-dependencies.termcolor] +version = "1.0" + 
+[dev-dependencies.walkdir] +version = "2.1" + +[features] +clone-impls = [] +default = [ + "derive", + "parsing", + "printing", + "clone-impls", + "proc-macro", +] +derive = [] +extra-traits = [] +fold = [] +full = [] +parsing = [] +printing = ["quote"] +proc-macro = [ + "proc-macro2/proc-macro", + "quote/proc-macro", +] +test = ["syn-test-suite/all-features"] +visit = [] +visit-mut = [] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..9046d9131f3893cefab60047e72d9d8802edff58 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/Cargo.toml.orig @@ -0,0 +1,91 @@ +[package] +name = "syn" +version = "1.0.109" # don't forget to update html_root_url and syn.json +authors = ["David Tolnay "] +categories = ["development-tools::procedural-macro-helpers", "parser-implementations"] +description = "Parser for Rust source code" +documentation = "https://docs.rs/syn" +edition = "2018" +include = [ + "/benches/**", + "/build.rs", + "/Cargo.toml", + "/LICENSE-APACHE", + "/LICENSE-MIT", + "/README.md", + "/src/**", + "/tests/**", +] +keywords = ["macros", "syn"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/syn" +rust-version = "1.31" + +[features] +default = ["derive", "parsing", "printing", "clone-impls", "proc-macro"] +derive = [] +full = [] +parsing = [] +printing = ["quote"] +visit = [] +visit-mut = [] +fold = [] +clone-impls = [] +extra-traits = [] +proc-macro = ["proc-macro2/proc-macro", "quote/proc-macro"] +test = ["syn-test-suite/all-features"] + +[dependencies] +proc-macro2 = { version = "1.0.46", default-features = false } +quote = { version = "1.0", optional = true, default-features = false } +unicode-ident = "1.0" + +[dev-dependencies] +anyhow = "1.0" +automod = "1.0" +flate2 = "1.0" +insta = "1.0" +rayon = "1.0" 
+ref-cast = "1.0" +regex = "1.0" +reqwest = { version = "0.11", features = ["blocking"] } +syn-test-suite = { version = "0", path = "tests/features" } +tar = "0.4.16" +termcolor = "1.0" +walkdir = "2.1" + +[lib] +doc-scrape-examples = false + +[[bench]] +name = "rust" +harness = false +required-features = ["full", "parsing"] + +[[bench]] +name = "file" +required-features = ["full", "parsing"] + +[package.metadata.docs.rs] +all-features = true +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = ["--cfg", "doc_cfg"] + +[package.metadata.playground] +features = ["full", "visit", "visit-mut", "fold", "extra-traits"] + +[workspace] +members = [ + "dev", + "examples/dump-syntax", + "examples/heapsize/example", + "examples/heapsize/heapsize", + "examples/heapsize/heapsize_derive", + "examples/lazy-static/example", + "examples/lazy-static/lazy-static", + "examples/trace-var/example", + "examples/trace-var/trace-var", + "json", + "tests/crates", + "tests/features", +] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..16fe87b06e802f094b3fbb0894b137bca2b16ef1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..31aa79387f27e730e33d871925e152e35e428031 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/README.md new file mode 100644 index 0000000000000000000000000000000000000000..eeef83dd581e5ea3995f861782ca3afe356cd2e5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/README.md @@ -0,0 +1,285 @@ +Parser for Rust source code +=========================== + +[github](https://github.com/dtolnay/syn) +[crates.io](https://crates.io/crates/syn) +[docs.rs](https://docs.rs/syn) +[build status](https://github.com/dtolnay/syn/actions?query=branch%3Amaster) + +Syn is a parsing library for parsing a stream of Rust tokens into a syntax tree +of Rust source code. + +Currently this library is geared toward use in Rust procedural macros, but +contains some APIs that may be useful more generally. + +- **Data structures** — Syn provides a complete syntax tree that can represent + any valid Rust source code. The syntax tree is rooted at [`syn::File`] which + represents a full source file, but there are other entry points that may be + useful to procedural macros including [`syn::Item`], [`syn::Expr`] and + [`syn::Type`]. + +- **Derives** — Of particular interest to derive macros is [`syn::DeriveInput`] + which is any of the three legal input items to a derive macro. An example + below shows using this type in a library that can derive implementations of a + user-defined trait. + +- **Parsing** — Parsing in Syn is built around [parser functions] with the + signature `fn(ParseStream) -> Result`. 
Every syntax tree node defined by + Syn is individually parsable and may be used as a building block for custom + syntaxes, or you may dream up your own brand new syntax without involving any + of our syntax tree types. + +- **Location information** — Every token parsed by Syn is associated with a + `Span` that tracks line and column information back to the source of that + token. These spans allow a procedural macro to display detailed error messages + pointing to all the right places in the user's code. There is an example of + this below. + +- **Feature flags** — Functionality is aggressively feature gated so your + procedural macros enable only what they need, and do not pay in compile time + for all the rest. + +[`syn::File`]: https://docs.rs/syn/1.0/syn/struct.File.html +[`syn::Item`]: https://docs.rs/syn/1.0/syn/enum.Item.html +[`syn::Expr`]: https://docs.rs/syn/1.0/syn/enum.Expr.html +[`syn::Type`]: https://docs.rs/syn/1.0/syn/enum.Type.html +[`syn::DeriveInput`]: https://docs.rs/syn/1.0/syn/struct.DeriveInput.html +[parser functions]: https://docs.rs/syn/1.0/syn/parse/index.html + +*Version requirement: Syn supports rustc 1.31 and up.* + +[*Release notes*](https://github.com/dtolnay/syn/releases) + +
+ +## Resources + +The best way to learn about procedural macros is by writing some. Consider +working through [this procedural macro workshop][workshop] to get familiar with +the different types of procedural macros. The workshop contains relevant links +into the Syn documentation as you work through each project. + +[workshop]: https://github.com/dtolnay/proc-macro-workshop + +
+ +## Example of a derive macro + +The canonical derive macro using Syn looks like this. We write an ordinary Rust +function tagged with a `proc_macro_derive` attribute and the name of the trait +we are deriving. Any time that derive appears in the user's code, the Rust +compiler passes their data structure as tokens into our macro. We get to execute +arbitrary Rust code to figure out what to do with those tokens, then hand some +tokens back to the compiler to compile into the user's crate. + +[`TokenStream`]: https://doc.rust-lang.org/proc_macro/struct.TokenStream.html + +```toml +[dependencies] +syn = "1.0" +quote = "1.0" + +[lib] +proc-macro = true +``` + +```rust +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, DeriveInput}; + +#[proc_macro_derive(MyMacro)] +pub fn my_macro(input: TokenStream) -> TokenStream { + // Parse the input tokens into a syntax tree + let input = parse_macro_input!(input as DeriveInput); + + // Build the output, possibly using quasi-quotation + let expanded = quote! { + // ... + }; + + // Hand the output tokens back to the compiler + TokenStream::from(expanded) +} +``` + +The [`heapsize`] example directory shows a complete working implementation of a +derive macro. It works on any Rust compiler 1.31+. The example derives a +`HeapSize` trait which computes an estimate of the amount of heap memory owned +by a value. + +[`heapsize`]: examples/heapsize + +```rust +pub trait HeapSize { + /// Total number of bytes of heap memory owned by `self`. + fn heap_size_of_children(&self) -> usize; +} +``` + +The derive macro allows users to write `#[derive(HeapSize)]` on data structures +in their program. + +```rust +#[derive(HeapSize)] +struct Demo<'a, T: ?Sized> { + a: Box, + b: u8, + c: &'a str, + d: String, +} +``` + +
+ +## Spans and error reporting + +The token-based procedural macro API provides great control over where the +compiler's error messages are displayed in user code. Consider the error the +user sees if one of their field types does not implement `HeapSize`. + +```rust +#[derive(HeapSize)] +struct Broken { + ok: String, + bad: std::thread::Thread, +} +``` + +By tracking span information all the way through the expansion of a procedural +macro as shown in the `heapsize` example, token-based macros in Syn are able to +trigger errors that directly pinpoint the source of the problem. + +```console +error[E0277]: the trait bound `std::thread::Thread: HeapSize` is not satisfied + --> src/main.rs:7:5 + | +7 | bad: std::thread::Thread, + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `HeapSize` is not implemented for `std::thread::Thread` +``` + +
+ +## Parsing a custom syntax + +The [`lazy-static`] example directory shows the implementation of a +`functionlike!(...)` procedural macro in which the input tokens are parsed using +Syn's parsing API. + +[`lazy-static`]: examples/lazy-static + +The example reimplements the popular `lazy_static` crate from crates.io as a +procedural macro. + +```rust +lazy_static! { + static ref USERNAME: Regex = Regex::new("^[a-z0-9_-]{3,16}$").unwrap(); +} +``` + +The implementation shows how to trigger custom warnings and error messages on +the macro input. + +```console +warning: come on, pick a more creative name + --> src/main.rs:10:16 + | +10 | static ref FOO: String = "lazy_static".to_owned(); + | ^^^ +``` + +
+ +## Testing + +When testing macros, we often care not just that the macro can be used +successfully but also that when the macro is provided with invalid input it +produces maximally helpful error messages. Consider using the [`trybuild`] crate +to write tests for errors that are emitted by your macro or errors detected by +the Rust compiler in the expanded code following misuse of the macro. Such tests +help avoid regressions from later refactors that mistakenly make an error no +longer trigger or be less helpful than it used to be. + +[`trybuild`]: https://github.com/dtolnay/trybuild + +
+ +## Debugging + +When developing a procedural macro it can be helpful to look at what the +generated code looks like. Use `cargo rustc -- -Zunstable-options +--pretty=expanded` or the [`cargo expand`] subcommand. + +[`cargo expand`]: https://github.com/dtolnay/cargo-expand + +To show the expanded code for some crate that uses your procedural macro, run +`cargo expand` from that crate. To show the expanded code for one of your own +test cases, run `cargo expand --test the_test_case` where the last argument is +the name of the test file without the `.rs` extension. + +This write-up by Brandon W Maister discusses debugging in more detail: +[Debugging Rust's new Custom Derive system][debugging]. + +[debugging]: https://quodlibetor.github.io/posts/debugging-rusts-new-custom-derive-system/ + +
+ +## Optional features + +Syn puts a lot of functionality behind optional features in order to optimize +compile time for the most common use cases. The following features are +available. + +- **`derive`** *(enabled by default)* — Data structures for representing the + possible input to a derive macro, including structs and enums and types. +- **`full`** — Data structures for representing the syntax tree of all valid + Rust source code, including items and expressions. +- **`parsing`** *(enabled by default)* — Ability to parse input tokens into a + syntax tree node of a chosen type. +- **`printing`** *(enabled by default)* — Ability to print a syntax tree node as + tokens of Rust source code. +- **`visit`** — Trait for traversing a syntax tree. +- **`visit-mut`** — Trait for traversing and mutating in place a syntax tree. +- **`fold`** — Trait for transforming an owned syntax tree. +- **`clone-impls`** *(enabled by default)* — Clone impls for all syntax tree + types. +- **`extra-traits`** — Debug, Eq, PartialEq, Hash impls for all syntax tree + types. +- **`proc-macro`** *(enabled by default)* — Runtime dependency on the dynamic + library libproc_macro from rustc toolchain. + +
+ +## Proc macro shim + +Syn operates on the token representation provided by the [proc-macro2] crate +from crates.io rather than using the compiler's built in proc-macro crate +directly. This enables code using Syn to execute outside of the context of a +procedural macro, such as in unit tests or build.rs, and we avoid needing +incompatible ecosystems for proc macros vs non-macro use cases. + +In general all of your code should be written against proc-macro2 rather than +proc-macro. The one exception is in the signatures of procedural macro entry +points, which are required by the language to use `proc_macro::TokenStream`. + +The proc-macro2 crate will automatically detect and use the compiler's data +structures when a procedural macro is active. + +[proc-macro2]: https://docs.rs/proc-macro2/1.0/proc_macro2/ + +
+ +#### License + + +Licensed under either of
Apache License, Version +2.0 or MIT license at your option. + + +
+ + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. + diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/build.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a2c077bf0e17b992e375e67374f4b5eb3439751 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-1.0.109/build.rs @@ -0,0 +1,51 @@ +use std::env; +use std::process::Command; +use std::str; + +// The rustc-cfg strings below are *not* public API. Please let us know by +// opening a GitHub issue if your build environment requires some way to enable +// these cfgs other than by executing our build script. +fn main() { + let compiler = match rustc_version() { + Some(compiler) => compiler, + None => return, + }; + + if compiler.minor < 36 { + println!("cargo:rustc-cfg=syn_omit_await_from_token_macro"); + } + + if compiler.minor < 39 { + println!("cargo:rustc-cfg=syn_no_const_vec_new"); + } + + if compiler.minor < 40 { + println!("cargo:rustc-cfg=syn_no_non_exhaustive"); + } + + if compiler.minor < 56 { + println!("cargo:rustc-cfg=syn_no_negative_literal_parse"); + } + + if !compiler.nightly { + println!("cargo:rustc-cfg=syn_disable_nightly_tests"); + } +} + +struct Compiler { + minor: u32, + nightly: bool, +} + +fn rustc_version() -> Option { + let rustc = env::var_os("RUSTC")?; + let output = Command::new(rustc).arg("--version").output().ok()?; + let version = str::from_utf8(&output.stdout).ok()?; + let mut pieces = version.split('.'); + if pieces.next() != Some("rustc 1") { + return None; + } + let minor = pieces.next()?.parse().ok()?; + let nightly = version.contains("nightly") || version.ends_with("-dev"); + Some(Compiler { minor, nightly }) +} diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..d7f3af32ad8701821bc91c301b982303c909a5f6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "211e0dff48b030dbd15e17cb7efda8c9067a26cc" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..2d2070505b535e5fd434a3b78aaa9b20e53f7617 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/Cargo.lock @@ -0,0 +1,2093 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "automod" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebb4bd301db2e2ca1f5be131c24eb8ebf2d9559bc3744419e93baf8ddea7e670" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "aws-lc-rs" +version = "1.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a88aab2464f1f25453baa7a07c84c5b7684e274054ba06817f382357f77a288" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b45afffdee1e7c9126814751f88dddc747f41d91da16c9551a0f1e8a11e788a1" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "bumpalo" +version = "3.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" + +[[package]] 
+name = "bytes" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" + +[[package]] +name = "cc" +version = "1.2.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a0aeaff4ff1a90589618835a598e545176939b97874f7abc7851caa0618f203" +dependencies = [ + "find-msvc-tools", + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "cmake" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +dependencies = [ + "cc", +] + +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "console" +version = "0.15.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "windows-sys 0.59.0", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "filetime" +version = "0.2.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "windows-sys 0.60.2", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645cbb3a84e60b7531617d5ae4e57f7e27308f6445f5abf653209ea76dec8dff" + +[[package]] +name = "flate2" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasip2", + "wasm-bindgen", +] + +[[package]] +name = "h2" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "hyper" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" +dependencies = [ + "base64", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2", + "system-configuration", + "tokio", + "tower-service", + "tracing", + "windows-registry", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "insta" +version = "1.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b66886d14d18d420ab5052cbff544fc5d34d0b2cdd35eb5976aaa10a4a472e5" +dependencies = [ + "console", + "once_cell", + "similar", + "tempfile", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "itoa" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "libc" +version = "0.2.179" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5a2d376baa530d1238d133232d15e239abad80d05838b4b59354e5268af431f" + +[[package]] +name = "libredox" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" +dependencies = [ + "bitflags", + "libc", + "redox_syscall", +] + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "mime" +version = "0.3.17" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl-probe" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391" + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "proc-macro2" +version = "1.0.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2", + "thiserror 2.0.17", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "aws-lc-rs", + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.17", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.60.2", +] + +[[package]] +name = "quote" +version = "1.0.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] 
+name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27" +dependencies = [ + "bitflags", +] + +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] 
+name = "reqwest" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04e9018c9d814e5f30cc16a0f03271aeab3571e609612d9fe78c1aa8d11c2f62" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + "mime", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "rustls-platform-verifier", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustix" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls" +version = "0.23.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" +dependencies = [ + "aws-lc-rs", + "once_cell", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pki-types" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-platform-verifier" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + +[[package]] +name = "rustls-webpki" +version = "0.103.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +dependencies = [ + "aws-lc-rs", + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.28" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "security-framework" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" +dependencies = [ + "bitflags", + "core-foundation 0.10.1", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "simd-adler32" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" + +[[package]] +name = "similar" +version = "2.7.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.113" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678faa00651c9eb72dd2020cbdf275d92eccb2400d568e419efdd64838145cb4" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.114" +dependencies = [ + "anyhow", + "automod", + "flate2", + "insta", + "proc-macro2", + "quote", + "rayon", + "ref-cast", + "reqwest", + "rustversion", + "syn-test-suite", + "tar", + "termcolor", + "unicode-ident", + "walkdir", +] + +[[package]] +name = "syn-test-suite" +version = "0.0.0+test" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0d661992f60e67c8bdd9a7d6360d30d1301f5783abf7d59933844f656762eb5" + +[[package]] +name = "sync_wrapper" +version = 
"1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tar" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" +dependencies = [ + "filetime", + "libc", + "xattr", +] + +[[package]] +name = "tempfile" +version = "3.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" +dependencies = [ + "fastrand", + "getrandom 0.3.4", + "once_cell", + "rustix", + "windows-sys 0.61.2", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl 2.0.17", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.49.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" +dependencies = [ + "bytes", + "libc", + "mio", + "pin-project-lite", + "socket2", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn 2.0.113", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-root-certs" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" +dependencies = [ + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" 
+version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "xattr" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" +dependencies = [ + "libc", + "rustix", +] + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fabae64378cb18147bb18bca364e63bdbe72a0ffe4adf0addfec8aa166b2c56" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9c2d862265a8bb4471d87e033e730f536e2a285cc7cb05dbce09a2a97075f90" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.113", +] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..73518ffee4bd8fca8379347d4ea9f197305c1c28 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/Cargo.toml @@ -0,0 +1,272 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.68" +name = "syn" +version = "2.0.114" +authors = ["David Tolnay "] +build = false +include = [ + "/benches/**", + "/Cargo.toml", + "/LICENSE-APACHE", + "/LICENSE-MIT", + "/README.md", + "/src/**", + "/tests/**", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Parser for Rust source code" +documentation = "https://docs.rs/syn" +readme = "README.md" +keywords = [ + "macros", + "syn", +] +categories = [ + "development-tools::procedural-macro-helpers", + "parser-implementations", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/syn" + +[package.metadata.docs.rs] +all-features = true +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = [ + "--generate-link-to-definition", + "--generate-macro-expansion", + "--extend-css=src/gen/token.css", + "--extern-html-root-url=core=https://doc.rust-lang.org", + "--extern-html-root-url=alloc=https://doc.rust-lang.org", + "--extern-html-root-url=std=https://doc.rust-lang.org", + "--extern-html-root-url=proc_macro=https://doc.rust-lang.org", +] + +[package.metadata.playground] +features = [ + "full", + "visit", + "visit-mut", + "fold", + "extra-traits", +] + +[features] +clone-impls = [] +default = [ + "derive", + "parsing", + "printing", + "clone-impls", + "proc-macro", +] +derive = [] +extra-traits = [] +fold = [] +full = [] +parsing = [] +printing = ["dep:quote"] +proc-macro = [ + "proc-macro2/proc-macro", + "quote?/proc-macro", +] +test = ["syn-test-suite/all-features"] +visit = [] +visit-mut = [] + +[lib] +name = "syn" +path = "src/lib.rs" + +[[test]] +name = "regression" +path = "tests/regression.rs" + +[[test]] +name = "test_asyncness" +path = "tests/test_asyncness.rs" + +[[test]] +name = "test_attribute" +path = "tests/test_attribute.rs" + +[[test]] +name = "test_derive_input" +path = "tests/test_derive_input.rs" + +[[test]] +name = "test_expr" +path = "tests/test_expr.rs" + 
+[[test]] +name = "test_generics" +path = "tests/test_generics.rs" + +[[test]] +name = "test_grouping" +path = "tests/test_grouping.rs" + +[[test]] +name = "test_ident" +path = "tests/test_ident.rs" + +[[test]] +name = "test_item" +path = "tests/test_item.rs" + +[[test]] +name = "test_lit" +path = "tests/test_lit.rs" + +[[test]] +name = "test_meta" +path = "tests/test_meta.rs" + +[[test]] +name = "test_parse_buffer" +path = "tests/test_parse_buffer.rs" + +[[test]] +name = "test_parse_quote" +path = "tests/test_parse_quote.rs" + +[[test]] +name = "test_parse_stream" +path = "tests/test_parse_stream.rs" + +[[test]] +name = "test_pat" +path = "tests/test_pat.rs" + +[[test]] +name = "test_path" +path = "tests/test_path.rs" + +[[test]] +name = "test_precedence" +path = "tests/test_precedence.rs" + +[[test]] +name = "test_punctuated" +path = "tests/test_punctuated.rs" + +[[test]] +name = "test_receiver" +path = "tests/test_receiver.rs" + +[[test]] +name = "test_round_trip" +path = "tests/test_round_trip.rs" + +[[test]] +name = "test_shebang" +path = "tests/test_shebang.rs" + +[[test]] +name = "test_size" +path = "tests/test_size.rs" + +[[test]] +name = "test_stmt" +path = "tests/test_stmt.rs" + +[[test]] +name = "test_token_trees" +path = "tests/test_token_trees.rs" + +[[test]] +name = "test_ty" +path = "tests/test_ty.rs" + +[[test]] +name = "test_unparenthesize" +path = "tests/test_unparenthesize.rs" + +[[test]] +name = "test_visibility" +path = "tests/test_visibility.rs" + +[[test]] +name = "zzz_stable" +path = "tests/zzz_stable.rs" + +[[bench]] +name = "file" +path = "benches/file.rs" +required-features = [ + "full", + "parsing", +] + +[[bench]] +name = "rust" +path = "benches/rust.rs" +harness = false +required-features = [ + "full", + "parsing", +] + +[dependencies.proc-macro2] +version = "1.0.91" +default-features = false + +[dependencies.quote] +version = "1.0.35" +optional = true +default-features = false + +[dependencies.unicode-ident] +version = "1" + 
+[dev-dependencies.anyhow] +version = "1" + +[dev-dependencies.automod] +version = "1" + +[dev-dependencies.insta] +version = "1" + +[dev-dependencies.ref-cast] +version = "1" + +[dev-dependencies.rustversion] +version = "1" + +[dev-dependencies.syn-test-suite] +version = "0" + +[dev-dependencies.termcolor] +version = "1" + +[target."cfg(not(miri))".dev-dependencies.flate2] +version = "1" + +[target."cfg(not(miri))".dev-dependencies.rayon] +version = "1" + +[target."cfg(not(miri))".dev-dependencies.reqwest] +version = "0.13" +features = ["blocking"] + +[target."cfg(not(miri))".dev-dependencies.tar] +version = "0.4.16" + +[target."cfg(not(miri))".dev-dependencies.walkdir] +version = "2.3.2" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..62d7cdc7a728776471cec6b162b27e06cbe8e32f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/Cargo.toml.orig @@ -0,0 +1,95 @@ +[package] +name = "syn" +version = "2.0.114" +authors = ["David Tolnay "] +categories = ["development-tools::procedural-macro-helpers", "parser-implementations"] +description = "Parser for Rust source code" +documentation = "https://docs.rs/syn" +edition = "2021" +include = [ + "/benches/**", + "/Cargo.toml", + "/LICENSE-APACHE", + "/LICENSE-MIT", + "/README.md", + "/src/**", + "/tests/**", +] +keywords = ["macros", "syn"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/syn" +rust-version = "1.68" + +[features] +default = ["derive", "parsing", "printing", "clone-impls", "proc-macro"] +derive = [] +full = [] +parsing = [] +printing = ["dep:quote"] +visit = [] +visit-mut = [] +fold = [] +clone-impls = [] +extra-traits = [] +proc-macro = ["proc-macro2/proc-macro", "quote?/proc-macro"] +test = ["syn-test-suite/all-features"] + +[dependencies] +proc-macro2 = { 
version = "1.0.91", default-features = false } +quote = { version = "1.0.35", optional = true, default-features = false } +unicode-ident = "1" + +[dev-dependencies] +anyhow = "1" +automod = "1" +insta = "1" +ref-cast = "1" +rustversion = "1" +syn-test-suite = { version = "0", path = "tests/features" } +termcolor = "1" + +[target.'cfg(not(miri))'.dev-dependencies] +flate2 = "1" +rayon = "1" +reqwest = { version = "0.13", features = ["blocking"] } +tar = "0.4.16" +walkdir = "2.3.2" + +[[bench]] +name = "rust" +harness = false +required-features = ["full", "parsing"] + +[[bench]] +name = "file" +required-features = ["full", "parsing"] + +[package.metadata.docs.rs] +all-features = true +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = [ + "--generate-link-to-definition", + "--generate-macro-expansion", + "--extend-css=src/gen/token.css", + "--extern-html-root-url=core=https://doc.rust-lang.org", + "--extern-html-root-url=alloc=https://doc.rust-lang.org", + "--extern-html-root-url=std=https://doc.rust-lang.org", + "--extern-html-root-url=proc_macro=https://doc.rust-lang.org", +] + +[package.metadata.playground] +features = ["full", "visit", "visit-mut", "fold", "extra-traits"] + +[workspace] +members = [ + "dev", + "examples/dump-syntax", + "examples/heapsize/example", + "examples/heapsize/heapsize", + "examples/heapsize/heapsize_derive", + "examples/lazy-static/example", + "examples/lazy-static/lazy-static", + "examples/trace-var/example", + "examples/trace-var/trace-var", + "tests/features", +] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..1b5ec8b78e237b5c3b3d812a7c0a6589d0f7161d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + 
http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..31aa79387f27e730e33d871925e152e35e428031 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9ae472ea6699740f96c42e7ca9bb86d359fdeaab --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/syn-2.0.114/README.md @@ -0,0 +1,282 @@ +Parser for Rust source code +=========================== + +[github](https://github.com/dtolnay/syn) +[crates.io](https://crates.io/crates/syn) +[docs.rs](https://docs.rs/syn) +[build status](https://github.com/dtolnay/syn/actions?query=branch%3Amaster) + +Syn is a parsing library for parsing a stream of Rust tokens into a syntax tree +of Rust source code. + +Currently this library is geared toward use in Rust procedural macros, but +contains some APIs that may be useful more generally. + +- **Data structures** — Syn provides a complete syntax tree that can represent + any valid Rust source code. The syntax tree is rooted at [`syn::File`] which + represents a full source file, but there are other entry points that may be + useful to procedural macros including [`syn::Item`], [`syn::Expr`] and + [`syn::Type`]. + +- **Derives** — Of particular interest to derive macros is [`syn::DeriveInput`] + which is any of the three legal input items to a derive macro. An example + below shows using this type in a library that can derive implementations of a + user-defined trait. + +- **Parsing** — Parsing in Syn is built around [parser functions] with the + signature `fn(ParseStream) -> Result`. Every syntax tree node defined by + Syn is individually parsable and may be used as a building block for custom + syntaxes, or you may dream up your own brand new syntax without involving any + of our syntax tree types. + +- **Location information** — Every token parsed by Syn is associated with a + `Span` that tracks line and column information back to the source of that + token. 
These spans allow a procedural macro to display detailed error messages + pointing to all the right places in the user's code. There is an example of + this below. + +- **Feature flags** — Functionality is aggressively feature gated so your + procedural macros enable only what they need, and do not pay in compile time + for all the rest. + +[`syn::File`]: https://docs.rs/syn/2.0/syn/struct.File.html +[`syn::Item`]: https://docs.rs/syn/2.0/syn/enum.Item.html +[`syn::Expr`]: https://docs.rs/syn/2.0/syn/enum.Expr.html +[`syn::Type`]: https://docs.rs/syn/2.0/syn/enum.Type.html +[`syn::DeriveInput`]: https://docs.rs/syn/2.0/syn/struct.DeriveInput.html +[parser functions]: https://docs.rs/syn/2.0/syn/parse/index.html + +[*Release notes*](https://github.com/dtolnay/syn/releases) + +
+ +## Resources + +The best way to learn about procedural macros is by writing some. Consider +working through [this procedural macro workshop][workshop] to get familiar with +the different types of procedural macros. The workshop contains relevant links +into the Syn documentation as you work through each project. + +[workshop]: https://github.com/dtolnay/proc-macro-workshop + +
+ +## Example of a derive macro + +The canonical derive macro using Syn looks like this. We write an ordinary Rust +function tagged with a `proc_macro_derive` attribute and the name of the trait +we are deriving. Any time that derive appears in the user's code, the Rust +compiler passes their data structure as tokens into our macro. We get to execute +arbitrary Rust code to figure out what to do with those tokens, then hand some +tokens back to the compiler to compile into the user's crate. + +[`TokenStream`]: https://doc.rust-lang.org/proc_macro/struct.TokenStream.html + +```toml +[dependencies] +syn = "2.0" +quote = "1.0" + +[lib] +proc-macro = true +``` + +```rust +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, DeriveInput}; + +#[proc_macro_derive(MyMacro)] +pub fn my_macro(input: TokenStream) -> TokenStream { + // Parse the input tokens into a syntax tree + let input = parse_macro_input!(input as DeriveInput); + + // Build the output, possibly using quasi-quotation + let expanded = quote! { + // ... + }; + + // Hand the output tokens back to the compiler + TokenStream::from(expanded) +} +``` + +The [`heapsize`] example directory shows a complete working implementation of a +derive macro. The example derives a `HeapSize` trait which computes an estimate +of the amount of heap memory owned by a value. + +[`heapsize`]: examples/heapsize + +```rust +pub trait HeapSize { + /// Total number of bytes of heap memory owned by `self`. + fn heap_size_of_children(&self) -> usize; +} +``` + +The derive macro allows users to write `#[derive(HeapSize)]` on data structures +in their program. + +```rust +#[derive(HeapSize)] +struct Demo<'a, T: ?Sized> { + a: Box, + b: u8, + c: &'a str, + d: String, +} +``` + +
+ +## Spans and error reporting + +The token-based procedural macro API provides great control over where the +compiler's error messages are displayed in user code. Consider the error the +user sees if one of their field types does not implement `HeapSize`. + +```rust +#[derive(HeapSize)] +struct Broken { + ok: String, + bad: std::thread::Thread, +} +``` + +By tracking span information all the way through the expansion of a procedural +macro as shown in the `heapsize` example, token-based macros in Syn are able to +trigger errors that directly pinpoint the source of the problem. + +```console +error[E0277]: the trait bound `std::thread::Thread: HeapSize` is not satisfied + --> src/main.rs:7:5 + | +7 | bad: std::thread::Thread, + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `HeapSize` is not implemented for `std::thread::Thread` +``` + +
+ +## Parsing a custom syntax + +The [`lazy-static`] example directory shows the implementation of a +`functionlike!(...)` procedural macro in which the input tokens are parsed using +Syn's parsing API. + +[`lazy-static`]: examples/lazy-static + +The example reimplements the popular `lazy_static` crate from crates.io as a +procedural macro. + +```rust +lazy_static! { + static ref USERNAME: Regex = Regex::new("^[a-z0-9_-]{3,16}$").unwrap(); +} +``` + +The implementation shows how to trigger custom warnings and error messages on +the macro input. + +```console +warning: come on, pick a more creative name + --> src/main.rs:10:16 + | +10 | static ref FOO: String = "lazy_static".to_owned(); + | ^^^ +``` + +
+ +## Testing + +When testing macros, we often care not just that the macro can be used +successfully but also that when the macro is provided with invalid input it +produces maximally helpful error messages. Consider using the [`trybuild`] crate +to write tests for errors that are emitted by your macro or errors detected by +the Rust compiler in the expanded code following misuse of the macro. Such tests +help avoid regressions from later refactors that mistakenly make an error no +longer trigger or be less helpful than it used to be. + +[`trybuild`]: https://github.com/dtolnay/trybuild + +
+ +## Debugging + +When developing a procedural macro it can be helpful to look at what the +generated code looks like. Use `cargo rustc -- -Zunstable-options +--pretty=expanded` or the [`cargo expand`] subcommand. + +[`cargo expand`]: https://github.com/dtolnay/cargo-expand + +To show the expanded code for some crate that uses your procedural macro, run +`cargo expand` from that crate. To show the expanded code for one of your own +test cases, run `cargo expand --test the_test_case` where the last argument is +the name of the test file without the `.rs` extension. + +This write-up by Brandon W Maister discusses debugging in more detail: +[Debugging Rust's new Custom Derive system][debugging]. + +[debugging]: https://quodlibetor.github.io/posts/debugging-rusts-new-custom-derive-system/ + +
+ +## Optional features + +Syn puts a lot of functionality behind optional features in order to optimize +compile time for the most common use cases. The following features are +available. + +- **`derive`** *(enabled by default)* — Data structures for representing the + possible input to a derive macro, including structs and enums and types. +- **`full`** — Data structures for representing the syntax tree of all valid + Rust source code, including items and expressions. +- **`parsing`** *(enabled by default)* — Ability to parse input tokens into a + syntax tree node of a chosen type. +- **`printing`** *(enabled by default)* — Ability to print a syntax tree node as + tokens of Rust source code. +- **`visit`** — Trait for traversing a syntax tree. +- **`visit-mut`** — Trait for traversing and mutating in place a syntax tree. +- **`fold`** — Trait for transforming an owned syntax tree. +- **`clone-impls`** *(enabled by default)* — Clone impls for all syntax tree + types. +- **`extra-traits`** — Debug, Eq, PartialEq, Hash impls for all syntax tree + types. +- **`proc-macro`** *(enabled by default)* — Runtime dependency on the dynamic + library libproc_macro from rustc toolchain. + +
+ +## Proc macro shim + +Syn operates on the token representation provided by the [proc-macro2] crate +from crates.io rather than using the compiler's built in proc-macro crate +directly. This enables code using Syn to execute outside of the context of a +procedural macro, such as in unit tests or build.rs, and we avoid needing +incompatible ecosystems for proc macros vs non-macro use cases. + +In general all of your code should be written against proc-macro2 rather than +proc-macro. The one exception is in the signatures of procedural macro entry +points, which are required by the language to use `proc_macro::TokenStream`. + +The proc-macro2 crate will automatically detect and use the compiler's data +structures when a procedural macro is active. + +[proc-macro2]: https://docs.rs/proc-macro2/1.0/proc_macro2/ + +
+ +#### License + + +Licensed under either of Apache License, Version +2.0 or MIT license at your option. + + +
+ + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. + diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/add_extension.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/add_extension.rs new file mode 100644 index 0000000000000000000000000000000000000000..095646df373783a96a89dbcd377fe08fda911ea7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/add_extension.rs @@ -0,0 +1,167 @@ +//! Middleware that clones a value into each request's [extensions]. +//! +//! [extensions]: https://docs.rs/http/latest/http/struct.Extensions.html +//! +//! # Example +//! +//! ``` +//! use tower_http::add_extension::AddExtensionLayer; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use http::{Request, Response}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! use std::{sync::Arc, convert::Infallible}; +//! +//! # struct DatabaseConnectionPool; +//! # impl DatabaseConnectionPool { +//! # fn new() -> DatabaseConnectionPool { DatabaseConnectionPool } +//! # } +//! # +//! // Shared state across all request handlers --- in this case, a pool of database connections. +//! struct State { +//! pool: DatabaseConnectionPool, +//! } +//! +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // Grab the state from the request extensions. +//! let state = req.extensions().get::>().unwrap(); +//! +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! // Construct the shared state. +//! let state = State { +//! pool: DatabaseConnectionPool::new(), +//! }; +//! +//! let mut service = ServiceBuilder::new() +//! // Share an `Arc` with all requests. +//! 
.layer(AddExtensionLayer::new(Arc::new(state))) +//! .service_fn(handle); +//! +//! // Call the service. +//! let response = service +//! .ready() +//! .await? +//! .call(Request::new(Full::default())) +//! .await?; +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response}; +use std::task::{Context, Poll}; +use tower_layer::Layer; +use tower_service::Service; + +/// [`Layer`] for adding some shareable value to [request extensions]. +/// +/// See the [module docs](crate::add_extension) for more details. +/// +/// [request extensions]: https://docs.rs/http/latest/http/struct.Extensions.html +#[derive(Clone, Copy, Debug)] +pub struct AddExtensionLayer { + value: T, +} + +impl AddExtensionLayer { + /// Create a new [`AddExtensionLayer`]. + pub fn new(value: T) -> Self { + AddExtensionLayer { value } + } +} + +impl Layer for AddExtensionLayer +where + T: Clone, +{ + type Service = AddExtension; + + fn layer(&self, inner: S) -> Self::Service { + AddExtension { + inner, + value: self.value.clone(), + } + } +} + +/// Middleware for adding some shareable value to [request extensions]. +/// +/// See the [module docs](crate::add_extension) for more details. +/// +/// [request extensions]: https://docs.rs/http/latest/http/struct.Extensions.html +#[derive(Clone, Copy, Debug)] +pub struct AddExtension { + inner: S, + value: T, +} + +impl AddExtension { + /// Create a new [`AddExtension`]. + pub fn new(inner: S, value: T) -> Self { + Self { inner, value } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `AddExtension` middleware. 
+ /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(value: T) -> AddExtensionLayer { + AddExtensionLayer::new(value) + } +} + +impl Service> for AddExtension +where + S: Service, Response = Response>, + T: Clone + Send + Sync + 'static, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + req.extensions_mut().insert(self.value.clone()); + self.inner.call(req) + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use crate::test_helpers::Body; + use http::Response; + use std::{convert::Infallible, sync::Arc}; + use tower::{service_fn, ServiceBuilder, ServiceExt}; + + struct State(i32); + + #[tokio::test] + async fn basic() { + let state = Arc::new(State(1)); + + let svc = ServiceBuilder::new() + .layer(AddExtensionLayer::new(state)) + .service(service_fn(|req: Request| async move { + let state = req.extensions().get::>().unwrap(); + Ok::<_, Infallible>(Response::new(state.0)) + })); + + let res = svc + .oneshot(Request::new(Body::empty())) + .await + .unwrap() + .into_body(); + + assert_eq!(1, res); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/add_authorization.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/add_authorization.rs new file mode 100644 index 0000000000000000000000000000000000000000..246c13b6c3f6dab3a0c45fe2e3cdf2fade804060 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/add_authorization.rs @@ -0,0 +1,267 @@ +//! Add authorization to requests using the [`Authorization`] header. +//! +//! [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +//! +//! # Example +//! +//! ``` +//! 
use tower_http::validate_request::{ValidateRequestHeader, ValidateRequestHeaderLayer}; +//! use tower_http::auth::AddAuthorizationLayer; +//! use http::{Request, Response, StatusCode, header::AUTHORIZATION}; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! # async fn handle(request: Request>) -> Result>, BoxError> { +//! # Ok(Response::new(Full::default())) +//! # } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! # let service_that_requires_auth = ValidateRequestHeader::basic( +//! # tower::service_fn(handle), +//! # "username", +//! # "password", +//! # ); +//! let mut client = ServiceBuilder::new() +//! // Use basic auth with the given username and password +//! .layer(AddAuthorizationLayer::basic("username", "password")) +//! .service(service_that_requires_auth); +//! +//! // Make a request, we don't have to add the `Authorization` header manually +//! let response = client +//! .ready() +//! .await? +//! .call(Request::new(Full::default())) +//! .await?; +//! +//! assert_eq!(StatusCode::OK, response.status()); +//! # Ok(()) +//! # } +//! ``` + +use base64::Engine as _; +use http::{HeaderValue, Request, Response}; +use std::{ + convert::TryFrom, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +const BASE64: base64::engine::GeneralPurpose = base64::engine::general_purpose::STANDARD; + +/// Layer that applies [`AddAuthorization`] which adds authorization to all requests using the +/// [`Authorization`] header. +/// +/// See the [module docs](crate::auth::add_authorization) for an example. +/// +/// You can also use [`SetRequestHeader`] if you have a use case that isn't supported by this +/// middleware. 
+/// +/// [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +/// [`SetRequestHeader`]: crate::set_header::SetRequestHeader +#[derive(Debug, Clone)] +pub struct AddAuthorizationLayer { + value: HeaderValue, +} + +impl AddAuthorizationLayer { + /// Authorize requests using a username and password pair. + /// + /// The `Authorization` header will be set to `Basic {credentials}` where `credentials` is + /// `base64_encode("{username}:{password}")`. + /// + /// Since the username and password is sent in clear text it is recommended to use HTTPS/TLS + /// with this method. However use of HTTPS/TLS is not enforced by this middleware. + pub fn basic(username: &str, password: &str) -> Self { + let encoded = BASE64.encode(format!("{}:{}", username, password)); + let value = HeaderValue::try_from(format!("Basic {}", encoded)).unwrap(); + Self { value } + } + + /// Authorize requests using a "bearer token". Commonly used for OAuth 2. + /// + /// The `Authorization` header will be set to `Bearer {token}`. + /// + /// # Panics + /// + /// Panics if the token is not a valid [`HeaderValue`]. + pub fn bearer(token: &str) -> Self { + let value = + HeaderValue::try_from(format!("Bearer {}", token)).expect("token is not valid header"); + Self { value } + } + + /// Mark the header as [sensitive]. + /// + /// This can for example be used to hide the header value from logs. + /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + #[allow(clippy::wrong_self_convention)] + pub fn as_sensitive(mut self, sensitive: bool) -> Self { + self.value.set_sensitive(sensitive); + self + } +} + +impl Layer for AddAuthorizationLayer { + type Service = AddAuthorization; + + fn layer(&self, inner: S) -> Self::Service { + AddAuthorization { + inner, + value: self.value.clone(), + } + } +} + +/// Middleware that adds authorization all requests using the [`Authorization`] header. 
+/// +/// See the [module docs](crate::auth::add_authorization) for an example. +/// +/// You can also use [`SetRequestHeader`] if you have a use case that isn't supported by this +/// middleware. +/// +/// [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +/// [`SetRequestHeader`]: crate::set_header::SetRequestHeader +#[derive(Debug, Clone)] +pub struct AddAuthorization { + inner: S, + value: HeaderValue, +} + +impl AddAuthorization { + /// Authorize requests using a username and password pair. + /// + /// The `Authorization` header will be set to `Basic {credentials}` where `credentials` is + /// `base64_encode("{username}:{password}")`. + /// + /// Since the username and password is sent in clear text it is recommended to use HTTPS/TLS + /// with this method. However use of HTTPS/TLS is not enforced by this middleware. + pub fn basic(inner: S, username: &str, password: &str) -> Self { + AddAuthorizationLayer::basic(username, password).layer(inner) + } + + /// Authorize requests using a "bearer token". Commonly used for OAuth 2. + /// + /// The `Authorization` header will be set to `Bearer {token}`. + /// + /// # Panics + /// + /// Panics if the token is not a valid [`HeaderValue`]. + pub fn bearer(inner: S, token: &str) -> Self { + AddAuthorizationLayer::bearer(token).layer(inner) + } + + define_inner_service_accessors!(); + + /// Mark the header as [sensitive]. + /// + /// This can for example be used to hide the header value from logs. 
+ /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + #[allow(clippy::wrong_self_convention)] + pub fn as_sensitive(mut self, sensitive: bool) -> Self { + self.value.set_sensitive(sensitive); + self + } +} + +impl Service> for AddAuthorization +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + req.headers_mut() + .insert(http::header::AUTHORIZATION, self.value.clone()); + self.inner.call(req) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_helpers::Body; + use crate::validate_request::ValidateRequestHeaderLayer; + use http::{Response, StatusCode}; + use std::convert::Infallible; + use tower::{BoxError, Service, ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn basic() { + // service that requires auth for all requests + let svc = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::basic("foo", "bar")) + .service_fn(echo); + + // make a client that adds auth + let mut client = AddAuthorization::basic(svc, "foo", "bar"); + + let res = client + .ready() + .await + .unwrap() + .call(Request::new(Body::empty())) + .await + .unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn token() { + // service that requires auth for all requests + let svc = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::bearer("foo")) + .service_fn(echo); + + // make a client that adds auth + let mut client = AddAuthorization::bearer(svc, "foo"); + + let res = client + .ready() + .await + .unwrap() + .call(Request::new(Body::empty())) + .await + .unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn making_header_sensitive() { + let svc = ServiceBuilder::new() + 
.layer(ValidateRequestHeaderLayer::bearer("foo")) + .service_fn(|request: Request| async move { + let auth = request.headers().get(http::header::AUTHORIZATION).unwrap(); + assert!(auth.is_sensitive()); + + Ok::<_, Infallible>(Response::new(Body::empty())) + }); + + let mut client = AddAuthorization::bearer(svc, "foo").as_sensitive(true); + + let res = client + .ready() + .await + .unwrap() + .call(Request::new(Body::empty())) + .await + .unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/async_require_authorization.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/async_require_authorization.rs new file mode 100644 index 0000000000000000000000000000000000000000..fda9abea3db1a3426c65961399d00e450223a417 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/async_require_authorization.rs @@ -0,0 +1,385 @@ +//! Authorize requests using the [`Authorization`] header asynchronously. +//! +//! [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +//! +//! # Example +//! +//! ``` +//! use tower_http::auth::{AsyncRequireAuthorizationLayer, AsyncAuthorizeRequest}; +//! use http::{Request, Response, StatusCode, header::AUTHORIZATION}; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! use futures_core::future::BoxFuture; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! +//! #[derive(Clone, Copy)] +//! struct MyAuth; +//! +//! impl AsyncAuthorizeRequest for MyAuth +//! where +//! B: Send + Sync + 'static, +//! { +//! type RequestBody = B; +//! type ResponseBody = Full; +//! type Future = BoxFuture<'static, Result, Response>>; +//! +//! fn authorize(&mut self, mut request: Request) -> Self::Future { +//! Box::pin(async { +//! 
if let Some(user_id) = check_auth(&request).await { +//! // Set `user_id` as a request extension so it can be accessed by other +//! // services down the stack. +//! request.extensions_mut().insert(user_id); +//! +//! Ok(request) +//! } else { +//! let unauthorized_response = Response::builder() +//! .status(StatusCode::UNAUTHORIZED) +//! .body(Full::::default()) +//! .unwrap(); +//! +//! Err(unauthorized_response) +//! } +//! }) +//! } +//! } +//! +//! async fn check_auth(request: &Request) -> Option { +//! // ... +//! # None +//! } +//! +//! #[derive(Debug, Clone)] +//! struct UserId(String); +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! // Access the `UserId` that was set in `on_authorized`. If `handle` gets called the +//! // request was authorized and `UserId` will be present. +//! let user_id = request +//! .extensions() +//! .get::() +//! .expect("UserId will be there if request was authorized"); +//! +//! println!("request from {:?}", user_id); +//! +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! let service = ServiceBuilder::new() +//! // Authorize requests using `MyAuth` +//! .layer(AsyncRequireAuthorizationLayer::new(MyAuth)) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` +//! +//! Or using a closure: +//! +//! ``` +//! use tower_http::auth::{AsyncRequireAuthorizationLayer, AsyncAuthorizeRequest}; +//! use http::{Request, Response, StatusCode}; +//! use tower::{Service, ServiceExt, ServiceBuilder, BoxError}; +//! use futures_core::future::BoxFuture; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! +//! async fn check_auth(request: &Request) -> Option { +//! // ... +//! # None +//! } +//! +//! #[derive(Debug)] +//! struct UserId(String); +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! # todo!(); +//! // ... +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! 
let service = ServiceBuilder::new() +//! .layer(AsyncRequireAuthorizationLayer::new(|request: Request>| async move { +//! if let Some(user_id) = check_auth(&request).await { +//! Ok(request) +//! } else { +//! let unauthorized_response = Response::builder() +//! .status(StatusCode::UNAUTHORIZED) +//! .body(Full::::default()) +//! .unwrap(); +//! +//! Err(unauthorized_response) +//! } +//! })) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response}; +use pin_project_lite::pin_project; +use std::{ + future::Future, + mem, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`AsyncRequireAuthorization`] which authorizes all requests using the +/// [`Authorization`] header. +/// +/// See the [module docs](crate::auth::async_require_authorization) for an example. +/// +/// [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +#[derive(Debug, Clone)] +pub struct AsyncRequireAuthorizationLayer { + auth: T, +} + +impl AsyncRequireAuthorizationLayer { + /// Authorize requests using a custom scheme. + pub fn new(auth: T) -> AsyncRequireAuthorizationLayer { + Self { auth } + } +} + +impl Layer for AsyncRequireAuthorizationLayer +where + T: Clone, +{ + type Service = AsyncRequireAuthorization; + + fn layer(&self, inner: S) -> Self::Service { + AsyncRequireAuthorization::new(inner, self.auth.clone()) + } +} + +/// Middleware that authorizes all requests using the [`Authorization`] header. +/// +/// See the [module docs](crate::auth::async_require_authorization) for an example. +/// +/// [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +#[derive(Clone, Debug)] +pub struct AsyncRequireAuthorization { + inner: S, + auth: T, +} + +impl AsyncRequireAuthorization { + define_inner_service_accessors!(); +} + +impl AsyncRequireAuthorization { + /// Authorize requests using a custom scheme. 
+ /// + /// The `Authorization` header is required to have the value provided. + pub fn new(inner: S, auth: T) -> AsyncRequireAuthorization { + Self { inner, auth } + } + + /// Returns a new [`Layer`] that wraps services with an [`AsyncRequireAuthorizationLayer`] + /// middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(auth: T) -> AsyncRequireAuthorizationLayer { + AsyncRequireAuthorizationLayer::new(auth) + } +} + +impl Service> for AsyncRequireAuthorization +where + Auth: AsyncAuthorizeRequest, + S: Service, Response = Response> + Clone, +{ + type Response = Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let mut inner = self.inner.clone(); + let authorize = self.auth.authorize(req); + // mem::swap due to https://docs.rs/tower/latest/tower/trait.Service.html#be-careful-when-cloning-inner-services + mem::swap(&mut self.inner, &mut inner); + + ResponseFuture { + state: State::Authorize { authorize }, + service: inner, + } + } +} + +pin_project! { + /// Response future for [`AsyncRequireAuthorization`]. + pub struct ResponseFuture + where + Auth: AsyncAuthorizeRequest, + S: Service>, + { + #[pin] + state: State, + service: S, + } +} + +pin_project! 
{ + #[project = StateProj] + enum State { + Authorize { + #[pin] + authorize: A, + }, + Authorized { + #[pin] + fut: SFut, + }, + } +} + +impl Future for ResponseFuture +where + Auth: AsyncAuthorizeRequest, + S: Service, Response = Response>, +{ + type Output = Result, S::Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + loop { + match this.state.as_mut().project() { + StateProj::Authorize { authorize } => { + let auth = ready!(authorize.poll(cx)); + match auth { + Ok(req) => { + let fut = this.service.call(req); + this.state.set(State::Authorized { fut }) + } + Err(res) => { + return Poll::Ready(Ok(res)); + } + }; + } + StateProj::Authorized { fut } => { + return fut.poll(cx); + } + } + } + } +} + +/// Trait for authorizing requests. +pub trait AsyncAuthorizeRequest { + /// The type of request body returned by `authorize`. + /// + /// Set this to `B` unless you need to change the request body type. + type RequestBody; + + /// The body type used for responses to unauthorized requests. + type ResponseBody; + + /// The Future type returned by `authorize` + type Future: Future, Response>>; + + /// Authorize the request. + /// + /// If the future resolves to `Ok(request)` then the request is allowed through, otherwise not. 
+ fn authorize(&mut self, request: Request) -> Self::Future; +} + +impl AsyncAuthorizeRequest for F +where + F: FnMut(Request) -> Fut, + Fut: Future, Response>>, +{ + type RequestBody = ReqBody; + type ResponseBody = ResBody; + type Future = Fut; + + fn authorize(&mut self, request: Request) -> Self::Future { + self(request) + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use crate::test_helpers::Body; + use futures_core::future::BoxFuture; + use http::{header, StatusCode}; + use tower::{BoxError, ServiceBuilder, ServiceExt}; + + #[derive(Clone, Copy)] + struct MyAuth; + + impl AsyncAuthorizeRequest for MyAuth + where + B: Send + 'static, + { + type RequestBody = B; + type ResponseBody = Body; + type Future = BoxFuture<'static, Result, Response>>; + + fn authorize(&mut self, request: Request) -> Self::Future { + Box::pin(async move { + let authorized = request + .headers() + .get(header::AUTHORIZATION) + .and_then(|auth| auth.to_str().ok()?.strip_prefix("Bearer ")) + == Some("69420"); + + if authorized { + Ok(request) + } else { + Err(Response::builder() + .status(StatusCode::UNAUTHORIZED) + .body(Body::empty()) + .unwrap()) + } + }) + } + } + + #[tokio::test] + async fn require_async_auth_works() { + let mut service = ServiceBuilder::new() + .layer(AsyncRequireAuthorizationLayer::new(MyAuth)) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "Bearer 69420") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn require_async_auth_401() { + let mut service = ServiceBuilder::new() + .layer(AsyncRequireAuthorizationLayer::new(MyAuth)) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "Bearer deez") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + 
assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..fc8c2308f11b84aa1ecd1b656896e1fe30678d1e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/mod.rs @@ -0,0 +1,13 @@ +//! Authorization related middleware. + +pub mod add_authorization; +pub mod async_require_authorization; +pub mod require_authorization; + +#[doc(inline)] +pub use self::{ + add_authorization::{AddAuthorization, AddAuthorizationLayer}, + async_require_authorization::{ + AsyncAuthorizeRequest, AsyncRequireAuthorization, AsyncRequireAuthorizationLayer, + }, +}; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/require_authorization.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/require_authorization.rs new file mode 100644 index 0000000000000000000000000000000000000000..0c2bfcba58a5e6c502573375844d2541d3ac122e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/auth/require_authorization.rs @@ -0,0 +1,405 @@ +#![deprecated(since = "0.6.7", note = "too basic to be useful in real applications")] +//! Authorize requests using [`ValidateRequest`]. +//! +//! [`Authorization`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization +//! +//! # Example +//! +//! ``` +//! use tower_http::validate_request::{ValidateRequest, ValidateRequestHeader, ValidateRequestHeaderLayer}; +//! use http::{Request, Response, StatusCode, header::AUTHORIZATION}; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! use bytes::Bytes; +//! 
use http_body_util::Full; +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! let mut service = ServiceBuilder::new() +//! // Require the `Authorization` header to be `Bearer passwordlol` +//! .layer(ValidateRequestHeaderLayer::bearer("passwordlol")) +//! .service_fn(handle); +//! +//! // Requests with the correct token are allowed through +//! let request = Request::builder() +//! .header(AUTHORIZATION, "Bearer passwordlol") +//! .body(Full::default()) +//! .unwrap(); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!(StatusCode::OK, response.status()); +//! +//! // Requests with an invalid token get a `401 Unauthorized` response +//! let request = Request::builder() +//! .body(Full::default()) +//! .unwrap(); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!(StatusCode::UNAUTHORIZED, response.status()); +//! # Ok(()) +//! # } +//! ``` +//! +//! Custom validation can be made by implementing [`ValidateRequest`]. + +use crate::validate_request::{ValidateRequest, ValidateRequestHeader, ValidateRequestHeaderLayer}; +use base64::Engine as _; +use http::{ + header::{self, HeaderValue}, + Request, Response, StatusCode, +}; +use std::{fmt, marker::PhantomData}; + +const BASE64: base64::engine::GeneralPurpose = base64::engine::general_purpose::STANDARD; + +impl ValidateRequestHeader> { + /// Authorize requests using a username and password pair. + /// + /// The `Authorization` header is required to be `Basic {credentials}` where `credentials` is + /// `base64_encode("{username}:{password}")`. + /// + /// Since the username and password is sent in clear text it is recommended to use HTTPS/TLS + /// with this method. However use of HTTPS/TLS is not enforced by this middleware. 
+ pub fn basic(inner: S, username: &str, value: &str) -> Self + where + ResBody: Default, + { + Self::custom(inner, Basic::new(username, value)) + } +} + +impl ValidateRequestHeaderLayer> { + /// Authorize requests using a username and password pair. + /// + /// The `Authorization` header is required to be `Basic {credentials}` where `credentials` is + /// `base64_encode("{username}:{password}")`. + /// + /// Since the username and password is sent in clear text it is recommended to use HTTPS/TLS + /// with this method. However use of HTTPS/TLS is not enforced by this middleware. + pub fn basic(username: &str, password: &str) -> Self + where + ResBody: Default, + { + Self::custom(Basic::new(username, password)) + } +} + +impl ValidateRequestHeader> { + /// Authorize requests using a "bearer token". Commonly used for OAuth 2. + /// + /// The `Authorization` header is required to be `Bearer {token}`. + /// + /// # Panics + /// + /// Panics if the token is not a valid [`HeaderValue`]. + pub fn bearer(inner: S, token: &str) -> Self + where + ResBody: Default, + { + Self::custom(inner, Bearer::new(token)) + } +} + +impl ValidateRequestHeaderLayer> { + /// Authorize requests using a "bearer token". Commonly used for OAuth 2. + /// + /// The `Authorization` header is required to be `Bearer {token}`. + /// + /// # Panics + /// + /// Panics if the token is not a valid [`HeaderValue`]. + pub fn bearer(token: &str) -> Self + where + ResBody: Default, + { + Self::custom(Bearer::new(token)) + } +} + +/// Type that performs "bearer token" authorization. +/// +/// See [`ValidateRequestHeader::bearer`] for more details. 
+pub struct Bearer { + header_value: HeaderValue, + _ty: PhantomData ResBody>, +} + +impl Bearer { + fn new(token: &str) -> Self + where + ResBody: Default, + { + Self { + header_value: format!("Bearer {}", token) + .parse() + .expect("token is not a valid header value"), + _ty: PhantomData, + } + } +} + +impl Clone for Bearer { + fn clone(&self) -> Self { + Self { + header_value: self.header_value.clone(), + _ty: PhantomData, + } + } +} + +impl fmt::Debug for Bearer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Bearer") + .field("header_value", &self.header_value) + .finish() + } +} + +impl ValidateRequest for Bearer +where + ResBody: Default, +{ + type ResponseBody = ResBody; + + fn validate(&mut self, request: &mut Request) -> Result<(), Response> { + match request.headers().get(header::AUTHORIZATION) { + Some(actual) if actual == self.header_value => Ok(()), + _ => { + let mut res = Response::new(ResBody::default()); + *res.status_mut() = StatusCode::UNAUTHORIZED; + Err(res) + } + } + } +} + +/// Type that performs basic authorization. +/// +/// See [`ValidateRequestHeader::basic`] for more details. 
+pub struct Basic { + header_value: HeaderValue, + _ty: PhantomData ResBody>, +} + +impl Basic { + fn new(username: &str, password: &str) -> Self + where + ResBody: Default, + { + let encoded = BASE64.encode(format!("{}:{}", username, password)); + let header_value = format!("Basic {}", encoded).parse().unwrap(); + Self { + header_value, + _ty: PhantomData, + } + } +} + +impl Clone for Basic { + fn clone(&self) -> Self { + Self { + header_value: self.header_value.clone(), + _ty: PhantomData, + } + } +} + +impl fmt::Debug for Basic { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Basic") + .field("header_value", &self.header_value) + .finish() + } +} + +impl ValidateRequest for Basic +where + ResBody: Default, +{ + type ResponseBody = ResBody; + + fn validate(&mut self, request: &mut Request) -> Result<(), Response> { + match request.headers().get(header::AUTHORIZATION) { + Some(actual) if actual == self.header_value => Ok(()), + _ => { + let mut res = Response::new(ResBody::default()); + *res.status_mut() = StatusCode::UNAUTHORIZED; + res.headers_mut() + .insert(header::WWW_AUTHENTICATE, "Basic".parse().unwrap()); + Err(res) + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::validate_request::ValidateRequestHeaderLayer; + + #[allow(unused_imports)] + use super::*; + use crate::test_helpers::Body; + use http::header; + use tower::{BoxError, ServiceBuilder, ServiceExt}; + use tower_service::Service; + + #[tokio::test] + async fn valid_basic_token() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::basic("foo", "bar")) + .service_fn(echo); + + let request = Request::get("/") + .header( + header::AUTHORIZATION, + format!("Basic {}", BASE64.encode("foo:bar")), + ) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn invalid_basic_token() { + let mut service = 
ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::basic("foo", "bar")) + .service_fn(echo); + + let request = Request::get("/") + .header( + header::AUTHORIZATION, + format!("Basic {}", BASE64.encode("wrong:credentials")), + ) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + + let www_authenticate = res.headers().get(header::WWW_AUTHENTICATE).unwrap(); + assert_eq!(www_authenticate, "Basic"); + } + + #[tokio::test] + async fn valid_bearer_token() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::bearer("foobar")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "Bearer foobar") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn basic_auth_is_case_sensitive_in_prefix() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::basic("foo", "bar")) + .service_fn(echo); + + let request = Request::get("/") + .header( + header::AUTHORIZATION, + format!("basic {}", BASE64.encode("foo:bar")), + ) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn basic_auth_is_case_sensitive_in_value() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::basic("foo", "bar")) + .service_fn(echo); + + let request = Request::get("/") + .header( + header::AUTHORIZATION, + format!("Basic {}", BASE64.encode("Foo:bar")), + ) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn invalid_bearer_token() { + let mut service 
= ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::bearer("foobar")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "Bearer wat") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn bearer_token_is_case_sensitive_in_prefix() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::bearer("foobar")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "bearer foobar") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn bearer_token_is_case_sensitive_in_token() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::bearer("foobar")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::AUTHORIZATION, "Bearer Foobar") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::UNAUTHORIZED); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/body.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/body.rs new file mode 100644 index 0000000000000000000000000000000000000000..815a0d109a05c4eeab6921b8b0a1a355dc36b348 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/body.rs @@ -0,0 +1,121 @@ +//! Body types. +//! +//! All these are wrappers around other body types. You shouldn't have to use them in your code. +//! Use `http-body-util` instead. +//! +//! 
They exist because we don't want to expose types from `http-body-util` in `tower-http`s public +//! API. + +#![allow(missing_docs)] + +use std::convert::Infallible; + +use bytes::{Buf, Bytes}; +use http_body::Body; +use pin_project_lite::pin_project; + +use crate::BoxError; + +macro_rules! body_methods { + () => { + #[inline] + fn poll_frame( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll, Self::Error>>> { + self.project().inner.poll_frame(cx) + } + + #[inline] + fn is_end_stream(&self) -> bool { + Body::is_end_stream(&self.inner) + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + Body::size_hint(&self.inner) + } + }; +} + +pin_project! { + #[derive(Default)] + pub struct Full { + #[pin] + pub(crate) inner: http_body_util::Full + } +} + +impl Full { + #[allow(dead_code)] + pub(crate) fn new(inner: http_body_util::Full) -> Self { + Self { inner } + } +} + +impl Body for Full { + type Data = Bytes; + type Error = Infallible; + + body_methods!(); +} + +pin_project! { + pub struct Limited { + #[pin] + pub(crate) inner: http_body_util::Limited + } +} + +impl Limited { + #[allow(dead_code)] + pub(crate) fn new(inner: http_body_util::Limited) -> Self { + Self { inner } + } +} + +impl Body for Limited +where + B: Body, + B::Error: Into, +{ + type Data = B::Data; + type Error = BoxError; + + body_methods!(); +} + +pin_project! 
{ + pub struct UnsyncBoxBody { + #[pin] + pub(crate) inner: http_body_util::combinators::UnsyncBoxBody + } +} + +impl Default for UnsyncBoxBody +where + D: Buf + 'static, +{ + fn default() -> Self { + Self { + inner: Default::default(), + } + } +} + +impl UnsyncBoxBody { + #[allow(dead_code)] + pub(crate) fn new(inner: http_body_util::combinators::UnsyncBoxBody) -> Self { + Self { inner } + } +} + +impl Body for UnsyncBoxBody +where + D: Buf, +{ + type Data = D; + type Error = E; + + body_methods!(); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/builder.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..3bdcf64a5dfb088804701dbfd312205afed4c0d6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/builder.rs @@ -0,0 +1,616 @@ +use tower::ServiceBuilder; + +#[allow(unused_imports)] +use http::header::HeaderName; +#[allow(unused_imports)] +use tower_layer::Stack; + +mod sealed { + #[allow(unreachable_pub, unused)] + pub trait Sealed {} +} + +/// Extension trait that adds methods to [`tower::ServiceBuilder`] for adding middleware from +/// tower-http. 
+/// +/// [`Service`]: tower::Service +/// +/// # Example +/// +/// ```rust +/// use http::{Request, Response, header::HeaderName}; +/// use bytes::Bytes; +/// use http_body_util::Full; +/// use std::{time::Duration, convert::Infallible}; +/// use tower::{ServiceBuilder, ServiceExt, Service}; +/// use tower_http::ServiceBuilderExt; +/// +/// async fn handle(request: Request>) -> Result>, Infallible> { +/// Ok(Response::new(Full::default())) +/// } +/// +/// # #[tokio::main] +/// # async fn main() { +/// let service = ServiceBuilder::new() +/// // Methods from tower +/// .timeout(Duration::from_secs(30)) +/// // Methods from tower-http +/// .trace_for_http() +/// .propagate_header(HeaderName::from_static("x-request-id")) +/// .service_fn(handle); +/// # let mut service = service; +/// # service.ready().await.unwrap().call(Request::new(Full::default())).await.unwrap(); +/// # } +/// ``` +#[cfg(feature = "util")] +// ^ work around rustdoc not inferring doc(cfg)s for cfg's from surrounding scopes +pub trait ServiceBuilderExt: sealed::Sealed + Sized { + /// Propagate a header from the request to the response. + /// + /// See [`tower_http::propagate_header`] for more details. + /// + /// [`tower_http::propagate_header`]: crate::propagate_header + #[cfg(feature = "propagate-header")] + fn propagate_header( + self, + header: HeaderName, + ) -> ServiceBuilder>; + + /// Add some shareable value to [request extensions]. + /// + /// See [`tower_http::add_extension`] for more details. + /// + /// [`tower_http::add_extension`]: crate::add_extension + /// [request extensions]: https://docs.rs/http/latest/http/struct.Extensions.html + #[cfg(feature = "add-extension")] + fn add_extension( + self, + value: T, + ) -> ServiceBuilder, L>>; + + /// Apply a transformation to the request body. + /// + /// See [`tower_http::map_request_body`] for more details. 
+ /// + /// [`tower_http::map_request_body`]: crate::map_request_body + #[cfg(feature = "map-request-body")] + fn map_request_body( + self, + f: F, + ) -> ServiceBuilder, L>>; + + /// Apply a transformation to the response body. + /// + /// See [`tower_http::map_response_body`] for more details. + /// + /// [`tower_http::map_response_body`]: crate::map_response_body + #[cfg(feature = "map-response-body")] + fn map_response_body( + self, + f: F, + ) -> ServiceBuilder, L>>; + + /// Compresses response bodies. + /// + /// See [`tower_http::compression`] for more details. + /// + /// [`tower_http::compression`]: crate::compression + #[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", + ))] + fn compression(self) -> ServiceBuilder>; + + /// Decompress response bodies. + /// + /// See [`tower_http::decompression`] for more details. + /// + /// [`tower_http::decompression`]: crate::decompression + #[cfg(any( + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", + ))] + fn decompression(self) -> ServiceBuilder>; + + /// High level tracing that classifies responses using HTTP status codes. + /// + /// This method does not support customizing the output, to do that use [`TraceLayer`] + /// instead. + /// + /// See [`tower_http::trace`] for more details. + /// + /// [`tower_http::trace`]: crate::trace + /// [`TraceLayer`]: crate::trace::TraceLayer + #[cfg(feature = "trace")] + fn trace_for_http( + self, + ) -> ServiceBuilder, L>>; + + /// High level tracing that classifies responses using gRPC headers. + /// + /// This method does not support customizing the output, to do that use [`TraceLayer`] + /// instead. + /// + /// See [`tower_http::trace`] for more details. 
+ /// + /// [`tower_http::trace`]: crate::trace + /// [`TraceLayer`]: crate::trace::TraceLayer + #[cfg(feature = "trace")] + fn trace_for_grpc( + self, + ) -> ServiceBuilder, L>>; + + /// Follow redirect resposes using the [`Standard`] policy. + /// + /// See [`tower_http::follow_redirect`] for more details. + /// + /// [`tower_http::follow_redirect`]: crate::follow_redirect + /// [`Standard`]: crate::follow_redirect::policy::Standard + #[cfg(feature = "follow-redirect")] + fn follow_redirects( + self, + ) -> ServiceBuilder< + Stack< + crate::follow_redirect::FollowRedirectLayer, + L, + >, + >; + + /// Mark headers as [sensitive] on both requests and responses. + /// + /// See [`tower_http::sensitive_headers`] for more details. + /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + /// [`tower_http::sensitive_headers`]: crate::sensitive_headers + #[cfg(feature = "sensitive-headers")] + fn sensitive_headers( + self, + headers: I, + ) -> ServiceBuilder> + where + I: IntoIterator; + + /// Mark headers as [sensitive] on requests. + /// + /// See [`tower_http::sensitive_headers`] for more details. + /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + /// [`tower_http::sensitive_headers`]: crate::sensitive_headers + #[cfg(feature = "sensitive-headers")] + fn sensitive_request_headers( + self, + headers: std::sync::Arc<[HeaderName]>, + ) -> ServiceBuilder>; + + /// Mark headers as [sensitive] on responses. + /// + /// See [`tower_http::sensitive_headers`] for more details. + /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + /// [`tower_http::sensitive_headers`]: crate::sensitive_headers + #[cfg(feature = "sensitive-headers")] + fn sensitive_response_headers( + self, + headers: std::sync::Arc<[HeaderName]>, + ) -> ServiceBuilder>; + + /// Insert a header into the request. 
+ /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn override_request_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Append a header into the request. + /// + /// If previous values exist, the header will have multiple values. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn append_request_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Insert a header into the request, if the header is not already present. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn insert_request_header_if_not_present( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Insert a header into the response. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn override_response_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Append a header into the response. + /// + /// If previous values exist, the header will have multiple values. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn append_response_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Insert a header into the response, if the header is not already present. 
+ /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn insert_response_header_if_not_present( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>>; + + /// Add request id header and extension. + /// + /// See [`tower_http::request_id`] for more details. + /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn set_request_id( + self, + header_name: HeaderName, + make_request_id: M, + ) -> ServiceBuilder, L>> + where + M: crate::request_id::MakeRequestId; + + /// Add request id header and extension, using `x-request-id` as the header name. + /// + /// See [`tower_http::request_id`] for more details. + /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn set_x_request_id( + self, + make_request_id: M, + ) -> ServiceBuilder, L>> + where + M: crate::request_id::MakeRequestId, + { + self.set_request_id(crate::request_id::X_REQUEST_ID, make_request_id) + } + + /// Propgate request ids from requests to responses. + /// + /// See [`tower_http::request_id`] for more details. + /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn propagate_request_id( + self, + header_name: HeaderName, + ) -> ServiceBuilder>; + + /// Propgate request ids from requests to responses, using `x-request-id` as the header name. + /// + /// See [`tower_http::request_id`] for more details. + /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn propagate_x_request_id( + self, + ) -> ServiceBuilder> { + self.propagate_request_id(crate::request_id::X_REQUEST_ID) + } + + /// Catch panics and convert them into `500 Internal Server` responses. + /// + /// See [`tower_http::catch_panic`] for more details. 
+ /// + /// [`tower_http::catch_panic`]: crate::catch_panic + #[cfg(feature = "catch-panic")] + fn catch_panic( + self, + ) -> ServiceBuilder< + Stack, L>, + >; + + /// Intercept requests with over-sized payloads and convert them into + /// `413 Payload Too Large` responses. + /// + /// See [`tower_http::limit`] for more details. + /// + /// [`tower_http::limit`]: crate::limit + #[cfg(feature = "limit")] + fn request_body_limit( + self, + limit: usize, + ) -> ServiceBuilder>; + + /// Remove trailing slashes from paths. + /// + /// See [`tower_http::normalize_path`] for more details. + /// + /// [`tower_http::normalize_path`]: crate::normalize_path + #[cfg(feature = "normalize-path")] + fn trim_trailing_slash( + self, + ) -> ServiceBuilder>; + + /// Append trailing slash to paths. + /// + /// See [`tower_http::normalize_path`] for more details. + /// + /// [`tower_http::normalize_path`]: crate::normalize_path + #[cfg(feature = "normalize-path")] + fn append_trailing_slash( + self, + ) -> ServiceBuilder>; +} + +impl sealed::Sealed for ServiceBuilder {} + +impl ServiceBuilderExt for ServiceBuilder { + #[cfg(feature = "propagate-header")] + fn propagate_header( + self, + header: HeaderName, + ) -> ServiceBuilder> { + self.layer(crate::propagate_header::PropagateHeaderLayer::new(header)) + } + + #[cfg(feature = "add-extension")] + fn add_extension( + self, + value: T, + ) -> ServiceBuilder, L>> { + self.layer(crate::add_extension::AddExtensionLayer::new(value)) + } + + #[cfg(feature = "map-request-body")] + fn map_request_body( + self, + f: F, + ) -> ServiceBuilder, L>> { + self.layer(crate::map_request_body::MapRequestBodyLayer::new(f)) + } + + #[cfg(feature = "map-response-body")] + fn map_response_body( + self, + f: F, + ) -> ServiceBuilder, L>> { + self.layer(crate::map_response_body::MapResponseBodyLayer::new(f)) + } + + #[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", + ))] 
+ fn compression(self) -> ServiceBuilder> { + self.layer(crate::compression::CompressionLayer::new()) + } + + #[cfg(any( + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", + ))] + fn decompression(self) -> ServiceBuilder> { + self.layer(crate::decompression::DecompressionLayer::new()) + } + + #[cfg(feature = "trace")] + fn trace_for_http( + self, + ) -> ServiceBuilder, L>> { + self.layer(crate::trace::TraceLayer::new_for_http()) + } + + #[cfg(feature = "trace")] + fn trace_for_grpc( + self, + ) -> ServiceBuilder, L>> { + self.layer(crate::trace::TraceLayer::new_for_grpc()) + } + + #[cfg(feature = "follow-redirect")] + fn follow_redirects( + self, + ) -> ServiceBuilder< + Stack< + crate::follow_redirect::FollowRedirectLayer, + L, + >, + > { + self.layer(crate::follow_redirect::FollowRedirectLayer::new()) + } + + #[cfg(feature = "sensitive-headers")] + fn sensitive_headers( + self, + headers: I, + ) -> ServiceBuilder> + where + I: IntoIterator, + { + self.layer(crate::sensitive_headers::SetSensitiveHeadersLayer::new( + headers, + )) + } + + #[cfg(feature = "sensitive-headers")] + fn sensitive_request_headers( + self, + headers: std::sync::Arc<[HeaderName]>, + ) -> ServiceBuilder> { + self.layer(crate::sensitive_headers::SetSensitiveRequestHeadersLayer::from_shared(headers)) + } + + #[cfg(feature = "sensitive-headers")] + fn sensitive_response_headers( + self, + headers: std::sync::Arc<[HeaderName]>, + ) -> ServiceBuilder> { + self.layer(crate::sensitive_headers::SetSensitiveResponseHeadersLayer::from_shared(headers)) + } + + #[cfg(feature = "set-header")] + fn override_request_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>> { + self.layer(crate::set_header::SetRequestHeaderLayer::overriding( + header_name, + make, + )) + } + + #[cfg(feature = "set-header")] + fn append_request_header( + self, + header_name: HeaderName, + make: M, + ) -> 
ServiceBuilder, L>> { + self.layer(crate::set_header::SetRequestHeaderLayer::appending( + header_name, + make, + )) + } + + #[cfg(feature = "set-header")] + fn insert_request_header_if_not_present( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>> { + self.layer(crate::set_header::SetRequestHeaderLayer::if_not_present( + header_name, + make, + )) + } + + #[cfg(feature = "set-header")] + fn override_response_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>> { + self.layer(crate::set_header::SetResponseHeaderLayer::overriding( + header_name, + make, + )) + } + + #[cfg(feature = "set-header")] + fn append_response_header( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>> { + self.layer(crate::set_header::SetResponseHeaderLayer::appending( + header_name, + make, + )) + } + + #[cfg(feature = "set-header")] + fn insert_response_header_if_not_present( + self, + header_name: HeaderName, + make: M, + ) -> ServiceBuilder, L>> { + self.layer(crate::set_header::SetResponseHeaderLayer::if_not_present( + header_name, + make, + )) + } + + #[cfg(feature = "request-id")] + fn set_request_id( + self, + header_name: HeaderName, + make_request_id: M, + ) -> ServiceBuilder, L>> + where + M: crate::request_id::MakeRequestId, + { + self.layer(crate::request_id::SetRequestIdLayer::new( + header_name, + make_request_id, + )) + } + + #[cfg(feature = "request-id")] + fn propagate_request_id( + self, + header_name: HeaderName, + ) -> ServiceBuilder> { + self.layer(crate::request_id::PropagateRequestIdLayer::new(header_name)) + } + + #[cfg(feature = "catch-panic")] + fn catch_panic( + self, + ) -> ServiceBuilder< + Stack, L>, + > { + self.layer(crate::catch_panic::CatchPanicLayer::new()) + } + + #[cfg(feature = "limit")] + fn request_body_limit( + self, + limit: usize, + ) -> ServiceBuilder> { + self.layer(crate::limit::RequestBodyLimitLayer::new(limit)) + } + + #[cfg(feature = "normalize-path")] + fn 
trim_trailing_slash( + self, + ) -> ServiceBuilder> { + self.layer(crate::normalize_path::NormalizePathLayer::trim_trailing_slash()) + } + + #[cfg(feature = "normalize-path")] + fn append_trailing_slash( + self, + ) -> ServiceBuilder> { + self.layer(crate::normalize_path::NormalizePathLayer::append_trailing_slash()) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/catch_panic.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/catch_panic.rs new file mode 100644 index 0000000000000000000000000000000000000000..3f1c227921a51ccbc049f2a582d4ea075d4807d7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/catch_panic.rs @@ -0,0 +1,409 @@ +//! Convert panics into responses. +//! +//! Note that using panics for error handling is _not_ recommended. Prefer instead to use `Result` +//! whenever possible. +//! +//! # Example +//! +//! ```rust +//! use http::{Request, Response, header::HeaderName}; +//! use std::convert::Infallible; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use tower_http::catch_panic::CatchPanicLayer; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! panic!("something went wrong...") +//! } +//! +//! let mut svc = ServiceBuilder::new() +//! // Catch panics and convert them into responses. +//! .layer(CatchPanicLayer::new()) +//! .service_fn(handle); +//! +//! // Call the service. +//! let request = Request::new(Full::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.status(), 500); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! Using a custom panic handler: +//! +//! ```rust +//! use http::{Request, StatusCode, Response, header::{self, HeaderName}}; +//! use std::{any::Any, convert::Infallible}; +//! 
use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use tower_http::catch_panic::CatchPanicLayer; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! panic!("something went wrong...") +//! } +//! +//! fn handle_panic(err: Box) -> Response> { +//! let details = if let Some(s) = err.downcast_ref::() { +//! s.clone() +//! } else if let Some(s) = err.downcast_ref::<&str>() { +//! s.to_string() +//! } else { +//! "Unknown panic message".to_string() +//! }; +//! +//! let body = serde_json::json!({ +//! "error": { +//! "kind": "panic", +//! "details": details, +//! } +//! }); +//! let body = serde_json::to_string(&body).unwrap(); +//! +//! Response::builder() +//! .status(StatusCode::INTERNAL_SERVER_ERROR) +//! .header(header::CONTENT_TYPE, "application/json") +//! .body(Full::from(body)) +//! .unwrap() +//! } +//! +//! let svc = ServiceBuilder::new() +//! // Use `handle_panic` to create the response. +//! .layer(CatchPanicLayer::custom(handle_panic)) +//! .service_fn(handle); +//! # +//! # Ok(()) +//! # } +//! ``` + +use bytes::Bytes; +use futures_util::future::{CatchUnwind, FutureExt}; +use http::{HeaderValue, Request, Response, StatusCode}; +use http_body::Body; +use http_body_util::BodyExt; +use pin_project_lite::pin_project; +use std::{ + any::Any, + future::Future, + panic::AssertUnwindSafe, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +use crate::{ + body::{Full, UnsyncBoxBody}, + BoxError, +}; + +/// Layer that applies the [`CatchPanic`] middleware that catches panics and converts them into +/// `500 Internal Server` responses. +/// +/// See the [module docs](self) for an example. 
+#[derive(Debug, Clone, Copy, Default)] +pub struct CatchPanicLayer { + panic_handler: T, +} + +impl CatchPanicLayer { + /// Create a new `CatchPanicLayer` with the default panic handler. + pub fn new() -> Self { + CatchPanicLayer { + panic_handler: DefaultResponseForPanic, + } + } +} + +impl CatchPanicLayer { + /// Create a new `CatchPanicLayer` with a custom panic handler. + pub fn custom(panic_handler: T) -> Self + where + T: ResponseForPanic, + { + Self { panic_handler } + } +} + +impl Layer for CatchPanicLayer +where + T: Clone, +{ + type Service = CatchPanic; + + fn layer(&self, inner: S) -> Self::Service { + CatchPanic { + inner, + panic_handler: self.panic_handler.clone(), + } + } +} + +/// Middleware that catches panics and converts them into `500 Internal Server` responses. +/// +/// See the [module docs](self) for an example. +#[derive(Debug, Clone, Copy)] +pub struct CatchPanic { + inner: S, + panic_handler: T, +} + +impl CatchPanic { + /// Create a new `CatchPanic` with the default panic handler. + pub fn new(inner: S) -> Self { + Self { + inner, + panic_handler: DefaultResponseForPanic, + } + } +} + +impl CatchPanic { + define_inner_service_accessors!(); + + /// Create a new `CatchPanic` with a custom panic handler. 
+ pub fn custom(inner: S, panic_handler: T) -> Self + where + T: ResponseForPanic, + { + Self { + inner, + panic_handler, + } + } +} + +impl Service> for CatchPanic +where + S: Service, Response = Response>, + ResBody: Body + Send + 'static, + ResBody::Error: Into, + T: ResponseForPanic + Clone, + T::ResponseBody: Body + Send + 'static, + ::Error: Into, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + match std::panic::catch_unwind(AssertUnwindSafe(|| self.inner.call(req))) { + Ok(future) => ResponseFuture { + kind: Kind::Future { + future: AssertUnwindSafe(future).catch_unwind(), + panic_handler: Some(self.panic_handler.clone()), + }, + }, + Err(panic_err) => ResponseFuture { + kind: Kind::Panicked { + panic_err: Some(panic_err), + panic_handler: Some(self.panic_handler.clone()), + }, + }, + } + } +} + +pin_project! { + /// Response future for [`CatchPanic`]. + pub struct ResponseFuture { + #[pin] + kind: Kind, + } +} + +pin_project! 
{ + #[project = KindProj] + enum Kind { + Panicked { + panic_err: Option>, + panic_handler: Option, + }, + Future { + #[pin] + future: CatchUnwind>, + panic_handler: Option, + } + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + ResBody: Body + Send + 'static, + ResBody::Error: Into, + T: ResponseForPanic, + T::ResponseBody: Body + Send + 'static, + ::Error: Into, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.project().kind.project() { + KindProj::Panicked { + panic_err, + panic_handler, + } => { + let panic_handler = panic_handler + .take() + .expect("future polled after completion"); + let panic_err = panic_err.take().expect("future polled after completion"); + Poll::Ready(Ok(response_for_panic(panic_handler, panic_err))) + } + KindProj::Future { + future, + panic_handler, + } => match ready!(future.poll(cx)) { + Ok(Ok(res)) => { + Poll::Ready(Ok(res.map(|body| { + UnsyncBoxBody::new(body.map_err(Into::into).boxed_unsync()) + }))) + } + Ok(Err(svc_err)) => Poll::Ready(Err(svc_err)), + Err(panic_err) => Poll::Ready(Ok(response_for_panic( + panic_handler + .take() + .expect("future polled after completion"), + panic_err, + ))), + }, + } + } +} + +fn response_for_panic( + mut panic_handler: T, + err: Box, +) -> Response> +where + T: ResponseForPanic, + T::ResponseBody: Body + Send + 'static, + ::Error: Into, +{ + panic_handler + .response_for_panic(err) + .map(|body| UnsyncBoxBody::new(body.map_err(Into::into).boxed_unsync())) +} + +/// Trait for creating responses from panics. +pub trait ResponseForPanic: Clone { + /// The body type used for responses to panics. + type ResponseBody; + + /// Create a response from the panic error. 
+ fn response_for_panic( + &mut self, + err: Box, + ) -> Response; +} + +impl ResponseForPanic for F +where + F: FnMut(Box) -> Response + Clone, +{ + type ResponseBody = B; + + fn response_for_panic( + &mut self, + err: Box, + ) -> Response { + self(err) + } +} + +/// The default `ResponseForPanic` used by `CatchPanic`. +/// +/// It will log the panic message and return a `500 Internal Server` error response with an empty +/// body. +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct DefaultResponseForPanic; + +impl ResponseForPanic for DefaultResponseForPanic { + type ResponseBody = Full; + + fn response_for_panic( + &mut self, + err: Box, + ) -> Response { + if let Some(s) = err.downcast_ref::() { + tracing::error!("Service panicked: {}", s); + } else if let Some(s) = err.downcast_ref::<&str>() { + tracing::error!("Service panicked: {}", s); + } else { + tracing::error!( + "Service panicked but `CatchPanic` was unable to downcast the panic info" + ); + }; + + let mut res = Response::new(Full::new(http_body_util::Full::from("Service panicked"))); + *res.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; + + #[allow(clippy::declare_interior_mutable_const)] + const TEXT_PLAIN: HeaderValue = HeaderValue::from_static("text/plain; charset=utf-8"); + res.headers_mut() + .insert(http::header::CONTENT_TYPE, TEXT_PLAIN); + + res + } +} + +#[cfg(test)] +mod tests { + #![allow(unreachable_code)] + + use super::*; + use crate::test_helpers::Body; + use http::Response; + use std::convert::Infallible; + use tower::{ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn panic_before_returning_future() { + let svc = ServiceBuilder::new() + .layer(CatchPanicLayer::new()) + .service_fn(|_: Request| { + panic!("service panic"); + async { Ok::<_, Infallible>(Response::new(Body::empty())) } + }); + + let req = Request::new(Body::empty()); + + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR); + let body = 
crate::test_helpers::to_bytes(res).await.unwrap(); + assert_eq!(&body[..], b"Service panicked"); + } + + #[tokio::test] + async fn panic_in_future() { + let svc = ServiceBuilder::new() + .layer(CatchPanicLayer::new()) + .service_fn(|_: Request| async { + panic!("future panic"); + Ok::<_, Infallible>(Response::new(Body::empty())) + }); + + let req = Request::new(Body::empty()); + + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR); + let body = crate::test_helpers::to_bytes(res).await.unwrap(); + assert_eq!(&body[..], b"Service panicked"); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/grpc_errors_as_failures.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/grpc_errors_as_failures.rs new file mode 100644 index 0000000000000000000000000000000000000000..3fc96c33e65a042368b4348315e843eaba75aaac --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/grpc_errors_as_failures.rs @@ -0,0 +1,357 @@ +use super::{ClassifiedResponse, ClassifyEos, ClassifyResponse, SharedClassifier}; +use bitflags::bitflags; +use http::{HeaderMap, Response}; +use std::{fmt, num::NonZeroI32}; + +/// gRPC status codes. +/// +/// These variants match the [gRPC status codes]. +/// +/// [gRPC status codes]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md#status-codes-and-their-use-in-grpc +#[derive(Clone, Copy, Debug)] +pub enum GrpcCode { + /// The operation completed successfully. + Ok, + /// The operation was cancelled. + Cancelled, + /// Unknown error. + Unknown, + /// Client specified an invalid argument. + InvalidArgument, + /// Deadline expired before operation could complete. + DeadlineExceeded, + /// Some requested entity was not found. + NotFound, + /// Some entity that we attempted to create already exists. 
+ AlreadyExists, + /// The caller does not have permission to execute the specified operation. + PermissionDenied, + /// Some resource has been exhausted. + ResourceExhausted, + /// The system is not in a state required for the operation's execution. + FailedPrecondition, + /// The operation was aborted. + Aborted, + /// Operation was attempted past the valid range. + OutOfRange, + /// Operation is not implemented or not supported. + Unimplemented, + /// Internal error. + Internal, + /// The service is currently unavailable. + Unavailable, + /// Unrecoverable data loss or corruption. + DataLoss, + /// The request does not have valid authentication credentials + Unauthenticated, +} + +impl GrpcCode { + pub(crate) fn into_bitmask(self) -> GrpcCodeBitmask { + match self { + Self::Ok => GrpcCodeBitmask::OK, + Self::Cancelled => GrpcCodeBitmask::CANCELLED, + Self::Unknown => GrpcCodeBitmask::UNKNOWN, + Self::InvalidArgument => GrpcCodeBitmask::INVALID_ARGUMENT, + Self::DeadlineExceeded => GrpcCodeBitmask::DEADLINE_EXCEEDED, + Self::NotFound => GrpcCodeBitmask::NOT_FOUND, + Self::AlreadyExists => GrpcCodeBitmask::ALREADY_EXISTS, + Self::PermissionDenied => GrpcCodeBitmask::PERMISSION_DENIED, + Self::ResourceExhausted => GrpcCodeBitmask::RESOURCE_EXHAUSTED, + Self::FailedPrecondition => GrpcCodeBitmask::FAILED_PRECONDITION, + Self::Aborted => GrpcCodeBitmask::ABORTED, + Self::OutOfRange => GrpcCodeBitmask::OUT_OF_RANGE, + Self::Unimplemented => GrpcCodeBitmask::UNIMPLEMENTED, + Self::Internal => GrpcCodeBitmask::INTERNAL, + Self::Unavailable => GrpcCodeBitmask::UNAVAILABLE, + Self::DataLoss => GrpcCodeBitmask::DATA_LOSS, + Self::Unauthenticated => GrpcCodeBitmask::UNAUTHENTICATED, + } + } +} + +bitflags! 
{ + #[derive(Debug, Clone, Copy)] + pub(crate) struct GrpcCodeBitmask: u32 { + const OK = 0b00000000000000001; + const CANCELLED = 0b00000000000000010; + const UNKNOWN = 0b00000000000000100; + const INVALID_ARGUMENT = 0b00000000000001000; + const DEADLINE_EXCEEDED = 0b00000000000010000; + const NOT_FOUND = 0b00000000000100000; + const ALREADY_EXISTS = 0b00000000001000000; + const PERMISSION_DENIED = 0b00000000010000000; + const RESOURCE_EXHAUSTED = 0b00000000100000000; + const FAILED_PRECONDITION = 0b00000001000000000; + const ABORTED = 0b00000010000000000; + const OUT_OF_RANGE = 0b00000100000000000; + const UNIMPLEMENTED = 0b00001000000000000; + const INTERNAL = 0b00010000000000000; + const UNAVAILABLE = 0b00100000000000000; + const DATA_LOSS = 0b01000000000000000; + const UNAUTHENTICATED = 0b10000000000000000; + } +} + +impl GrpcCodeBitmask { + fn try_from_u32(code: u32) -> Option { + match code { + 0 => Some(Self::OK), + 1 => Some(Self::CANCELLED), + 2 => Some(Self::UNKNOWN), + 3 => Some(Self::INVALID_ARGUMENT), + 4 => Some(Self::DEADLINE_EXCEEDED), + 5 => Some(Self::NOT_FOUND), + 6 => Some(Self::ALREADY_EXISTS), + 7 => Some(Self::PERMISSION_DENIED), + 8 => Some(Self::RESOURCE_EXHAUSTED), + 9 => Some(Self::FAILED_PRECONDITION), + 10 => Some(Self::ABORTED), + 11 => Some(Self::OUT_OF_RANGE), + 12 => Some(Self::UNIMPLEMENTED), + 13 => Some(Self::INTERNAL), + 14 => Some(Self::UNAVAILABLE), + 15 => Some(Self::DATA_LOSS), + 16 => Some(Self::UNAUTHENTICATED), + _ => None, + } + } +} + +/// Response classifier for gRPC responses. +/// +/// gRPC doesn't use normal HTTP statuses for indicating success or failure but instead a special +/// header that might appear in a trailer. +/// +/// Responses are considered successful if +/// +/// - `grpc-status` header value contains a success value. +/// - `grpc-status` header is missing. +/// - `grpc-status` header value isn't a valid `String`. +/// - `grpc-status` header value can't parsed into an `i32`. 
+/// +/// All others are considered failures. +#[derive(Debug, Clone)] +pub struct GrpcErrorsAsFailures { + success_codes: GrpcCodeBitmask, +} + +impl Default for GrpcErrorsAsFailures { + fn default() -> Self { + Self::new() + } +} + +impl GrpcErrorsAsFailures { + /// Create a new [`GrpcErrorsAsFailures`]. + pub fn new() -> Self { + Self { + success_codes: GrpcCodeBitmask::OK, + } + } + + /// Change which gRPC codes are considered success. + /// + /// Defaults to only considering `Ok` as success. + /// + /// `Ok` will always be considered a success. + /// + /// # Example + /// + /// Servers might not want to consider `Invalid Argument` or `Not Found` as failures since + /// thats likely the clients fault: + /// + /// ```rust + /// use tower_http::classify::{GrpcErrorsAsFailures, GrpcCode}; + /// + /// let classifier = GrpcErrorsAsFailures::new() + /// .with_success(GrpcCode::InvalidArgument) + /// .with_success(GrpcCode::NotFound); + /// ``` + pub fn with_success(mut self, code: GrpcCode) -> Self { + self.success_codes |= code.into_bitmask(); + self + } + + /// Returns a [`MakeClassifier`](super::MakeClassifier) that produces `GrpcErrorsAsFailures`. + /// + /// This is a convenience function that simply calls `SharedClassifier::new`. 
+ pub fn make_classifier() -> SharedClassifier { + SharedClassifier::new(Self::new()) + } +} + +impl ClassifyResponse for GrpcErrorsAsFailures { + type FailureClass = GrpcFailureClass; + type ClassifyEos = GrpcEosErrorsAsFailures; + + fn classify_response( + self, + res: &Response, + ) -> ClassifiedResponse { + match classify_grpc_metadata(res.headers(), self.success_codes) { + ParsedGrpcStatus::Success + | ParsedGrpcStatus::HeaderNotString + | ParsedGrpcStatus::HeaderNotInt => ClassifiedResponse::Ready(Ok(())), + ParsedGrpcStatus::NonSuccess(status) => { + ClassifiedResponse::Ready(Err(GrpcFailureClass::Code(status))) + } + ParsedGrpcStatus::GrpcStatusHeaderMissing => { + ClassifiedResponse::RequiresEos(GrpcEosErrorsAsFailures { + success_codes: self.success_codes, + }) + } + } + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static, + { + GrpcFailureClass::Error(error.to_string()) + } +} + +/// The [`ClassifyEos`] for [`GrpcErrorsAsFailures`]. +#[derive(Debug, Clone)] +pub struct GrpcEosErrorsAsFailures { + success_codes: GrpcCodeBitmask, +} + +impl ClassifyEos for GrpcEosErrorsAsFailures { + type FailureClass = GrpcFailureClass; + + fn classify_eos(self, trailers: Option<&HeaderMap>) -> Result<(), Self::FailureClass> { + if let Some(trailers) = trailers { + match classify_grpc_metadata(trailers, self.success_codes) { + ParsedGrpcStatus::Success + | ParsedGrpcStatus::GrpcStatusHeaderMissing + | ParsedGrpcStatus::HeaderNotString + | ParsedGrpcStatus::HeaderNotInt => Ok(()), + ParsedGrpcStatus::NonSuccess(status) => Err(GrpcFailureClass::Code(status)), + } + } else { + Ok(()) + } + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static, + { + GrpcFailureClass::Error(error.to_string()) + } +} + +/// The failure class for [`GrpcErrorsAsFailures`]. +#[derive(Debug)] +pub enum GrpcFailureClass { + /// A gRPC response was classified as a failure with the corresponding status. 
+ Code(std::num::NonZeroI32), + /// A gRPC response was classified as an error with the corresponding error description. + Error(String), +} + +impl fmt::Display for GrpcFailureClass { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Code(code) => write!(f, "Code: {}", code), + Self::Error(error) => write!(f, "Error: {}", error), + } + } +} + +pub(crate) fn classify_grpc_metadata( + headers: &HeaderMap, + success_codes: GrpcCodeBitmask, +) -> ParsedGrpcStatus { + macro_rules! or_else { + ($expr:expr, $other:ident) => { + if let Some(value) = $expr { + value + } else { + return ParsedGrpcStatus::$other; + } + }; + } + + let status = or_else!(headers.get("grpc-status"), GrpcStatusHeaderMissing); + let status = or_else!(status.to_str().ok(), HeaderNotString); + let status = or_else!(status.parse::().ok(), HeaderNotInt); + + if GrpcCodeBitmask::try_from_u32(status as _) + .filter(|code| success_codes.contains(*code)) + .is_some() + { + ParsedGrpcStatus::Success + } else { + ParsedGrpcStatus::NonSuccess(NonZeroI32::new(status).unwrap()) + } +} + +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum ParsedGrpcStatus { + Success, + NonSuccess(NonZeroI32), + GrpcStatusHeaderMissing, + // these two are treated as `Success` but kept separate for clarity + HeaderNotString, + HeaderNotInt, +} + +#[cfg(test)] +mod tests { + use super::*; + + macro_rules! classify_grpc_metadata_test { + ( + name: $name:ident, + status: $status:expr, + success_flags: $success_flags:expr, + expected: $expected:expr, + ) => { + #[test] + fn $name() { + let mut headers = HeaderMap::new(); + headers.insert("grpc-status", $status.parse().unwrap()); + let status = classify_grpc_metadata(&headers, $success_flags); + assert_eq!(status, $expected); + } + }; + } + + classify_grpc_metadata_test! { + name: basic_ok, + status: "0", + success_flags: GrpcCodeBitmask::OK, + expected: ParsedGrpcStatus::Success, + } + + classify_grpc_metadata_test! 
{ + name: basic_error, + status: "1", + success_flags: GrpcCodeBitmask::OK, + expected: ParsedGrpcStatus::NonSuccess(NonZeroI32::new(1).unwrap()), + } + + classify_grpc_metadata_test! { + name: two_success_codes_first_matches, + status: "0", + success_flags: GrpcCodeBitmask::OK | GrpcCodeBitmask::INVALID_ARGUMENT, + expected: ParsedGrpcStatus::Success, + } + + classify_grpc_metadata_test! { + name: two_success_codes_second_matches, + status: "3", + success_flags: GrpcCodeBitmask::OK | GrpcCodeBitmask::INVALID_ARGUMENT, + expected: ParsedGrpcStatus::Success, + } + + classify_grpc_metadata_test! { + name: two_success_codes_none_matches, + status: "16", + success_flags: GrpcCodeBitmask::OK | GrpcCodeBitmask::INVALID_ARGUMENT, + expected: ParsedGrpcStatus::NonSuccess(NonZeroI32::new(16).unwrap()), + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/map_failure_class.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/map_failure_class.rs new file mode 100644 index 0000000000000000000000000000000000000000..680593b56e98aab663993c191ba4a437c252fe8e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/map_failure_class.rs @@ -0,0 +1,80 @@ +use super::{ClassifiedResponse, ClassifyEos, ClassifyResponse}; +use http::{HeaderMap, Response}; +use std::fmt; + +/// Response classifier that transforms the failure class of some other +/// classifier. +/// +/// Created with [`ClassifyResponse::map_failure_class`] or +/// [`ClassifyEos::map_failure_class`]. 
+#[derive(Clone, Copy)] +pub struct MapFailureClass { + inner: C, + f: F, +} + +impl MapFailureClass { + pub(super) fn new(classify: C, f: F) -> Self { + Self { inner: classify, f } + } +} + +impl fmt::Debug for MapFailureClass +where + C: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapFailureClass") + .field("inner", &self.inner) + .field("f", &format_args!("{}", std::any::type_name::())) + .finish() + } +} + +impl ClassifyResponse for MapFailureClass +where + C: ClassifyResponse, + F: FnOnce(C::FailureClass) -> NewClass, +{ + type FailureClass = NewClass; + type ClassifyEos = MapFailureClass; + + fn classify_response( + self, + res: &Response, + ) -> ClassifiedResponse { + match self.inner.classify_response(res) { + ClassifiedResponse::Ready(result) => ClassifiedResponse::Ready(result.map_err(self.f)), + ClassifiedResponse::RequiresEos(classify_eos) => { + let mapped_classify_eos = MapFailureClass::new(classify_eos, self.f); + ClassifiedResponse::RequiresEos(mapped_classify_eos) + } + } + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: std::fmt::Display + 'static, + { + (self.f)(self.inner.classify_error(error)) + } +} + +impl ClassifyEos for MapFailureClass +where + C: ClassifyEos, + F: FnOnce(C::FailureClass) -> NewClass, +{ + type FailureClass = NewClass; + + fn classify_eos(self, trailers: Option<&HeaderMap>) -> Result<(), Self::FailureClass> { + self.inner.classify_eos(trailers).map_err(self.f) + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: std::fmt::Display + 'static, + { + (self.f)(self.inner.classify_error(error)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..a314784360b330faa43d84c8bd97d11fa0da614c --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/mod.rs @@ -0,0 +1,436 @@ +//! Tools for classifying responses as either success or failure. + +use http::{HeaderMap, Request, Response, StatusCode}; +use std::{convert::Infallible, fmt, marker::PhantomData}; + +pub(crate) mod grpc_errors_as_failures; +mod map_failure_class; +mod status_in_range_is_error; + +pub use self::{ + grpc_errors_as_failures::{ + GrpcCode, GrpcEosErrorsAsFailures, GrpcErrorsAsFailures, GrpcFailureClass, + }, + map_failure_class::MapFailureClass, + status_in_range_is_error::{StatusInRangeAsFailures, StatusInRangeFailureClass}, +}; + +/// Trait for producing response classifiers from a request. +/// +/// This is useful when a classifier depends on data from the request. For example, this could +/// include the URI or HTTP method. +/// +/// This trait is generic over the [`Error` type] of the `Service`s used with the classifier. +/// This is necessary for [`ClassifyResponse::classify_error`]. +/// +/// [`Error` type]: https://docs.rs/tower/latest/tower/trait.Service.html#associatedtype.Error +pub trait MakeClassifier { + /// The response classifier produced. + type Classifier: ClassifyResponse< + FailureClass = Self::FailureClass, + ClassifyEos = Self::ClassifyEos, + >; + + /// The type of failure classifications. + /// + /// This might include additional information about the error, such as + /// whether it was a client or server error, or whether or not it should + /// be considered retryable. + type FailureClass; + + /// The type used to classify the response end of stream (EOS). + type ClassifyEos: ClassifyEos; + + /// Returns a response classifier for this request + fn make_classifier(&self, req: &Request) -> Self::Classifier; +} + +/// A [`MakeClassifier`] that produces new classifiers by cloning an inner classifier. 
+/// +/// When a type implementing [`ClassifyResponse`] doesn't depend on information +/// from the request, [`SharedClassifier`] can be used to turn an instance of that type +/// into a [`MakeClassifier`]. +/// +/// # Example +/// +/// ``` +/// use std::fmt; +/// use tower_http::classify::{ +/// ClassifyResponse, ClassifiedResponse, NeverClassifyEos, +/// SharedClassifier, MakeClassifier, +/// }; +/// use http::Response; +/// +/// // A response classifier that only considers errors to be failures. +/// #[derive(Clone, Copy)] +/// struct MyClassifier; +/// +/// impl ClassifyResponse for MyClassifier { +/// type FailureClass = String; +/// type ClassifyEos = NeverClassifyEos; +/// +/// fn classify_response( +/// self, +/// _res: &Response, +/// ) -> ClassifiedResponse { +/// ClassifiedResponse::Ready(Ok(())) +/// } +/// +/// fn classify_error(self, error: &E) -> Self::FailureClass +/// where +/// E: fmt::Display + 'static, +/// { +/// error.to_string() +/// } +/// } +/// +/// // Some function that requires a `MakeClassifier` +/// fn use_make_classifier(make: M) { +/// // ... +/// } +/// +/// // `MyClassifier` doesn't implement `MakeClassifier` but since it doesn't +/// // care about the incoming request we can make `MyClassifier`s by cloning. +/// // That is what `SharedClassifier` does. +/// let make_classifier = SharedClassifier::new(MyClassifier); +/// +/// // We now have a `MakeClassifier`! +/// use_make_classifier(make_classifier); +/// ``` +#[derive(Debug, Clone)] +pub struct SharedClassifier { + classifier: C, +} + +impl SharedClassifier { + /// Create a new `SharedClassifier` from the given classifier. 
+ pub fn new(classifier: C) -> Self + where + C: ClassifyResponse + Clone, + { + Self { classifier } + } +} + +impl MakeClassifier for SharedClassifier +where + C: ClassifyResponse + Clone, +{ + type FailureClass = C::FailureClass; + type ClassifyEos = C::ClassifyEos; + type Classifier = C; + + fn make_classifier(&self, _req: &Request) -> Self::Classifier { + self.classifier.clone() + } +} + +/// Trait for classifying responses as either success or failure. Designed to support both unary +/// requests (single request for a single response) as well as streaming responses. +/// +/// Response classifiers are used in cases where middleware needs to determine +/// whether a response completed successfully or failed. For example, they may +/// be used by logging or metrics middleware to record failures differently +/// from successes. +/// +/// Furthermore, when a response fails, a response classifier may provide +/// additional information about the failure. This can, for example, be used to +/// build [retry policies] by indicating whether or not a particular failure is +/// retryable. +/// +/// [retry policies]: https://docs.rs/tower/latest/tower/retry/trait.Policy.html +pub trait ClassifyResponse { + /// The type returned when a response is classified as a failure. + /// + /// Depending on the classifier, this may simply indicate that the + /// request failed, or it may contain additional information about + /// the failure, such as whether or not it is retryable. + type FailureClass; + + /// The type used to classify the response end of stream (EOS). + type ClassifyEos: ClassifyEos; + + /// Attempt to classify the beginning of a response. + /// + /// In some cases, the response can be classified immediately, without + /// waiting for a body to complete. This may include: + /// + /// - When the response has an error status code. + /// - When a successful response does not have a streaming body. + /// - When the classifier does not care about streaming bodies. 
+ /// + /// When the response can be classified immediately, `classify_response` + /// returns a [`ClassifiedResponse::Ready`] which indicates whether the + /// response succeeded or failed. + /// + /// In other cases, however, the classifier may need to wait until the + /// response body stream completes before it can classify the response. + /// For example, gRPC indicates RPC failures using the `grpc-status` + /// trailer. In this case, `classify_response` returns a + /// [`ClassifiedResponse::RequiresEos`] containing a type which will + /// be used to classify the response when the body stream ends. + fn classify_response( + self, + res: &Response, + ) -> ClassifiedResponse; + + /// Classify an error. + /// + /// Errors are always errors (doh) but sometimes it might be useful to have multiple classes of + /// errors. A retry policy might allow retrying some errors and not others. + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static; + + /// Transform the failure classification using a function. + /// + /// # Example + /// + /// ``` + /// use tower_http::classify::{ + /// ServerErrorsAsFailures, ServerErrorsFailureClass, + /// ClassifyResponse, ClassifiedResponse + /// }; + /// use http::{Response, StatusCode}; + /// use http_body_util::Empty; + /// use bytes::Bytes; + /// + /// fn transform_failure_class(class: ServerErrorsFailureClass) -> NewFailureClass { + /// match class { + /// // Convert status codes into u16 + /// ServerErrorsFailureClass::StatusCode(status) => { + /// NewFailureClass::Status(status.as_u16()) + /// } + /// // Don't change errors. 
+ /// ServerErrorsFailureClass::Error(error) => { + /// NewFailureClass::Error(error) + /// } + /// } + /// } + /// + /// enum NewFailureClass { + /// Status(u16), + /// Error(String), + /// } + /// + /// // Create a classifier who's failure class will be transformed by `transform_failure_class` + /// let classifier = ServerErrorsAsFailures::new().map_failure_class(transform_failure_class); + /// + /// let response = Response::builder() + /// .status(StatusCode::INTERNAL_SERVER_ERROR) + /// .body(Empty::::new()) + /// .unwrap(); + /// + /// let classification = classifier.classify_response(&response); + /// + /// assert!(matches!( + /// classification, + /// ClassifiedResponse::Ready(Err(NewFailureClass::Status(500))) + /// )); + /// ``` + fn map_failure_class(self, f: F) -> MapFailureClass + where + Self: Sized, + F: FnOnce(Self::FailureClass) -> NewClass, + { + MapFailureClass::new(self, f) + } +} + +/// Trait for classifying end of streams (EOS) as either success or failure. +pub trait ClassifyEos { + /// The type of failure classifications. + type FailureClass; + + /// Perform the classification from response trailers. + fn classify_eos(self, trailers: Option<&HeaderMap>) -> Result<(), Self::FailureClass>; + + /// Classify an error. + /// + /// Errors are always errors (doh) but sometimes it might be useful to have multiple classes of + /// errors. A retry policy might allow retrying some errors and not others. + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static; + + /// Transform the failure classification using a function. + /// + /// See [`ClassifyResponse::map_failure_class`] for more details. + fn map_failure_class(self, f: F) -> MapFailureClass + where + Self: Sized, + F: FnOnce(Self::FailureClass) -> NewClass, + { + MapFailureClass::new(self, f) + } +} + +/// Result of doing a classification. +#[derive(Debug)] +pub enum ClassifiedResponse { + /// The response was able to be classified immediately. 
+ Ready(Result<(), FailureClass>), + /// We have to wait until the end of a streaming response to classify it. + RequiresEos(ClassifyEos), +} + +/// A [`ClassifyEos`] type that can be used in [`ClassifyResponse`] implementations that never have +/// to classify streaming responses. +/// +/// `NeverClassifyEos` exists only as type. `NeverClassifyEos` values cannot be constructed. +pub struct NeverClassifyEos { + _output_ty: PhantomData T>, + _never: Infallible, +} + +impl ClassifyEos for NeverClassifyEos { + type FailureClass = T; + + fn classify_eos(self, _trailers: Option<&HeaderMap>) -> Result<(), Self::FailureClass> { + // `NeverClassifyEos` contains an `Infallible` so it can never be constructed + unreachable!() + } + + fn classify_error(self, _error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static, + { + // `NeverClassifyEos` contains an `Infallible` so it can never be constructed + unreachable!() + } +} + +impl fmt::Debug for NeverClassifyEos { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NeverClassifyEos").finish() + } +} + +/// The default classifier used for normal HTTP responses. +/// +/// Responses with a `5xx` status code are considered failures, all others are considered +/// successes. +#[derive(Clone, Debug, Default)] +pub struct ServerErrorsAsFailures { + _priv: (), +} + +impl ServerErrorsAsFailures { + /// Create a new [`ServerErrorsAsFailures`]. + pub fn new() -> Self { + Self::default() + } + + /// Returns a [`MakeClassifier`] that produces `ServerErrorsAsFailures`. + /// + /// This is a convenience function that simply calls `SharedClassifier::new`. 
+ pub fn make_classifier() -> SharedClassifier { + SharedClassifier::new(Self::new()) + } +} + +impl ClassifyResponse for ServerErrorsAsFailures { + type FailureClass = ServerErrorsFailureClass; + type ClassifyEos = NeverClassifyEos; + + fn classify_response( + self, + res: &Response, + ) -> ClassifiedResponse { + if res.status().is_server_error() { + ClassifiedResponse::Ready(Err(ServerErrorsFailureClass::StatusCode(res.status()))) + } else { + ClassifiedResponse::Ready(Ok(())) + } + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: fmt::Display + 'static, + { + ServerErrorsFailureClass::Error(error.to_string()) + } +} + +/// The failure class for [`ServerErrorsAsFailures`]. +#[derive(Debug)] +pub enum ServerErrorsFailureClass { + /// A response was classified as a failure with the corresponding status. + StatusCode(StatusCode), + /// A response was classified as an error with the corresponding error description. + Error(String), +} + +impl fmt::Display for ServerErrorsFailureClass { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::StatusCode(code) => write!(f, "Status code: {}", code), + Self::Error(error) => write!(f, "Error: {}", error), + } + } +} + +// Just verify that we can actually use this response classifier to determine retries as well +#[cfg(test)] +mod usable_for_retries { + #![allow(dead_code)] + + use std::fmt; + + use http::{Request, Response}; + use tower::retry::Policy; + + use super::{ClassifiedResponse, ClassifyResponse}; + + trait IsRetryable { + fn is_retryable(&self) -> bool; + } + + #[derive(Clone)] + struct RetryBasedOnClassification { + classifier: C, + // ... 
+ } + + impl Policy, Response, E> for RetryBasedOnClassification + where + C: ClassifyResponse + Clone, + E: fmt::Display + 'static, + C::FailureClass: IsRetryable, + ResB: http_body::Body, + Request: Clone, + E: std::error::Error + 'static, + { + type Future = std::future::Ready<()>; + + fn retry( + &mut self, + _req: &mut Request, + res: &mut Result, E>, + ) -> Option { + match res { + Ok(res) => { + if let ClassifiedResponse::Ready(class) = + self.classifier.clone().classify_response(res) + { + if class.err()?.is_retryable() { + return Some(std::future::ready(())); + } + } + + None + } + Err(err) => self + .classifier + .clone() + .classify_error(err) + .is_retryable() + .then(|| std::future::ready(())), + } + } + + fn clone_request(&mut self, req: &Request) -> Option> { + Some(req.clone()) + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/status_in_range_is_error.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/status_in_range_is_error.rs new file mode 100644 index 0000000000000000000000000000000000000000..8ff830b9270a61c77794e0245efae5fa8460c195 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/classify/status_in_range_is_error.rs @@ -0,0 +1,160 @@ +use super::{ClassifiedResponse, ClassifyResponse, NeverClassifyEos, SharedClassifier}; +use http::StatusCode; +use std::{fmt, ops::RangeInclusive}; + +/// Response classifier that considers responses with a status code within some range to be +/// failures. +/// +/// # Example +/// +/// A client with tracing where server errors _and_ client errors are considered failures. 
+/// +/// ```no_run +/// use tower_http::{trace::TraceLayer, classify::StatusInRangeAsFailures}; +/// use tower::{ServiceBuilder, Service, ServiceExt}; +/// use http::{Request, Method}; +/// use http_body_util::Full; +/// use bytes::Bytes; +/// use hyper_util::{rt::TokioExecutor, client::legacy::Client}; +/// +/// # async fn foo() -> Result<(), tower::BoxError> { +/// let classifier = StatusInRangeAsFailures::new(400..=599); +/// +/// let client = Client::builder(TokioExecutor::new()).build_http(); +/// let mut client = ServiceBuilder::new() +/// .layer(TraceLayer::new(classifier.into_make_classifier())) +/// .service(client); +/// +/// let request = Request::builder() +/// .method(Method::GET) +/// .uri("https://example.com") +/// .body(Full::::default()) +/// .unwrap(); +/// +/// let response = client.ready().await?.call(request).await?; +/// # Ok(()) +/// # } +/// ``` +#[derive(Debug, Clone)] +pub struct StatusInRangeAsFailures { + range: RangeInclusive, +} + +impl StatusInRangeAsFailures { + /// Creates a new `StatusInRangeAsFailures`. + /// + /// # Panics + /// + /// Panics if the start or end of `range` aren't valid status codes as determined by + /// [`StatusCode::from_u16`]. + /// + /// [`StatusCode::from_u16`]: https://docs.rs/http/latest/http/status/struct.StatusCode.html#method.from_u16 + pub fn new(range: RangeInclusive) -> Self { + assert!( + StatusCode::from_u16(*range.start()).is_ok(), + "range start isn't a valid status code" + ); + assert!( + StatusCode::from_u16(*range.end()).is_ok(), + "range end isn't a valid status code" + ); + + Self { range } + } + + /// Creates a new `StatusInRangeAsFailures` that classifies client and server responses as + /// failures. + /// + /// This is a convenience for `StatusInRangeAsFailures::new(400..=599)`. + pub fn new_for_client_and_server_errors() -> Self { + Self::new(400..=599) + } + + /// Convert this `StatusInRangeAsFailures` into a [`MakeClassifier`]. 
+ /// + /// [`MakeClassifier`]: super::MakeClassifier + pub fn into_make_classifier(self) -> SharedClassifier { + SharedClassifier::new(self) + } +} + +impl ClassifyResponse for StatusInRangeAsFailures { + type FailureClass = StatusInRangeFailureClass; + type ClassifyEos = NeverClassifyEos; + + fn classify_response( + self, + res: &http::Response, + ) -> ClassifiedResponse { + if self.range.contains(&res.status().as_u16()) { + let class = StatusInRangeFailureClass::StatusCode(res.status()); + ClassifiedResponse::Ready(Err(class)) + } else { + ClassifiedResponse::Ready(Ok(())) + } + } + + fn classify_error(self, error: &E) -> Self::FailureClass + where + E: std::fmt::Display + 'static, + { + StatusInRangeFailureClass::Error(error.to_string()) + } +} + +/// The failure class for [`StatusInRangeAsFailures`]. +#[derive(Debug)] +pub enum StatusInRangeFailureClass { + /// A response was classified as a failure with the corresponding status. + StatusCode(StatusCode), + /// A response was classified as an error with the corresponding error description. 
+ Error(String), +} + +impl fmt::Display for StatusInRangeFailureClass { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::StatusCode(code) => write!(f, "Status code: {}", code), + Self::Error(error) => write!(f, "Error: {}", error), + } + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use http::Response; + + #[test] + fn basic() { + let classifier = StatusInRangeAsFailures::new(400..=599); + + assert!(matches!( + classifier + .clone() + .classify_response(&response_with_status(200)), + ClassifiedResponse::Ready(Ok(())), + )); + + assert!(matches!( + classifier + .clone() + .classify_response(&response_with_status(400)), + ClassifiedResponse::Ready(Err(StatusInRangeFailureClass::StatusCode( + StatusCode::BAD_REQUEST + ))), + )); + + assert!(matches!( + classifier.classify_response(&response_with_status(500)), + ClassifiedResponse::Ready(Err(StatusInRangeFailureClass::StatusCode( + StatusCode::INTERNAL_SERVER_ERROR + ))), + )); + } + + fn response_with_status(status: u16) -> Response<()> { + Response::builder().status(status).body(()).unwrap() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/body.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/body.rs new file mode 100644 index 0000000000000000000000000000000000000000..259e4a278d3f251a18c8b98d1fd30fe2f14df95e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/body.rs @@ -0,0 +1,387 @@ +#![allow(unused_imports)] + +use crate::compression::CompressionLevel; +use crate::{ + compression_utils::{AsyncReadBody, BodyIntoStream, DecorateAsyncRead, WrapBody}, + BoxError, +}; +#[cfg(feature = "compression-br")] +use async_compression::tokio::bufread::BrotliEncoder; +#[cfg(feature = "compression-gzip")] +use async_compression::tokio::bufread::GzipEncoder; +#[cfg(feature = "compression-deflate")] +use 
async_compression::tokio::bufread::ZlibEncoder; +#[cfg(feature = "compression-zstd")] +use async_compression::tokio::bufread::ZstdEncoder; + +use bytes::{Buf, Bytes}; +use http::HeaderMap; +use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + io, + marker::PhantomData, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tokio_util::io::StreamReader; + +use super::pin_project_cfg::pin_project_cfg; + +pin_project! { + /// Response body of [`Compression`]. + /// + /// [`Compression`]: super::Compression + pub struct CompressionBody + where + B: Body, + { + #[pin] + pub(crate) inner: BodyInner, + } +} + +impl Default for CompressionBody +where + B: Body + Default, +{ + fn default() -> Self { + Self { + inner: BodyInner::Identity { + inner: B::default(), + }, + } + } +} + +impl CompressionBody +where + B: Body, +{ + pub(crate) fn new(inner: BodyInner) -> Self { + Self { inner } + } + + /// Get a reference to the inner body + pub fn get_ref(&self) -> &B { + match &self.inner { + #[cfg(feature = "compression-gzip")] + BodyInner::Gzip { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "compression-deflate")] + BodyInner::Deflate { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "compression-br")] + BodyInner::Brotli { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "compression-zstd")] + BodyInner::Zstd { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + BodyInner::Identity { inner } => inner, + } + } + + /// Get a mutable reference to the inner body + pub fn get_mut(&mut self) -> &mut B { + match &mut self.inner { + #[cfg(feature = "compression-gzip")] + BodyInner::Gzip { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "compression-deflate")] + BodyInner::Deflate { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "compression-br")] + BodyInner::Brotli { inner } => 
inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "compression-zstd")] + BodyInner::Zstd { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + BodyInner::Identity { inner } => inner, + } + } + + /// Get a pinned mutable reference to the inner body + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { + match self.project().inner.project() { + #[cfg(feature = "compression-gzip")] + BodyInnerProj::Gzip { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "compression-deflate")] + BodyInnerProj::Deflate { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "compression-br")] + BodyInnerProj::Brotli { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "compression-zstd")] + BodyInnerProj::Zstd { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + BodyInnerProj::Identity { inner } => inner, + } + } + + /// Consume `self`, returning the inner body + pub fn into_inner(self) -> B { + match self.inner { + #[cfg(feature = "compression-gzip")] + BodyInner::Gzip { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "compression-deflate")] + BodyInner::Deflate { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "compression-br")] + BodyInner::Brotli { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "compression-zstd")] + BodyInner::Zstd { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + BodyInner::Identity { inner } => inner, + } + } +} + +#[cfg(feature = "compression-gzip")] +type GzipBody = WrapBody>; + +#[cfg(feature = "compression-deflate")] +type 
DeflateBody = WrapBody>; + +#[cfg(feature = "compression-br")] +type BrotliBody = WrapBody>; + +#[cfg(feature = "compression-zstd")] +type ZstdBody = WrapBody>; + +pin_project_cfg! { + #[project = BodyInnerProj] + pub(crate) enum BodyInner + where + B: Body, + { + #[cfg(feature = "compression-gzip")] + Gzip { + #[pin] + inner: GzipBody, + }, + #[cfg(feature = "compression-deflate")] + Deflate { + #[pin] + inner: DeflateBody, + }, + #[cfg(feature = "compression-br")] + Brotli { + #[pin] + inner: BrotliBody, + }, + #[cfg(feature = "compression-zstd")] + Zstd { + #[pin] + inner: ZstdBody, + }, + Identity { + #[pin] + inner: B, + }, + } +} + +impl BodyInner { + #[cfg(feature = "compression-gzip")] + pub(crate) fn gzip(inner: WrapBody>) -> Self { + Self::Gzip { inner } + } + + #[cfg(feature = "compression-deflate")] + pub(crate) fn deflate(inner: WrapBody>) -> Self { + Self::Deflate { inner } + } + + #[cfg(feature = "compression-br")] + pub(crate) fn brotli(inner: WrapBody>) -> Self { + Self::Brotli { inner } + } + + #[cfg(feature = "compression-zstd")] + pub(crate) fn zstd(inner: WrapBody>) -> Self { + Self::Zstd { inner } + } + + pub(crate) fn identity(inner: B) -> Self { + Self::Identity { inner } + } +} + +impl Body for CompressionBody +where + B: Body, + B::Error: Into, +{ + type Data = Bytes; + type Error = BoxError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match self.project().inner.project() { + #[cfg(feature = "compression-gzip")] + BodyInnerProj::Gzip { inner } => inner.poll_frame(cx), + #[cfg(feature = "compression-deflate")] + BodyInnerProj::Deflate { inner } => inner.poll_frame(cx), + #[cfg(feature = "compression-br")] + BodyInnerProj::Brotli { inner } => inner.poll_frame(cx), + #[cfg(feature = "compression-zstd")] + BodyInnerProj::Zstd { inner } => inner.poll_frame(cx), + BodyInnerProj::Identity { inner } => match ready!(inner.poll_frame(cx)) { + Some(Ok(frame)) => { + let frame = 
frame.map_data(|mut buf| buf.copy_to_bytes(buf.remaining())); + Poll::Ready(Some(Ok(frame))) + } + Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), + None => Poll::Ready(None), + }, + } + } + + fn size_hint(&self) -> http_body::SizeHint { + if let BodyInner::Identity { inner } = &self.inner { + inner.size_hint() + } else { + http_body::SizeHint::new() + } + } + + fn is_end_stream(&self) -> bool { + if let BodyInner::Identity { inner } = &self.inner { + inner.is_end_stream() + } else { + false + } + } +} + +#[cfg(feature = "compression-gzip")] +impl DecorateAsyncRead for GzipEncoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = GzipEncoder; + + fn apply(input: Self::Input, quality: CompressionLevel) -> Self::Output { + GzipEncoder::with_quality(input, quality.into_async_compression()) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "compression-deflate")] +impl DecorateAsyncRead for ZlibEncoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = ZlibEncoder; + + fn apply(input: Self::Input, quality: CompressionLevel) -> Self::Output { + ZlibEncoder::with_quality(input, quality.into_async_compression()) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "compression-br")] +impl DecorateAsyncRead for BrotliEncoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = BrotliEncoder; + + fn apply(input: Self::Input, quality: CompressionLevel) -> Self::Output { + // The brotli crate used under the hood here has a default compression level of 11, + // which is the max for brotli. This causes extremely slow compression times, so we + // manually set a default of 4 here. + // + // This is the same default used by NGINX for on-the-fly brotli compression. 
+ let level = match quality { + CompressionLevel::Default => async_compression::Level::Precise(4), + other => other.into_async_compression(), + }; + BrotliEncoder::with_quality(input, level) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "compression-zstd")] +impl DecorateAsyncRead for ZstdEncoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = ZstdEncoder; + + fn apply(input: Self::Input, quality: CompressionLevel) -> Self::Output { + // See https://issues.chromium.org/issues/41493659: + // "For memory usage reasons, Chromium limits the window size to 8MB" + // See https://datatracker.ietf.org/doc/html/rfc8878#name-window-descriptor + // "For improved interoperability, it's recommended for decoders to support values + // of Window_Size up to 8 MB and for encoders not to generate frames requiring a + // Window_Size larger than 8 MB." + // Level 17 in zstd (as of v1.5.6) is the first level with a window size of 8 MB (2^23): + // https://github.com/facebook/zstd/blob/v1.5.6/lib/compress/clevels.h#L25-L51 + // Set the parameter for all levels >= 17. This will either have no effect (but reduce + // the risk of future changes in zstd) or limit the window log to 8MB. + let needs_window_limit = match quality { + CompressionLevel::Best => true, // level 20 + CompressionLevel::Precise(level) => level >= 17, + _ => false, + }; + // The parameter is not set for levels below 17 as it will increase the window size + // for those levels. 
+ if needs_window_limit { + let params = [async_compression::zstd::CParameter::window_log(23)]; + ZstdEncoder::with_quality_and_params(input, quality.into_async_compression(), ¶ms) + } else { + ZstdEncoder::with_quality(input, quality.into_async_compression()) + } + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/future.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/future.rs new file mode 100644 index 0000000000000000000000000000000000000000..3e899a736c8869dc29e696c298d9bfd6d60d56d0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/future.rs @@ -0,0 +1,133 @@ +#![allow(unused_imports)] + +use super::{body::BodyInner, CompressionBody}; +use crate::compression::predicate::Predicate; +use crate::compression::CompressionLevel; +use crate::compression_utils::WrapBody; +use crate::content_encoding::Encoding; +use http::{header, HeaderMap, HeaderValue, Response}; +use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; + +pin_project! { + /// Response future of [`Compression`]. 
+ /// + /// [`Compression`]: super::Compression + #[derive(Debug)] + pub struct ResponseFuture { + #[pin] + pub(crate) inner: F, + pub(crate) encoding: Encoding, + pub(crate) predicate: P, + pub(crate) quality: CompressionLevel, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + B: Body, + P: Predicate, +{ + type Output = Result>, E>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let res = ready!(self.as_mut().project().inner.poll(cx)?); + + // never recompress responses that are already compressed + let should_compress = !res.headers().contains_key(header::CONTENT_ENCODING) + // never compress responses that are ranges + && !res.headers().contains_key(header::CONTENT_RANGE) + && self.predicate.should_compress(&res); + + let (mut parts, body) = res.into_parts(); + + if should_compress + && !parts.headers.get_all(header::VARY).iter().any(|value| { + contains_ignore_ascii_case( + value.as_bytes(), + header::ACCEPT_ENCODING.as_str().as_bytes(), + ) + }) + { + parts + .headers + .append(header::VARY, header::ACCEPT_ENCODING.into()); + } + + let body = match (should_compress, self.encoding) { + // if compression is _not_ supported or the client doesn't accept it + (false, _) | (_, Encoding::Identity) => { + return Poll::Ready(Ok(Response::from_parts( + parts, + CompressionBody::new(BodyInner::identity(body)), + ))) + } + + #[cfg(feature = "compression-gzip")] + (_, Encoding::Gzip) => { + CompressionBody::new(BodyInner::gzip(WrapBody::new(body, self.quality))) + } + #[cfg(feature = "compression-deflate")] + (_, Encoding::Deflate) => { + CompressionBody::new(BodyInner::deflate(WrapBody::new(body, self.quality))) + } + #[cfg(feature = "compression-br")] + (_, Encoding::Brotli) => { + CompressionBody::new(BodyInner::brotli(WrapBody::new(body, self.quality))) + } + #[cfg(feature = "compression-zstd")] + (_, Encoding::Zstd) => { + CompressionBody::new(BodyInner::zstd(WrapBody::new(body, self.quality))) + } + #[cfg(feature = "fs")] + 
#[allow(unreachable_patterns)] + (true, _) => { + // This should never happen because the `AcceptEncoding` struct which is used to determine + // `self.encoding` will only enable the different compression algorithms if the + // corresponding crate feature has been enabled. This means + // Encoding::[Gzip|Brotli|Deflate] should be impossible at this point without the + // features enabled. + // + // The match arm is still required though because the `fs` feature uses the + // Encoding struct independently and requires no compression logic to be enabled. + // This means a combination of an individual compression feature and `fs` will fail + // to compile without this branch even though it will never be reached. + // + // To safeguard against refactors that changes this relationship or other bugs the + // server will return an uncompressed response instead of panicking since that could + // become a ddos attack vector. + return Poll::Ready(Ok(Response::from_parts( + parts, + CompressionBody::new(BodyInner::identity(body)), + ))); + } + }; + + parts.headers.remove(header::ACCEPT_RANGES); + parts.headers.remove(header::CONTENT_LENGTH); + + parts + .headers + .insert(header::CONTENT_ENCODING, self.encoding.into_header_value()); + + let res = Response::from_parts(parts, body); + Poll::Ready(Ok(res)) + } +} + +fn contains_ignore_ascii_case(mut haystack: &[u8], needle: &[u8]) -> bool { + while needle.len() <= haystack.len() { + if haystack[..needle.len()].eq_ignore_ascii_case(needle) { + return true; + } + haystack = &haystack[1..]; + } + + false +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/layer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/layer.rs new file mode 100644 index 0000000000000000000000000000000000000000..5eca0c50090d716ef090c927a854ef11540712e5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/layer.rs @@ 
-0,0 +1,240 @@ +use super::{Compression, Predicate}; +use crate::compression::predicate::DefaultPredicate; +use crate::compression::CompressionLevel; +use crate::compression_utils::AcceptEncoding; +use tower_layer::Layer; + +/// Compress response bodies of the underlying service. +/// +/// This uses the `Accept-Encoding` header to pick an appropriate encoding and adds the +/// `Content-Encoding` header to responses. +/// +/// See the [module docs](crate::compression) for more details. +#[derive(Clone, Debug, Default)] +pub struct CompressionLayer

{ + accept: AcceptEncoding, + predicate: P, + quality: CompressionLevel, +} + +impl Layer for CompressionLayer

+where + P: Predicate, +{ + type Service = Compression; + + fn layer(&self, inner: S) -> Self::Service { + Compression { + inner, + accept: self.accept, + predicate: self.predicate.clone(), + quality: self.quality, + } + } +} + +impl CompressionLayer { + /// Creates a new [`CompressionLayer`]. + pub fn new() -> Self { + Self::default() + } + + /// Sets whether to enable the gzip encoding. + #[cfg(feature = "compression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to enable the Deflate encoding. + #[cfg(feature = "compression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to enable the Brotli encoding. + #[cfg(feature = "compression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to enable the Zstd encoding. + #[cfg(feature = "compression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Sets the compression quality. + pub fn quality(mut self, quality: CompressionLevel) -> Self { + self.quality = quality; + self + } + + /// Disables the gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. + pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables the Deflate encoding. + /// + /// This method is available even if the `deflate` crate feature is disabled. + pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables the Brotli encoding. + /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables the Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. 
+ pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } + + /// Replace the current compression predicate. + /// + /// See [`Compression::compress_when`] for more details. + pub fn compress_when(self, predicate: C) -> CompressionLayer + where + C: Predicate, + { + CompressionLayer { + accept: self.accept, + predicate, + quality: self.quality, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_helpers::Body; + use http::{header::ACCEPT_ENCODING, Request, Response}; + use http_body_util::BodyExt; + use std::convert::Infallible; + use tokio::fs::File; + use tokio_util::io::ReaderStream; + use tower::{Service, ServiceBuilder, ServiceExt}; + + async fn handle(_req: Request) -> Result, Infallible> { + // Open the file. + let file = File::open("Cargo.toml").await.expect("file missing"); + // Convert the file into a `Stream`. + let stream = ReaderStream::new(file); + // Convert the `Stream` into a `Body`. + let body = Body::from_stream(stream); + // Create response. + Ok(Response::new(body)) + } + + #[tokio::test] + async fn accept_encoding_configuration_works() -> Result<(), crate::BoxError> { + let deflate_only_layer = CompressionLayer::new() + .quality(CompressionLevel::Best) + .no_br() + .no_gzip(); + + let mut service = ServiceBuilder::new() + // Compress responses based on the `Accept-Encoding` header. 
+ .layer(deflate_only_layer) + .service_fn(handle); + + // Call the service with the deflate only layer + let request = Request::builder() + .header(ACCEPT_ENCODING, "gzip, deflate, br") + .body(Body::empty())?; + + let response = service.ready().await?.call(request).await?; + + assert_eq!(response.headers()["content-encoding"], "deflate"); + + // Read the body + let body = response.into_body(); + let bytes = body.collect().await.unwrap().to_bytes(); + + let deflate_bytes_len = bytes.len(); + + let br_only_layer = CompressionLayer::new() + .quality(CompressionLevel::Best) + .no_gzip() + .no_deflate(); + + let mut service = ServiceBuilder::new() + // Compress responses based on the `Accept-Encoding` header. + .layer(br_only_layer) + .service_fn(handle); + + // Call the service with the br only layer + let request = Request::builder() + .header(ACCEPT_ENCODING, "gzip, deflate, br") + .body(Body::empty())?; + + let response = service.ready().await?.call(request).await?; + + assert_eq!(response.headers()["content-encoding"], "br"); + + // Read the body + let body = response.into_body(); + let bytes = body.collect().await.unwrap().to_bytes(); + + let br_byte_length = bytes.len(); + + // check the corresponding algorithms are actually used + // br should compresses better than deflate + assert!(br_byte_length < deflate_bytes_len * 9 / 10); + + Ok(()) + } + + /// Test ensuring that zstd compression will not exceed an 8MiB window size; browsers do not + /// accept responses using 16MiB+ window sizes. + #[tokio::test] + async fn zstd_is_web_safe() -> Result<(), crate::BoxError> { + async fn zeroes(_req: Request) -> Result, Infallible> { + Ok(Response::new(Body::from(vec![0u8; 18_874_368]))) + } + // zstd will (I believe) lower its window size if a larger one isn't beneficial and + // it knows the size of the input; use an 18MiB body to ensure it would want a + // >=16MiB window (though it might not be able to see the input size here). 
+ + let zstd_layer = CompressionLayer::new() + .quality(CompressionLevel::Best) + .no_br() + .no_deflate() + .no_gzip(); + + let mut service = ServiceBuilder::new().layer(zstd_layer).service_fn(zeroes); + + let request = Request::builder() + .header(ACCEPT_ENCODING, "zstd") + .body(Body::empty())?; + + let response = service.ready().await?.call(request).await?; + + assert_eq!(response.headers()["content-encoding"], "zstd"); + + let body = response.into_body(); + let bytes = body.collect().await?.to_bytes(); + let mut dec = zstd::Decoder::new(&*bytes)?; + dec.window_log_max(23)?; // Limit window size accepted by decoder to 2 ^ 23 bytes (8MiB) + + std::io::copy(&mut dec, &mut std::io::sink())?; + + Ok(()) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..5772b9ba2d686052b4b516bd469aa35ec2e054c4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/mod.rs @@ -0,0 +1,511 @@ +//! Middleware that compresses response bodies. +//! +//! # Example +//! +//! Example showing how to respond with the compressed contents of a file. +//! +//! ```rust +//! use bytes::{Bytes, BytesMut}; +//! use http::{Request, Response, header::ACCEPT_ENCODING}; +//! use http_body_util::{Full, BodyExt, StreamBody, combinators::UnsyncBoxBody}; +//! use http_body::Frame; +//! use std::convert::Infallible; +//! use tokio::fs::{self, File}; +//! use tokio_util::io::ReaderStream; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use tower_http::{compression::CompressionLayer, BoxError}; +//! use futures_util::TryStreamExt; +//! +//! type BoxBody = UnsyncBoxBody; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! async fn handle(req: Request>) -> Result, Infallible> { +//! 
// Open the file. +//! let file = File::open("Cargo.toml").await.expect("file missing"); +//! // Convert the file into a `Stream` of `Bytes`. +//! let stream = ReaderStream::new(file); +//! // Convert the stream into a stream of data `Frame`s. +//! let stream = stream.map_ok(Frame::data); +//! // Convert the `Stream` into a `Body`. +//! let body = StreamBody::new(stream); +//! // Erase the type because its very hard to name in the function signature. +//! let body = body.boxed_unsync(); +//! // Create response. +//! Ok(Response::new(body)) +//! } +//! +//! let mut service = ServiceBuilder::new() +//! // Compress responses based on the `Accept-Encoding` header. +//! .layer(CompressionLayer::new()) +//! .service_fn(handle); +//! +//! // Call the service. +//! let request = Request::builder() +//! .header(ACCEPT_ENCODING, "gzip") +//! .body(Full::::default())?; +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!(response.headers()["content-encoding"], "gzip"); +//! +//! // Read the body +//! let bytes = response +//! .into_body() +//! .collect() +//! .await? +//! .to_bytes(); +//! +//! // The compressed body should be smaller 🤞 +//! let uncompressed_len = fs::read_to_string("Cargo.toml").await?.len(); +//! assert!(bytes.len() < uncompressed_len); +//! # +//! # Ok(()) +//! # } +//! ``` +//! 
+ +pub mod predicate; + +mod body; +mod future; +mod layer; +mod pin_project_cfg; +mod service; + +#[doc(inline)] +pub use self::{ + body::CompressionBody, + future::ResponseFuture, + layer::CompressionLayer, + predicate::{DefaultPredicate, Predicate}, + service::Compression, +}; +pub use crate::compression_utils::CompressionLevel; + +#[cfg(test)] +mod tests { + use crate::compression::predicate::SizeAbove; + + use super::*; + use crate::test_helpers::{Body, WithTrailers}; + use async_compression::tokio::write::{BrotliDecoder, BrotliEncoder}; + use flate2::read::GzDecoder; + use http::header::{ + ACCEPT_ENCODING, ACCEPT_RANGES, CONTENT_ENCODING, CONTENT_RANGE, CONTENT_TYPE, RANGE, + }; + use http::{HeaderMap, HeaderName, HeaderValue, Request, Response}; + use http_body::Body as _; + use http_body_util::BodyExt; + use std::convert::Infallible; + use std::io::Read; + use std::sync::{Arc, RwLock}; + use tokio::io::{AsyncReadExt, AsyncWriteExt}; + use tokio_util::io::StreamReader; + use tower::{service_fn, Service, ServiceExt}; + + // Compression filter allows every other request to be compressed + #[derive(Clone)] + struct Always; + + impl Predicate for Always { + fn should_compress(&self, _: &http::Response) -> bool + where + B: http_body::Body, + { + true + } + } + + #[tokio::test] + async fn gzip_works() { + let svc = service_fn(handle); + let mut svc = Compression::new(svc).compress_when(Always); + + // call the service + let req = Request::builder() + .header("accept-encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // read the compressed body + let collected = res.into_body().collect().await.unwrap(); + let trailers = collected.trailers().cloned().unwrap(); + let compressed_data = collected.to_bytes(); + + // decompress the body + // doing this with flate2 as that is much easier than async-compression and blocking during + // tests is fine + let mut decoder = 
GzDecoder::new(&compressed_data[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + + assert_eq!(decompressed, "Hello, World!"); + + // trailers are maintained + assert_eq!(trailers["foo"], "bar"); + } + + #[tokio::test] + async fn x_gzip_works() { + let svc = service_fn(handle); + let mut svc = Compression::new(svc).compress_when(Always); + + // call the service + let req = Request::builder() + .header("accept-encoding", "x-gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // we treat x-gzip as equivalent to gzip and don't have to return x-gzip + // taking extra caution by checking all headers with this name + assert_eq!( + res.headers() + .get_all("content-encoding") + .iter() + .collect::>(), + vec!(HeaderValue::from_static("gzip")) + ); + + // read the compressed body + let collected = res.into_body().collect().await.unwrap(); + let trailers = collected.trailers().cloned().unwrap(); + let compressed_data = collected.to_bytes(); + + // decompress the body + // doing this with flate2 as that is much easier than async-compression and blocking during + // tests is fine + let mut decoder = GzDecoder::new(&compressed_data[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + + assert_eq!(decompressed, "Hello, World!"); + + // trailers are maintained + assert_eq!(trailers["foo"], "bar"); + } + + #[tokio::test] + async fn zstd_works() { + let svc = service_fn(handle); + let mut svc = Compression::new(svc).compress_when(Always); + + // call the service + let req = Request::builder() + .header("accept-encoding", "zstd") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // read the compressed body + let body = res.into_body(); + let compressed_data = body.collect().await.unwrap().to_bytes(); + + // decompress the body + let decompressed = 
zstd::stream::decode_all(std::io::Cursor::new(compressed_data)).unwrap(); + let decompressed = String::from_utf8(decompressed).unwrap(); + + assert_eq!(decompressed, "Hello, World!"); + } + + #[tokio::test] + async fn no_recompress() { + const DATA: &str = "Hello, World! I'm already compressed with br!"; + + let svc = service_fn(|_| async { + let buf = { + let mut buf = Vec::new(); + + let mut enc = BrotliEncoder::new(&mut buf); + enc.write_all(DATA.as_bytes()).await?; + enc.flush().await?; + buf + }; + + let resp = Response::builder() + .header("content-encoding", "br") + .body(Body::from(buf)) + .unwrap(); + Ok::<_, std::io::Error>(resp) + }); + let mut svc = Compression::new(svc); + + // call the service + // + // note: the accept-encoding doesn't match the content-encoding above, so that + // we're able to see if the compression layer triggered or not + let req = Request::builder() + .header("accept-encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // check we didn't recompress + assert_eq!( + res.headers() + .get("content-encoding") + .and_then(|h| h.to_str().ok()) + .unwrap_or_default(), + "br", + ); + + // read the compressed body + let body = res.into_body(); + let data = body.collect().await.unwrap().to_bytes(); + + // decompress the body + let data = { + let mut output_buf = Vec::new(); + let mut decoder = BrotliDecoder::new(&mut output_buf); + decoder + .write_all(&data) + .await + .expect("couldn't brotli-decode"); + decoder.flush().await.expect("couldn't flush"); + output_buf + }; + + assert_eq!(data, DATA.as_bytes()); + } + + async fn handle(_req: Request) -> Result>, Infallible> { + let mut trailers = HeaderMap::new(); + trailers.insert(HeaderName::from_static("foo"), "bar".parse().unwrap()); + let body = Body::from("Hello, World!").with_trailers(trailers); + Ok(Response::builder().body(body).unwrap()) + } + + #[tokio::test] + async fn will_not_compress_if_filtered_out() { + 
use predicate::Predicate; + + const DATA: &str = "Hello world uncompressed"; + + let svc_fn = service_fn(|_| async { + let resp = Response::builder() + // .header("content-encoding", "br") + .body(Body::from(DATA.as_bytes())) + .unwrap(); + Ok::<_, std::io::Error>(resp) + }); + + // Compression filter allows every other request to be compressed + #[derive(Default, Clone)] + struct EveryOtherResponse(Arc>); + + #[allow(clippy::dbg_macro)] + impl Predicate for EveryOtherResponse { + fn should_compress(&self, _: &http::Response) -> bool + where + B: http_body::Body, + { + let mut guard = self.0.write().unwrap(); + let should_compress = *guard % 2 != 0; + *guard += 1; + dbg!(should_compress) + } + } + + let mut svc = Compression::new(svc_fn).compress_when(EveryOtherResponse::default()); + let req = Request::builder() + .header("accept-encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // read the uncompressed body + let body = res.into_body(); + let data = body.collect().await.unwrap().to_bytes(); + let still_uncompressed = String::from_utf8(data.to_vec()).unwrap(); + assert_eq!(DATA, &still_uncompressed); + + // Compression filter will compress the next body + let req = Request::builder() + .header("accept-encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // read the compressed body + let body = res.into_body(); + let data = body.collect().await.unwrap().to_bytes(); + assert!(String::from_utf8(data.to_vec()).is_err()); + } + + #[tokio::test] + async fn doesnt_compress_images() { + async fn handle(_req: Request) -> Result, Infallible> { + let mut res = Response::new(Body::from( + "a".repeat((SizeAbove::DEFAULT_MIN_SIZE * 2) as usize), + )); + res.headers_mut() + .insert(CONTENT_TYPE, "image/png".parse().unwrap()); + Ok(res) + } + + let svc = Compression::new(service_fn(handle)); + + let res = svc + .oneshot( + Request::builder() 
+ .header(ACCEPT_ENCODING, "gzip") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert!(res.headers().get(CONTENT_ENCODING).is_none()); + } + + #[tokio::test] + async fn does_compress_svg() { + async fn handle(_req: Request) -> Result, Infallible> { + let mut res = Response::new(Body::from( + "a".repeat((SizeAbove::DEFAULT_MIN_SIZE * 2) as usize), + )); + res.headers_mut() + .insert(CONTENT_TYPE, "image/svg+xml".parse().unwrap()); + Ok(res) + } + + let svc = Compression::new(service_fn(handle)); + + let res = svc + .oneshot( + Request::builder() + .header(ACCEPT_ENCODING, "gzip") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(res.headers()[CONTENT_ENCODING], "gzip"); + } + + #[tokio::test] + async fn compress_with_quality() { + const DATA: &str = "Check compression quality level! Check compression quality level! Check compression quality level!"; + let level = CompressionLevel::Best; + + let svc = service_fn(|_| async { + let resp = Response::builder() + .body(Body::from(DATA.as_bytes())) + .unwrap(); + Ok::<_, std::io::Error>(resp) + }); + + let mut svc = Compression::new(svc).quality(level); + + // call the service + let req = Request::builder() + .header("accept-encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + + // read the compressed body + let body = res.into_body(); + let compressed_data = body.collect().await.unwrap().to_bytes(); + + // build the compressed body with the same quality level + let compressed_with_level = { + use async_compression::tokio::bufread::BrotliEncoder; + + let stream = Box::pin(futures_util::stream::once(async move { + Ok::<_, std::io::Error>(DATA.as_bytes()) + })); + let reader = StreamReader::new(stream); + let mut enc = BrotliEncoder::with_quality(reader, level.into_async_compression()); + + let mut buf = Vec::new(); + enc.read_to_end(&mut buf).await.unwrap(); + buf + }; + + assert_eq!( + compressed_data, + 
compressed_with_level.as_slice(), + "Compression level is not respected" + ); + } + + #[tokio::test] + async fn should_not_compress_ranges() { + let svc = service_fn(|_| async { + let mut res = Response::new(Body::from("Hello")); + let headers = res.headers_mut(); + headers.insert(ACCEPT_RANGES, "bytes".parse().unwrap()); + headers.insert(CONTENT_RANGE, "bytes 0-4/*".parse().unwrap()); + Ok::<_, std::io::Error>(res) + }); + let mut svc = Compression::new(svc).compress_when(Always); + + // call the service + let req = Request::builder() + .header(ACCEPT_ENCODING, "gzip") + .header(RANGE, "bytes=0-4") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + let headers = res.headers().clone(); + + // read the uncompressed body + let collected = res.into_body().collect().await.unwrap().to_bytes(); + + assert_eq!(headers[ACCEPT_RANGES], "bytes"); + assert!(!headers.contains_key(CONTENT_ENCODING)); + assert_eq!(collected, "Hello"); + } + + #[tokio::test] + async fn should_strip_accept_ranges_header_when_compressing() { + let svc = service_fn(|_| async { + let mut res = Response::new(Body::from("Hello, World!")); + res.headers_mut() + .insert(ACCEPT_RANGES, "bytes".parse().unwrap()); + Ok::<_, std::io::Error>(res) + }); + let mut svc = Compression::new(svc).compress_when(Always); + + // call the service + let req = Request::builder() + .header(ACCEPT_ENCODING, "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + let headers = res.headers().clone(); + + // read the compressed body + let collected = res.into_body().collect().await.unwrap(); + let compressed_data = collected.to_bytes(); + + // decompress the body + // doing this with flate2 as that is much easier than async-compression and blocking during + // tests is fine + let mut decoder = GzDecoder::new(&compressed_data[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut 
decompressed).unwrap(); + + assert!(!headers.contains_key(ACCEPT_RANGES)); + assert_eq!(headers[CONTENT_ENCODING], "gzip"); + assert_eq!(decompressed, "Hello, World!"); + } + + #[tokio::test] + async fn size_hint_identity() { + let msg = "Hello, world!"; + let svc = service_fn(|_| async { Ok::<_, std::io::Error>(Response::new(Body::from(msg))) }); + let mut svc = Compression::new(svc); + + let req = Request::new(Body::empty()); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + let body = res.into_body(); + assert_eq!(body.size_hint().exact().unwrap(), msg.len() as u64); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/pin_project_cfg.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/pin_project_cfg.rs new file mode 100644 index 0000000000000000000000000000000000000000..655b8d94e9a0e6aad480d7225d00235bbc693fff --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/pin_project_cfg.rs @@ -0,0 +1,144 @@ +// Full credit to @tesaguri who posted this gist under CC0 1.0 Universal licence +// https://gist.github.com/tesaguri/2a1c0790a48bbda3dd7f71c26d02a793 + +macro_rules! pin_project_cfg { + ($(#[$($attr:tt)*])* $vis:vis enum $($rest:tt)+) => { + pin_project_cfg! { + @outer [$(#[$($attr)*])* $vis enum] $($rest)+ + } + }; + // Accumulate type parameters and `where` clause. + (@outer [$($accum:tt)*] $tt:tt $($rest:tt)+) => { + pin_project_cfg! { + @outer [$($accum)* $tt] $($rest)+ + } + }; + (@outer [$($accum:tt)*] { $($body:tt)* }) => { + pin_project_cfg! { + @body #[cfg(all())] [$($accum)*] {} $($body)* + } + }; + // Process a variant with `cfg`. + ( + @body + #[cfg(all($($pred_accum:tt)*))] + $outer:tt + { $($accum:tt)* } + + #[cfg($($pred:tt)*)] + $(#[$($attr:tt)*])* $variant:ident { $($body:tt)* }, + $($rest:tt)* + ) => { + // Create two versions of the enum with `cfg($pred)` and `cfg(not($pred))`. 
+ pin_project_cfg! { + @variant_body + { $($body)* } + {} + #[cfg(all($($pred_accum)* $($pred)*,))] + $outer + { $($accum)* $(#[$($attr)*])* $variant } + $($rest)* + } + pin_project_cfg! { + @body + #[cfg(all($($pred_accum)* not($($pred)*),))] + $outer + { $($accum)* } + $($rest)* + } + }; + // Process a variant without `cfg`. + ( + @body + #[cfg(all($($pred_accum:tt)*))] + $outer:tt + { $($accum:tt)* } + + $(#[$($attr:tt)*])* $variant:ident { $($body:tt)* }, + $($rest:tt)* + ) => { + pin_project_cfg! { + @variant_body + { $($body)* } + {} + #[cfg(all($($pred_accum)*))] + $outer + { $($accum)* $(#[$($attr)*])* $variant } + $($rest)* + } + }; + // Process a variant field with `cfg`. + ( + @variant_body + { + #[cfg($($pred:tt)*)] + $(#[$($attr:tt)*])* $field:ident: $ty:ty, + $($rest:tt)* + } + { $($accum:tt)* } + #[cfg(all($($pred_accum:tt)*))] + $($outer:tt)* + ) => { + pin_project_cfg! { + @variant_body + {$($rest)*} + { $($accum)* $(#[$($attr)*])* $field: $ty, } + #[cfg(all($($pred_accum)* $($pred)*,))] + $($outer)* + } + pin_project_cfg! { + @variant_body + { $($rest)* } + { $($accum)* } + #[cfg(all($($pred_accum)* not($($pred)*),))] + $($outer)* + } + }; + // Process a variant field without `cfg`. + ( + @variant_body + { + $(#[$($attr:tt)*])* $field:ident: $ty:ty, + $($rest:tt)* + } + { $($accum:tt)* } + $($outer:tt)* + ) => { + pin_project_cfg! { + @variant_body + {$($rest)*} + { $($accum)* $(#[$($attr)*])* $field: $ty, } + $($outer)* + } + }; + ( + @variant_body + {} + $body:tt + #[cfg(all($($pred_accum:tt)*))] + $outer:tt + { $($accum:tt)* } + $($rest:tt)* + ) => { + pin_project_cfg! { + @body + #[cfg(all($($pred_accum)*))] + $outer + { $($accum)* $body, } + $($rest)* + } + }; + ( + @body + #[$cfg:meta] + [$($outer:tt)*] + $body:tt + ) => { + #[$cfg] + pin_project_lite::pin_project! 
{ + $($outer)* $body + } + }; +} + +pub(crate) use pin_project_cfg; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/predicate.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/predicate.rs new file mode 100644 index 0000000000000000000000000000000000000000..88c3101c119b9e04fb068f71d791b02fa6d6677b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/predicate.rs @@ -0,0 +1,272 @@ +//! Predicates for disabling compression of responses. +//! +//! Predicates are applied with [`Compression::compress_when`] or +//! [`CompressionLayer::compress_when`]. +//! +//! [`Compression::compress_when`]: super::Compression::compress_when +//! [`CompressionLayer::compress_when`]: super::CompressionLayer::compress_when + +use http::{header, Extensions, HeaderMap, StatusCode, Version}; +use http_body::Body; +use std::{fmt, sync::Arc}; + +/// Predicate used to determine if a response should be compressed or not. +pub trait Predicate: Clone { + /// Should this response be compressed or not? + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body; + + /// Combine two predicates into one. + /// + /// The resulting predicate enables compression if both inner predicates do. 
+ fn and(self, other: Other) -> And + where + Self: Sized, + Other: Predicate, + { + And { + lhs: self, + rhs: other, + } + } +} + +impl Predicate for F +where + F: Fn(StatusCode, Version, &HeaderMap, &Extensions) -> bool + Clone, +{ + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + let status = response.status(); + let version = response.version(); + let headers = response.headers(); + let extensions = response.extensions(); + self(status, version, headers, extensions) + } +} + +impl Predicate for Option +where + T: Predicate, +{ + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + self.as_ref() + .map(|inner| inner.should_compress(response)) + .unwrap_or(true) + } +} + +/// Two predicates combined into one. +/// +/// Created with [`Predicate::and`] +#[derive(Debug, Clone, Default, Copy)] +pub struct And { + lhs: Lhs, + rhs: Rhs, +} + +impl Predicate for And +where + Lhs: Predicate, + Rhs: Predicate, +{ + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + self.lhs.should_compress(response) && self.rhs.should_compress(response) + } +} + +/// The default predicate used by [`Compression`] and [`CompressionLayer`]. +/// +/// This will compress responses unless: +/// +/// - They're gRPC, which has its own protocol specific compression scheme. +/// - It's an image as determined by the `content-type` starting with `image/`. +/// - They're Server-Sent Events (SSE) as determined by the `content-type` being `text/event-stream`. +/// - The response is less than 32 bytes. +/// +/// # Configuring the defaults +/// +/// `DefaultPredicate` doesn't support any configuration. 
Instead you can build your own predicate +/// by combining types in this module: +/// +/// ```rust +/// use tower_http::compression::predicate::{SizeAbove, NotForContentType, Predicate}; +/// +/// // slightly large min size than the default 32 +/// let predicate = SizeAbove::new(256) +/// // still don't compress gRPC +/// .and(NotForContentType::GRPC) +/// // still don't compress images +/// .and(NotForContentType::IMAGES) +/// // also don't compress JSON +/// .and(NotForContentType::const_new("application/json")); +/// ``` +/// +/// [`Compression`]: super::Compression +/// [`CompressionLayer`]: super::CompressionLayer +#[derive(Clone)] +pub struct DefaultPredicate( + And, NotForContentType>, NotForContentType>, +); + +impl DefaultPredicate { + /// Create a new `DefaultPredicate`. + pub fn new() -> Self { + let inner = SizeAbove::new(SizeAbove::DEFAULT_MIN_SIZE) + .and(NotForContentType::GRPC) + .and(NotForContentType::IMAGES) + .and(NotForContentType::SSE); + Self(inner) + } +} + +impl Default for DefaultPredicate { + fn default() -> Self { + Self::new() + } +} + +impl Predicate for DefaultPredicate { + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + self.0.should_compress(response) + } +} + +/// [`Predicate`] that will only allow compression of responses above a certain size. +#[derive(Clone, Copy, Debug)] +pub struct SizeAbove(u16); + +impl SizeAbove { + pub(crate) const DEFAULT_MIN_SIZE: u16 = 32; + + /// Create a new `SizeAbove` predicate that will only compress responses larger than + /// `min_size_bytes`. + /// + /// The response will be compressed if the exact size cannot be determined through either the + /// `content-length` header or [`Body::size_hint`]. 
+ pub const fn new(min_size_bytes: u16) -> Self { + Self(min_size_bytes) + } +} + +impl Default for SizeAbove { + fn default() -> Self { + Self(Self::DEFAULT_MIN_SIZE) + } +} + +impl Predicate for SizeAbove { + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + let content_size = response.body().size_hint().exact().or_else(|| { + response + .headers() + .get(header::CONTENT_LENGTH) + .and_then(|h| h.to_str().ok()) + .and_then(|val| val.parse().ok()) + }); + + match content_size { + Some(size) => size >= (self.0 as u64), + _ => true, + } + } +} + +/// Predicate that wont allow responses with a specific `content-type` to be compressed. +#[derive(Clone, Debug)] +pub struct NotForContentType { + content_type: Str, + exception: Option, +} + +impl NotForContentType { + /// Predicate that wont compress gRPC responses. + pub const GRPC: Self = Self::const_new("application/grpc"); + + /// Predicate that wont compress images. + pub const IMAGES: Self = Self { + content_type: Str::Static("image/"), + exception: Some(Str::Static("image/svg+xml")), + }; + + /// Predicate that wont compress Server-Sent Events (SSE) responses. + pub const SSE: Self = Self::const_new("text/event-stream"); + + /// Create a new `NotForContentType`. + pub fn new(content_type: &str) -> Self { + Self { + content_type: Str::Shared(content_type.into()), + exception: None, + } + } + + /// Create a new `NotForContentType` from a static string. 
+ pub const fn const_new(content_type: &'static str) -> Self { + Self { + content_type: Str::Static(content_type), + exception: None, + } + } +} + +impl Predicate for NotForContentType { + fn should_compress(&self, response: &http::Response) -> bool + where + B: Body, + { + if let Some(except) = &self.exception { + if content_type(response) == except.as_str() { + return true; + } + } + + !content_type(response).starts_with(self.content_type.as_str()) + } +} + +#[derive(Clone)] +enum Str { + Static(&'static str), + Shared(Arc), +} + +impl Str { + fn as_str(&self) -> &str { + match self { + Str::Static(s) => s, + Str::Shared(s) => s, + } + } +} + +impl fmt::Debug for Str { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Static(inner) => inner.fmt(f), + Self::Shared(inner) => inner.fmt(f), + } + } +} + +fn content_type(response: &http::Response) -> &str { + response + .headers() + .get(header::CONTENT_TYPE) + .and_then(|h| h.to_str().ok()) + .unwrap_or_default() +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/service.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/service.rs new file mode 100644 index 0000000000000000000000000000000000000000..22dcf73ae5a039f1cebf367a9a5f432ec07dc221 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression/service.rs @@ -0,0 +1,185 @@ +use super::{CompressionBody, CompressionLayer, ResponseFuture}; +use crate::compression::predicate::{DefaultPredicate, Predicate}; +use crate::compression::CompressionLevel; +use crate::{compression_utils::AcceptEncoding, content_encoding::Encoding}; +use http::{Request, Response}; +use http_body::Body; +use std::task::{Context, Poll}; +use tower_service::Service; + +/// Compress response bodies of the underlying service. 
+/// +/// This uses the `Accept-Encoding` header to pick an appropriate encoding and adds the +/// `Content-Encoding` header to responses. +/// +/// See the [module docs](crate::compression) for more details. +#[derive(Clone, Copy)] +pub struct Compression { + pub(crate) inner: S, + pub(crate) accept: AcceptEncoding, + pub(crate) predicate: P, + pub(crate) quality: CompressionLevel, +} + +impl Compression { + /// Creates a new `Compression` wrapping the `service`. + pub fn new(service: S) -> Compression { + Self { + inner: service, + accept: AcceptEncoding::default(), + predicate: DefaultPredicate::default(), + quality: CompressionLevel::default(), + } + } +} + +impl Compression { + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `Compression` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer() -> CompressionLayer { + CompressionLayer::new() + } + + /// Sets whether to enable the gzip encoding. + #[cfg(feature = "compression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to enable the Deflate encoding. + #[cfg(feature = "compression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to enable the Brotli encoding. + #[cfg(feature = "compression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to enable the Zstd encoding. + #[cfg(feature = "compression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Sets the compression quality. + pub fn quality(mut self, quality: CompressionLevel) -> Self { + self.quality = quality; + self + } + + /// Disables the gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. 
+ pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables the Deflate encoding. + /// + /// This method is available even if the `deflate` crate feature is disabled. + pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables the Brotli encoding. + /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables the Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. + pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } + + /// Replace the current compression predicate. + /// + /// Predicates are used to determine whether a response should be compressed or not. + /// + /// The default predicate is [`DefaultPredicate`]. See its documentation for more + /// details on which responses it wont compress. + /// + /// # Changing the compression predicate + /// + /// ``` + /// use tower_http::compression::{ + /// Compression, + /// predicate::{Predicate, NotForContentType, DefaultPredicate}, + /// }; + /// use tower::util::service_fn; + /// + /// // Placeholder service_fn + /// let service = service_fn(|_: ()| async { + /// Ok::<_, std::io::Error>(http::Response::new(())) + /// }); + /// + /// // build our custom compression predicate + /// // its recommended to still include `DefaultPredicate` as part of + /// // custom predicates + /// let predicate = DefaultPredicate::new() + /// // don't compress responses who's `content-type` starts with `application/json` + /// .and(NotForContentType::new("application/json")); + /// + /// let service = Compression::new(service).compress_when(predicate); + /// ``` + /// + /// See [`predicate`](super::predicate) for more utilities for building compression predicates. 
+ /// + /// Responses that are already compressed (ie have a `content-encoding` header) will _never_ be + /// recompressed, regardless what they predicate says. + pub fn compress_when(self, predicate: C) -> Compression + where + C: Predicate, + { + Compression { + inner: self.inner, + accept: self.accept, + predicate, + quality: self.quality, + } + } +} + +impl Service> for Compression +where + S: Service, Response = Response>, + ResBody: Body, + P: Predicate, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let encoding = Encoding::from_headers(req.headers(), self.accept); + + ResponseFuture { + inner: self.inner.call(req), + encoding, + predicate: self.predicate.clone(), + quality: self.quality, + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression_utils.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression_utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..1fbccb856205c5cc0017b8d08ac921164ecffd5c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/compression_utils.rs @@ -0,0 +1,480 @@ +//! Types used by compression and decompression middleware. 
+ +use crate::{content_encoding::SupportedEncodings, BoxError}; +use bytes::{Buf, Bytes, BytesMut}; +use futures_core::Stream; +use http::HeaderValue; +use http_body::{Body, Frame}; +use pin_project_lite::pin_project; +use std::{ + io, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tokio::io::AsyncRead; +use tokio_util::io::StreamReader; + +#[derive(Debug, Clone, Copy)] +pub(crate) struct AcceptEncoding { + pub(crate) gzip: bool, + pub(crate) deflate: bool, + pub(crate) br: bool, + pub(crate) zstd: bool, +} + +impl AcceptEncoding { + #[allow(dead_code)] + pub(crate) fn to_header_value(self) -> Option { + let accept = match (self.gzip(), self.deflate(), self.br(), self.zstd()) { + (true, true, true, false) => "gzip,deflate,br", + (true, true, false, false) => "gzip,deflate", + (true, false, true, false) => "gzip,br", + (true, false, false, false) => "gzip", + (false, true, true, false) => "deflate,br", + (false, true, false, false) => "deflate", + (false, false, true, false) => "br", + (true, true, true, true) => "zstd,gzip,deflate,br", + (true, true, false, true) => "zstd,gzip,deflate", + (true, false, true, true) => "zstd,gzip,br", + (true, false, false, true) => "zstd,gzip", + (false, true, true, true) => "zstd,deflate,br", + (false, true, false, true) => "zstd,deflate", + (false, false, true, true) => "zstd,br", + (false, false, false, true) => "zstd", + (false, false, false, false) => return None, + }; + Some(HeaderValue::from_static(accept)) + } + + #[allow(dead_code)] + pub(crate) fn set_gzip(&mut self, enable: bool) { + self.gzip = enable; + } + + #[allow(dead_code)] + pub(crate) fn set_deflate(&mut self, enable: bool) { + self.deflate = enable; + } + + #[allow(dead_code)] + pub(crate) fn set_br(&mut self, enable: bool) { + self.br = enable; + } + + #[allow(dead_code)] + pub(crate) fn set_zstd(&mut self, enable: bool) { + self.zstd = enable; + } +} + +impl SupportedEncodings for AcceptEncoding { + #[allow(dead_code)] + fn gzip(&self) -> bool { + 
#[cfg(any(feature = "decompression-gzip", feature = "compression-gzip"))] + return self.gzip; + + #[cfg(not(any(feature = "decompression-gzip", feature = "compression-gzip")))] + return false; + } + + #[allow(dead_code)] + fn deflate(&self) -> bool { + #[cfg(any(feature = "decompression-deflate", feature = "compression-deflate"))] + return self.deflate; + + #[cfg(not(any(feature = "decompression-deflate", feature = "compression-deflate")))] + return false; + } + + #[allow(dead_code)] + fn br(&self) -> bool { + #[cfg(any(feature = "decompression-br", feature = "compression-br"))] + return self.br; + + #[cfg(not(any(feature = "decompression-br", feature = "compression-br")))] + return false; + } + + #[allow(dead_code)] + fn zstd(&self) -> bool { + #[cfg(any(feature = "decompression-zstd", feature = "compression-zstd"))] + return self.zstd; + + #[cfg(not(any(feature = "decompression-zstd", feature = "compression-zstd")))] + return false; + } +} + +impl Default for AcceptEncoding { + fn default() -> Self { + AcceptEncoding { + gzip: true, + deflate: true, + br: true, + zstd: true, + } + } +} + +/// A `Body` that has been converted into an `AsyncRead`. +pub(crate) type AsyncReadBody = + StreamReader, ::Error>, ::Data>; + +/// Trait for applying some decorator to an `AsyncRead` +pub(crate) trait DecorateAsyncRead { + type Input: AsyncRead; + type Output: AsyncRead; + + /// Apply the decorator + fn apply(input: Self::Input, quality: CompressionLevel) -> Self::Output; + + /// Get a pinned mutable reference to the original input. + /// + /// This is necessary to implement `Body::poll_trailers`. + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input>; +} + +pin_project! 
{ + /// `Body` that has been decorated by an `AsyncRead` + pub(crate) struct WrapBody { + #[pin] + // rust-analyer thinks this field is private if its `pub(crate)` but works fine when its + // `pub` + pub read: M::Output, + // A buffer to temporarily store the data read from the underlying body. + // Reused as much as possible to optimize allocations. + buf: BytesMut, + read_all_data: bool, + } +} + +impl WrapBody { + const INTERNAL_BUF_CAPACITY: usize = 4096; +} + +impl WrapBody { + #[allow(dead_code)] + pub(crate) fn new(body: B, quality: CompressionLevel) -> Self + where + B: Body, + M: DecorateAsyncRead>, + { + // convert `Body` into a `Stream` + let stream = BodyIntoStream::new(body); + + // an adapter that converts the error type into `io::Error` while storing the actual error + // `StreamReader` requires the error type is `io::Error` + let stream = StreamErrorIntoIoError::<_, B::Error>::new(stream); + + // convert `Stream` into an `AsyncRead` + let read = StreamReader::new(stream); + + // apply decorator to `AsyncRead` yielding another `AsyncRead` + let read = M::apply(read, quality); + + Self { + read, + buf: BytesMut::with_capacity(Self::INTERNAL_BUF_CAPACITY), + read_all_data: false, + } + } +} + +impl Body for WrapBody +where + B: Body, + B::Error: Into, + M: DecorateAsyncRead>, +{ + type Data = Bytes; + type Error = BoxError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let mut this = self.project(); + + if !*this.read_all_data { + if this.buf.capacity() == 0 { + this.buf.reserve(Self::INTERNAL_BUF_CAPACITY); + } + + let result = tokio_util::io::poll_read_buf(this.read.as_mut(), cx, &mut this.buf); + + match ready!(result) { + Ok(0) => { + *this.read_all_data = true; + } + Ok(_) => { + let chunk = this.buf.split().freeze(); + return Poll::Ready(Some(Ok(Frame::data(chunk)))); + } + Err(err) => { + let body_error: Option = M::get_pin_mut(this.read.as_mut()) + .get_pin_mut() + .project() + .error + 
.take(); + + let read_some_data = M::get_pin_mut(this.read.as_mut()) + .get_pin_mut() + .project() + .read_some_data; + + if let Some(body_error) = body_error { + return Poll::Ready(Some(Err(body_error.into()))); + } else if err.raw_os_error() == Some(SENTINEL_ERROR_CODE) { + // SENTINEL_ERROR_CODE only gets used when storing + // an underlying body error + unreachable!() + } else if *read_some_data { + return Poll::Ready(Some(Err(err.into()))); + } + } + } + } + + // poll any remaining frames, such as trailers + let body = M::get_pin_mut(this.read).get_pin_mut().get_pin_mut(); + match ready!(body.poll_frame(cx)) { + Some(Ok(frame)) if frame.is_trailers() => Poll::Ready(Some(Ok( + frame.map_data(|mut data| data.copy_to_bytes(data.remaining())) + ))), + Some(Ok(frame)) => { + if let Ok(bytes) = frame.into_data() { + if bytes.has_remaining() { + return Poll::Ready(Some(Err( + "there are extra bytes after body has been decompressed".into(), + ))); + } + } + Poll::Ready(None) + } + Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), + None => Poll::Ready(None), + } + } +} + +pin_project! 
{ + pub(crate) struct BodyIntoStream + where + B: Body, + { + #[pin] + body: B, + yielded_all_data: bool, + non_data_frame: Option>, + } +} + +#[allow(dead_code)] +impl BodyIntoStream +where + B: Body, +{ + pub(crate) fn new(body: B) -> Self { + Self { + body, + yielded_all_data: false, + non_data_frame: None, + } + } + + /// Get a reference to the inner body + pub(crate) fn get_ref(&self) -> &B { + &self.body + } + + /// Get a mutable reference to the inner body + pub(crate) fn get_mut(&mut self) -> &mut B { + &mut self.body + } + + /// Get a pinned mutable reference to the inner body + pub(crate) fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { + self.project().body + } + + /// Consume `self`, returning the inner body + pub(crate) fn into_inner(self) -> B { + self.body + } +} + +impl Stream for BodyIntoStream +where + B: Body, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + let this = self.as_mut().project(); + + if *this.yielded_all_data { + return Poll::Ready(None); + } + + match std::task::ready!(this.body.poll_frame(cx)) { + Some(Ok(frame)) => match frame.into_data() { + Ok(data) => return Poll::Ready(Some(Ok(data))), + Err(frame) => { + *this.yielded_all_data = true; + *this.non_data_frame = Some(frame); + } + }, + Some(Err(err)) => return Poll::Ready(Some(Err(err))), + None => { + *this.yielded_all_data = true; + } + } + } + } +} + +impl Body for BodyIntoStream +where + B: Body, +{ + type Data = B::Data; + type Error = B::Error; + + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + // First drive the stream impl. This consumes all data frames and buffer at most one + // trailers frame. + if let Some(frame) = std::task::ready!(self.as_mut().poll_next(cx)) { + return Poll::Ready(Some(frame.map(Frame::data))); + } + + let this = self.project(); + + // Yield the trailers frame `poll_next` hit. 
+ if let Some(frame) = this.non_data_frame.take() { + return Poll::Ready(Some(Ok(frame))); + } + + // Yield any remaining frames in the body. There shouldn't be any after the trailers but + // you never know. + this.body.poll_frame(cx) + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + self.body.size_hint() + } +} + +pin_project! { + pub(crate) struct StreamErrorIntoIoError { + #[pin] + inner: S, + error: Option, + read_some_data: bool + } +} + +impl StreamErrorIntoIoError { + pub(crate) fn new(inner: S) -> Self { + Self { + inner, + error: None, + read_some_data: false, + } + } + + /// Get a reference to the inner body + pub(crate) fn get_ref(&self) -> &S { + &self.inner + } + + /// Get a mutable reference to the inner inner + pub(crate) fn get_mut(&mut self) -> &mut S { + &mut self.inner + } + + /// Get a pinned mutable reference to the inner inner + pub(crate) fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut S> { + self.project().inner + } + + /// Consume `self`, returning the inner inner + pub(crate) fn into_inner(self) -> S { + self.inner + } +} + +impl Stream for StreamErrorIntoIoError +where + S: Stream>, +{ + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + match ready!(this.inner.poll_next(cx)) { + None => Poll::Ready(None), + Some(Ok(value)) => { + *this.read_some_data = true; + Poll::Ready(Some(Ok(value))) + } + Some(Err(err)) => { + *this.error = Some(err); + Poll::Ready(Some(Err(io::Error::from_raw_os_error(SENTINEL_ERROR_CODE)))) + } + } + } +} + +pub(crate) const SENTINEL_ERROR_CODE: i32 = -837459418; + +/// Level of compression data should be compressed with. +#[non_exhaustive] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Default)] +pub enum CompressionLevel { + /// Fastest quality of compression, usually produces bigger size. + Fastest, + /// Best quality of compression, usually produces the smallest size. 
+ Best, + /// Default quality of compression defined by the selected compression + /// algorithm. + #[default] + Default, + /// Precise quality based on the underlying compression algorithms' + /// qualities. + /// + /// The interpretation of this depends on the algorithm chosen and the + /// specific implementation backing it. + /// + /// Qualities are implicitly clamped to the algorithm's maximum. + Precise(i32), +} + +#[cfg(any( + feature = "compression-br", + feature = "compression-gzip", + feature = "compression-deflate", + feature = "compression-zstd" +))] +use async_compression::Level as AsyncCompressionLevel; + +#[cfg(any( + feature = "compression-br", + feature = "compression-gzip", + feature = "compression-deflate", + feature = "compression-zstd" +))] +impl CompressionLevel { + pub(crate) fn into_async_compression(self) -> AsyncCompressionLevel { + match self { + CompressionLevel::Fastest => AsyncCompressionLevel::Fastest, + CompressionLevel::Best => AsyncCompressionLevel::Best, + CompressionLevel::Default => AsyncCompressionLevel::Default, + CompressionLevel::Precise(quality) => AsyncCompressionLevel::Precise(quality), + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/content_encoding.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/content_encoding.rs new file mode 100644 index 0000000000000000000000000000000000000000..91c21d45672211f265c62960012997e00216f979 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/content_encoding.rs @@ -0,0 +1,605 @@ +pub(crate) trait SupportedEncodings: Copy { + fn gzip(&self) -> bool; + fn deflate(&self) -> bool; + fn br(&self) -> bool; + fn zstd(&self) -> bool; +} + +// This enum's variants are ordered from least to most preferred. 
+#[derive(Copy, Clone, Debug, Ord, PartialOrd, PartialEq, Eq)] +pub(crate) enum Encoding { + #[allow(dead_code)] + Identity, + #[cfg(any(feature = "fs", feature = "compression-deflate"))] + Deflate, + #[cfg(any(feature = "fs", feature = "compression-gzip"))] + Gzip, + #[cfg(any(feature = "fs", feature = "compression-br"))] + Brotli, + #[cfg(any(feature = "fs", feature = "compression-zstd"))] + Zstd, +} + +impl Encoding { + #[allow(dead_code)] + fn to_str(self) -> &'static str { + match self { + #[cfg(any(feature = "fs", feature = "compression-gzip"))] + Encoding::Gzip => "gzip", + #[cfg(any(feature = "fs", feature = "compression-deflate"))] + Encoding::Deflate => "deflate", + #[cfg(any(feature = "fs", feature = "compression-br"))] + Encoding::Brotli => "br", + #[cfg(any(feature = "fs", feature = "compression-zstd"))] + Encoding::Zstd => "zstd", + Encoding::Identity => "identity", + } + } + + #[cfg(feature = "fs")] + pub(crate) fn to_file_extension(self) -> Option<&'static std::ffi::OsStr> { + match self { + Encoding::Gzip => Some(std::ffi::OsStr::new(".gz")), + Encoding::Deflate => Some(std::ffi::OsStr::new(".zz")), + Encoding::Brotli => Some(std::ffi::OsStr::new(".br")), + Encoding::Zstd => Some(std::ffi::OsStr::new(".zst")), + Encoding::Identity => None, + } + } + + #[allow(dead_code)] + pub(crate) fn into_header_value(self) -> http::HeaderValue { + http::HeaderValue::from_static(self.to_str()) + } + + #[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-zstd", + feature = "fs", + ))] + fn parse(s: &str, _supported_encoding: impl SupportedEncodings) -> Option { + #[cfg(any(feature = "fs", feature = "compression-gzip"))] + if (s.eq_ignore_ascii_case("gzip") || s.eq_ignore_ascii_case("x-gzip")) + && _supported_encoding.gzip() + { + return Some(Encoding::Gzip); + } + + #[cfg(any(feature = "fs", feature = "compression-deflate"))] + if s.eq_ignore_ascii_case("deflate") && 
_supported_encoding.deflate() { + return Some(Encoding::Deflate); + } + + #[cfg(any(feature = "fs", feature = "compression-br"))] + if s.eq_ignore_ascii_case("br") && _supported_encoding.br() { + return Some(Encoding::Brotli); + } + + #[cfg(any(feature = "fs", feature = "compression-zstd"))] + if s.eq_ignore_ascii_case("zstd") && _supported_encoding.zstd() { + return Some(Encoding::Zstd); + } + + if s.eq_ignore_ascii_case("identity") { + return Some(Encoding::Identity); + } + + None + } + + #[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-zstd", + feature = "compression-deflate", + ))] + // based on https://github.com/http-rs/accept-encoding + pub(crate) fn from_headers( + headers: &http::HeaderMap, + supported_encoding: impl SupportedEncodings, + ) -> Self { + Encoding::preferred_encoding(encodings(headers, supported_encoding)) + .unwrap_or(Encoding::Identity) + } + + #[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-zstd", + feature = "compression-deflate", + feature = "fs", + ))] + pub(crate) fn preferred_encoding( + accepted_encodings: impl Iterator, + ) -> Option { + accepted_encodings + .filter(|(_, qvalue)| qvalue.0 > 0) + .max_by_key(|&(encoding, qvalue)| (qvalue, encoding)) + .map(|(encoding, _)| encoding) + } +} + +// Allowed q-values are numbers between 0 and 1 with at most 3 digits in the fractional part. They +// are presented here as an unsigned integer between 0 and 1000. 
+#[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-zstd", + feature = "compression-deflate", + feature = "fs", +))] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub(crate) struct QValue(u16); + +#[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-zstd", + feature = "compression-deflate", + feature = "fs", +))] +impl QValue { + #[inline] + pub(crate) fn one() -> Self { + Self(1000) + } + + // Parse a q-value as specified in RFC 7231 section 5.3.1. + fn parse(s: &str) -> Option { + let mut c = s.chars(); + // Parse "q=" (case-insensitively). + match c.next() { + Some('q' | 'Q') => (), + _ => return None, + }; + match c.next() { + Some('=') => (), + _ => return None, + }; + + // Parse leading digit. Since valid q-values are between 0.000 and 1.000, only "0" and "1" + // are allowed. + let mut value = match c.next() { + Some('0') => 0, + Some('1') => 1000, + _ => return None, + }; + + // Parse optional decimal point. + match c.next() { + Some('.') => (), + None => return Some(Self(value)), + _ => return None, + }; + + // Parse optional fractional digits. The value of each digit is multiplied by `factor`. + // Since the q-value is represented as an integer between 0 and 1000, `factor` is `100` for + // the first digit, `10` for the next, and `1` for the digit after that. + let mut factor = 100; + loop { + match c.next() { + Some(n @ '0'..='9') => { + // If `factor` is less than `1`, three digits have already been parsed. A + // q-value having more than 3 fractional digits is invalid. + if factor < 1 { + return None; + } + // Add the digit's value multiplied by `factor` to `value`. + value += factor * (n as u16 - '0' as u16); + } + None => { + // No more characters to parse. Check that the value representing the q-value is + // in the valid range. 
+ return if value <= 1000 { + Some(Self(value)) + } else { + None + }; + } + _ => return None, + }; + factor /= 10; + } + } +} + +#[cfg(any( + feature = "compression-gzip", + feature = "compression-br", + feature = "compression-zstd", + feature = "compression-deflate", + feature = "fs", +))] +// based on https://github.com/http-rs/accept-encoding +pub(crate) fn encodings<'a>( + headers: &'a http::HeaderMap, + supported_encoding: impl SupportedEncodings + 'a, +) -> impl Iterator + 'a { + headers + .get_all(http::header::ACCEPT_ENCODING) + .iter() + .filter_map(|hval| hval.to_str().ok()) + .flat_map(|s| s.split(',')) + .filter_map(move |v| { + let mut v = v.splitn(2, ';'); + + let encoding = match Encoding::parse(v.next().unwrap().trim(), supported_encoding) { + Some(encoding) => encoding, + None => return None, // ignore unknown encodings + }; + + let qval = if let Some(qval) = v.next() { + QValue::parse(qval.trim())? + } else { + QValue::one() + }; + + Some((encoding, qval)) + }) +} + +#[cfg(all( + test, + feature = "compression-gzip", + feature = "compression-deflate", + feature = "compression-br", + feature = "compression-zstd", +))] +mod tests { + use super::*; + + #[derive(Copy, Clone, Default)] + struct SupportedEncodingsAll; + + impl SupportedEncodings for SupportedEncodingsAll { + fn gzip(&self) -> bool { + true + } + + fn deflate(&self) -> bool { + true + } + + fn br(&self) -> bool { + true + } + + fn zstd(&self) -> bool { + true + } + } + + #[test] + fn no_accept_encoding_header() { + let encoding = Encoding::from_headers(&http::HeaderMap::new(), SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + } + + #[test] + fn accept_encoding_header_single_encoding() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + } + + #[test] + fn 
accept_encoding_header_two_encodings() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip,br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_gzip_x_gzip() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip,x-gzip"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + } + + #[test] + fn accept_encoding_header_x_gzip_deflate() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("deflate,x-gzip"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + } + + #[test] + fn accept_encoding_header_three_encodings() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip,deflate,br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_two_encodings_with_one_qvalue() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_three_encodings_with_one_qvalue() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,deflate,br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + 
#[test] + fn two_accept_encoding_headers_with_one_qvalue() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5"), + ); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn two_accept_encoding_headers_three_encodings_with_one_qvalue() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,deflate"), + ); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn three_accept_encoding_headers_with_one_qvalue() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5"), + ); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("deflate"), + ); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("br"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_two_encodings_with_two_qvalues() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,br;q=0.8"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.8,br;q=0.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + 
assert_eq!(Encoding::Gzip, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.995,br;q=0.999"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_three_encodings_with_three_qvalues() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,deflate;q=0.6,br;q=0.8"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.8,deflate;q=0.6,br;q=0.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.6,deflate;q=0.8,br;q=0.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Deflate, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.995,deflate;q=0.997,br;q=0.999"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_invalid_encdoing() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("invalid,gzip"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + } + + #[test] + fn accept_encoding_header_with_qvalue_zero() { + let mut headers = http::HeaderMap::new(); + 
headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0."), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0,br;q=0.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_with_uppercase_letters() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gZiP"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Gzip, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5,br;Q=0.8"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_with_allowed_spaces() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static(" gzip\t; q=0.5 ,\tbr ;\tq=0.8\t"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Brotli, encoding); + } + + #[test] + fn accept_encoding_header_with_invalid_spaces() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q =0.5"), + ); + let encoding = Encoding::from_headers(&headers, 
SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q= 0.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + } + + #[test] + fn accept_encoding_header_with_invalid_quvalues() { + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=-0.1"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=00.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=0.5000"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=.5"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=1.01"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + assert_eq!(Encoding::Identity, encoding); + + let mut headers = http::HeaderMap::new(); + headers.append( + http::header::ACCEPT_ENCODING, + http::HeaderValue::from_static("gzip;q=1.001"), + ); + let encoding = Encoding::from_headers(&headers, SupportedEncodingsAll); + 
assert_eq!(Encoding::Identity, encoding); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_credentials.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_credentials.rs new file mode 100644 index 0000000000000000000000000000000000000000..de53ffed61c437344ff723a8cab92582da80a09a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_credentials.rs @@ -0,0 +1,96 @@ +use std::{fmt, sync::Arc}; + +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; + +/// Holds configuration for how to set the [`Access-Control-Allow-Credentials`][mdn] header. +/// +/// See [`CorsLayer::allow_credentials`] for more details. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials +/// [`CorsLayer::allow_credentials`]: super::CorsLayer::allow_credentials +#[derive(Clone, Default)] +#[must_use] +pub struct AllowCredentials(AllowCredentialsInner); + +impl AllowCredentials { + /// Allow credentials for all requests + /// + /// See [`CorsLayer::allow_credentials`] for more details. + /// + /// [`CorsLayer::allow_credentials`]: super::CorsLayer::allow_credentials + pub fn yes() -> Self { + Self(AllowCredentialsInner::Yes) + } + + /// Allow credentials for some requests, based on a given predicate + /// + /// The first argument to the predicate is the request origin. + /// + /// See [`CorsLayer::allow_credentials`] for more details. 
+ /// + /// [`CorsLayer::allow_credentials`]: super::CorsLayer::allow_credentials + pub fn predicate(f: F) -> Self + where + F: Fn(&HeaderValue, &RequestParts) -> bool + Send + Sync + 'static, + { + Self(AllowCredentialsInner::Predicate(Arc::new(f))) + } + + pub(super) fn is_true(&self) -> bool { + matches!(&self.0, AllowCredentialsInner::Yes) + } + + pub(super) fn to_header( + &self, + origin: Option<&HeaderValue>, + parts: &RequestParts, + ) -> Option<(HeaderName, HeaderValue)> { + #[allow(clippy::declare_interior_mutable_const)] + const TRUE: HeaderValue = HeaderValue::from_static("true"); + + let allow_creds = match &self.0 { + AllowCredentialsInner::Yes => true, + AllowCredentialsInner::No => false, + AllowCredentialsInner::Predicate(c) => c(origin?, parts), + }; + + allow_creds.then_some((header::ACCESS_CONTROL_ALLOW_CREDENTIALS, TRUE)) + } +} + +impl From for AllowCredentials { + fn from(v: bool) -> Self { + match v { + true => Self(AllowCredentialsInner::Yes), + false => Self(AllowCredentialsInner::No), + } + } +} + +impl fmt::Debug for AllowCredentials { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 { + AllowCredentialsInner::Yes => f.debug_tuple("Yes").finish(), + AllowCredentialsInner::No => f.debug_tuple("No").finish(), + AllowCredentialsInner::Predicate(_) => f.debug_tuple("Predicate").finish(), + } + } +} + +#[derive(Clone)] +enum AllowCredentialsInner { + Yes, + No, + Predicate( + Arc Fn(&'a HeaderValue, &'a RequestParts) -> bool + Send + Sync + 'static>, + ), +} + +impl Default for AllowCredentialsInner { + fn default() -> Self { + Self::No + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_headers.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_headers.rs new file mode 100644 index 0000000000000000000000000000000000000000..8e49e78055550812b448035e4139de8047c971ac --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_headers.rs @@ -0,0 +1,111 @@ +use std::fmt; + +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; + +use super::{separated_by_commas, Any, WILDCARD}; + +/// Holds configuration for how to set the [`Access-Control-Allow-Headers`][mdn] header. +/// +/// See [`CorsLayer::allow_headers`] for more details. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers +/// [`CorsLayer::allow_headers`]: super::CorsLayer::allow_headers +#[derive(Clone, Default)] +#[must_use] +pub struct AllowHeaders(AllowHeadersInner); + +impl AllowHeaders { + /// Allow any headers by sending a wildcard (`*`) + /// + /// See [`CorsLayer::allow_headers`] for more details. + /// + /// [`CorsLayer::allow_headers`]: super::CorsLayer::allow_headers + pub fn any() -> Self { + Self(AllowHeadersInner::Const(Some(WILDCARD))) + } + + /// Set multiple allowed headers + /// + /// See [`CorsLayer::allow_headers`] for more details. + /// + /// [`CorsLayer::allow_headers`]: super::CorsLayer::allow_headers + pub fn list(headers: I) -> Self + where + I: IntoIterator, + { + Self(AllowHeadersInner::Const(separated_by_commas( + headers.into_iter().map(Into::into), + ))) + } + + /// Allow any headers, by mirroring the preflight [`Access-Control-Request-Headers`][mdn] + /// header. + /// + /// See [`CorsLayer::allow_headers`] for more details. 
+ /// + /// [`CorsLayer::allow_headers`]: super::CorsLayer::allow_headers + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Request-Headers + pub fn mirror_request() -> Self { + Self(AllowHeadersInner::MirrorRequest) + } + + #[allow(clippy::borrow_interior_mutable_const)] + pub(super) fn is_wildcard(&self) -> bool { + matches!(&self.0, AllowHeadersInner::Const(Some(v)) if v == WILDCARD) + } + + pub(super) fn to_header(&self, parts: &RequestParts) -> Option<(HeaderName, HeaderValue)> { + let allow_headers = match &self.0 { + AllowHeadersInner::Const(v) => v.clone()?, + AllowHeadersInner::MirrorRequest => parts + .headers + .get(header::ACCESS_CONTROL_REQUEST_HEADERS)? + .clone(), + }; + + Some((header::ACCESS_CONTROL_ALLOW_HEADERS, allow_headers)) + } +} + +impl fmt::Debug for AllowHeaders { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + AllowHeadersInner::Const(inner) => f.debug_tuple("Const").field(inner).finish(), + AllowHeadersInner::MirrorRequest => f.debug_tuple("MirrorRequest").finish(), + } + } +} + +impl From for AllowHeaders { + fn from(_: Any) -> Self { + Self::any() + } +} + +impl From<[HeaderName; N]> for AllowHeaders { + fn from(arr: [HeaderName; N]) -> Self { + Self::list(arr) + } +} + +impl From> for AllowHeaders { + fn from(vec: Vec) -> Self { + Self::list(vec) + } +} + +#[derive(Clone)] +enum AllowHeadersInner { + Const(Option), + MirrorRequest, +} + +impl Default for AllowHeadersInner { + fn default() -> Self { + Self::Const(None) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_methods.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_methods.rs new file mode 100644 index 0000000000000000000000000000000000000000..a2aeb64201e65d9c63b2b41ee5e97307a085a7ea --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_methods.rs @@ -0,0 
+1,131 @@ +use std::fmt; + +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, + Method, +}; + +use super::{separated_by_commas, Any, WILDCARD}; + +/// Holds configuration for how to set the [`Access-Control-Allow-Methods`][mdn] header. +/// +/// See [`CorsLayer::allow_methods`] for more details. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Methods +/// [`CorsLayer::allow_methods`]: super::CorsLayer::allow_methods +#[derive(Clone, Default)] +#[must_use] +pub struct AllowMethods(AllowMethodsInner); + +impl AllowMethods { + /// Allow any method by sending a wildcard (`*`) + /// + /// See [`CorsLayer::allow_methods`] for more details. + /// + /// [`CorsLayer::allow_methods`]: super::CorsLayer::allow_methods + pub fn any() -> Self { + Self(AllowMethodsInner::Const(Some(WILDCARD))) + } + + /// Set a single allowed method + /// + /// See [`CorsLayer::allow_methods`] for more details. + /// + /// [`CorsLayer::allow_methods`]: super::CorsLayer::allow_methods + pub fn exact(method: Method) -> Self { + Self(AllowMethodsInner::Const(Some( + HeaderValue::from_str(method.as_str()).unwrap(), + ))) + } + + /// Set multiple allowed methods + /// + /// See [`CorsLayer::allow_methods`] for more details. + /// + /// [`CorsLayer::allow_methods`]: super::CorsLayer::allow_methods + pub fn list(methods: I) -> Self + where + I: IntoIterator, + { + Self(AllowMethodsInner::Const(separated_by_commas( + methods + .into_iter() + .map(|m| HeaderValue::from_str(m.as_str()).unwrap()), + ))) + } + + /// Allow any method, by mirroring the preflight [`Access-Control-Request-Method`][mdn] + /// header. + /// + /// See [`CorsLayer::allow_methods`] for more details. 
+ /// + /// [`CorsLayer::allow_methods`]: super::CorsLayer::allow_methods + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Request-Method + pub fn mirror_request() -> Self { + Self(AllowMethodsInner::MirrorRequest) + } + + #[allow(clippy::borrow_interior_mutable_const)] + pub(super) fn is_wildcard(&self) -> bool { + matches!(&self.0, AllowMethodsInner::Const(Some(v)) if v == WILDCARD) + } + + pub(super) fn to_header(&self, parts: &RequestParts) -> Option<(HeaderName, HeaderValue)> { + let allow_methods = match &self.0 { + AllowMethodsInner::Const(v) => v.clone()?, + AllowMethodsInner::MirrorRequest => parts + .headers + .get(header::ACCESS_CONTROL_REQUEST_METHOD)? + .clone(), + }; + + Some((header::ACCESS_CONTROL_ALLOW_METHODS, allow_methods)) + } +} + +impl fmt::Debug for AllowMethods { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + AllowMethodsInner::Const(inner) => f.debug_tuple("Const").field(inner).finish(), + AllowMethodsInner::MirrorRequest => f.debug_tuple("MirrorRequest").finish(), + } + } +} + +impl From for AllowMethods { + fn from(_: Any) -> Self { + Self::any() + } +} + +impl From for AllowMethods { + fn from(method: Method) -> Self { + Self::exact(method) + } +} + +impl From<[Method; N]> for AllowMethods { + fn from(arr: [Method; N]) -> Self { + Self::list(arr) + } +} + +impl From> for AllowMethods { + fn from(vec: Vec) -> Self { + Self::list(vec) + } +} + +#[derive(Clone)] +enum AllowMethodsInner { + Const(Option), + MirrorRequest, +} + +impl Default for AllowMethodsInner { + fn default() -> Self { + Self::Const(None) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_origin.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_origin.rs new file mode 100644 index 0000000000000000000000000000000000000000..646220fab45f43d4fcd5c68320c2546f93e11df5 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_origin.rs @@ -0,0 +1,240 @@ +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; +use pin_project_lite::pin_project; +use std::{ + fmt, + future::Future, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +use super::{Any, WILDCARD}; + +/// Holds configuration for how to set the [`Access-Control-Allow-Origin`][mdn] header. +/// +/// See [`CorsLayer::allow_origin`] for more details. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin +/// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin +#[derive(Clone, Default)] +#[must_use] +pub struct AllowOrigin(OriginInner); + +impl AllowOrigin { + /// Allow any origin by sending a wildcard (`*`) + /// + /// See [`CorsLayer::allow_origin`] for more details. + /// + /// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin + pub fn any() -> Self { + Self(OriginInner::Const(WILDCARD)) + } + + /// Set a single allowed origin + /// + /// See [`CorsLayer::allow_origin`] for more details. + /// + /// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin + pub fn exact(origin: HeaderValue) -> Self { + Self(OriginInner::Const(origin)) + } + + /// Set multiple allowed origins + /// + /// See [`CorsLayer::allow_origin`] for more details. + /// + /// # Panics + /// + /// If the iterator contains a wildcard (`*`). + /// + /// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin + #[allow(clippy::borrow_interior_mutable_const)] + pub fn list(origins: I) -> Self + where + I: IntoIterator, + { + let origins = origins.into_iter().collect::>(); + if origins.contains(&WILDCARD) { + panic!( + "Wildcard origin (`*`) cannot be passed to `AllowOrigin::list`. 
\ + Use `AllowOrigin::any()` instead" + ); + } + + Self(OriginInner::List(origins)) + } + + /// Set the allowed origins from a predicate + /// + /// See [`CorsLayer::allow_origin`] for more details. + /// + /// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin + pub fn predicate(f: F) -> Self + where + F: Fn(&HeaderValue, &RequestParts) -> bool + Send + Sync + 'static, + { + Self(OriginInner::Predicate(Arc::new(f))) + } + + /// Set the allowed origins from an async predicate + /// + /// See [`CorsLayer::allow_origin`] for more details. + /// + /// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin + pub fn async_predicate(f: F) -> Self + where + F: FnOnce(HeaderValue, &RequestParts) -> Fut + Send + Sync + 'static + Clone, + Fut: Future + Send + 'static, + { + Self(OriginInner::AsyncPredicate(Arc::new(move |v, p| { + Box::pin((f.clone())(v, p)) + }))) + } + + /// Allow any origin, by mirroring the request origin + /// + /// This is equivalent to + /// [`AllowOrigin::predicate(|_, _| true)`][Self::predicate]. + /// + /// See [`CorsLayer::allow_origin`] for more details. 
+ /// + /// [`CorsLayer::allow_origin`]: super::CorsLayer::allow_origin + pub fn mirror_request() -> Self { + Self::predicate(|_, _| true) + } + + #[allow(clippy::borrow_interior_mutable_const)] + pub(super) fn is_wildcard(&self) -> bool { + matches!(&self.0, OriginInner::Const(v) if v == WILDCARD) + } + + pub(super) fn to_future( + &self, + origin: Option<&HeaderValue>, + parts: &RequestParts, + ) -> AllowOriginFuture { + let name = header::ACCESS_CONTROL_ALLOW_ORIGIN; + + match &self.0 { + OriginInner::Const(v) => AllowOriginFuture::ok(Some((name, v.clone()))), + OriginInner::List(l) => { + AllowOriginFuture::ok(origin.filter(|o| l.contains(o)).map(|o| (name, o.clone()))) + } + OriginInner::Predicate(c) => AllowOriginFuture::ok( + origin + .filter(|origin| c(origin, parts)) + .map(|o| (name, o.clone())), + ), + OriginInner::AsyncPredicate(f) => { + if let Some(origin) = origin.cloned() { + let fut = f(origin.clone(), parts); + AllowOriginFuture::fut(async move { fut.await.then_some((name, origin)) }) + } else { + AllowOriginFuture::ok(None) + } + } + } + } +} + +pin_project! 
{ + #[project = AllowOriginFutureProj] + pub(super) enum AllowOriginFuture { + Ok{ + res: Option<(HeaderName, HeaderValue)> + }, + Future{ + #[pin] + future: Pin> + Send + 'static>> + }, + } +} + +impl AllowOriginFuture { + fn ok(res: Option<(HeaderName, HeaderValue)>) -> Self { + Self::Ok { res } + } + + fn fut> + Send + 'static>( + future: F, + ) -> Self { + Self::Future { + future: Box::pin(future), + } + } +} + +impl Future for AllowOriginFuture { + type Output = Option<(HeaderName, HeaderValue)>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.project() { + AllowOriginFutureProj::Ok { res } => Poll::Ready(res.take()), + AllowOriginFutureProj::Future { future } => future.poll(cx), + } + } +} + +impl fmt::Debug for AllowOrigin { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + OriginInner::Const(inner) => f.debug_tuple("Const").field(inner).finish(), + OriginInner::List(inner) => f.debug_tuple("List").field(inner).finish(), + OriginInner::Predicate(_) => f.debug_tuple("Predicate").finish(), + OriginInner::AsyncPredicate(_) => f.debug_tuple("AsyncPredicate").finish(), + } + } +} + +impl From for AllowOrigin { + fn from(_: Any) -> Self { + Self::any() + } +} + +impl From for AllowOrigin { + fn from(val: HeaderValue) -> Self { + Self::exact(val) + } +} + +impl From<[HeaderValue; N]> for AllowOrigin { + fn from(arr: [HeaderValue; N]) -> Self { + Self::list(arr) + } +} + +impl From> for AllowOrigin { + fn from(vec: Vec) -> Self { + Self::list(vec) + } +} + +#[derive(Clone)] +enum OriginInner { + Const(HeaderValue), + List(Vec), + Predicate( + Arc Fn(&'a HeaderValue, &'a RequestParts) -> bool + Send + Sync + 'static>, + ), + AsyncPredicate( + Arc< + dyn for<'a> Fn( + HeaderValue, + &'a RequestParts, + ) -> Pin + Send + 'static>> + + Send + + Sync + + 'static, + >, + ), +} + +impl Default for OriginInner { + fn default() -> Self { + Self::List(Vec::new()) + } +} diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_private_network.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_private_network.rs new file mode 100644 index 0000000000000000000000000000000000000000..9f97dc11fbbd6a6bb1d717937e1d4b618011a6b0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/allow_private_network.rs @@ -0,0 +1,205 @@ +use std::{fmt, sync::Arc}; + +use http::{ + header::{HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; + +/// Holds configuration for how to set the [`Access-Control-Allow-Private-Network`][wicg] header. +/// +/// See [`CorsLayer::allow_private_network`] for more details. +/// +/// [wicg]: https://wicg.github.io/private-network-access/ +/// [`CorsLayer::allow_private_network`]: super::CorsLayer::allow_private_network +#[derive(Clone, Default)] +#[must_use] +pub struct AllowPrivateNetwork(AllowPrivateNetworkInner); + +impl AllowPrivateNetwork { + /// Allow requests via a more private network than the one used to access the origin + /// + /// See [`CorsLayer::allow_private_network`] for more details. + /// + /// [`CorsLayer::allow_private_network`]: super::CorsLayer::allow_private_network + pub fn yes() -> Self { + Self(AllowPrivateNetworkInner::Yes) + } + + /// Allow requests via private network for some requests, based on a given predicate + /// + /// The first argument to the predicate is the request origin. + /// + /// See [`CorsLayer::allow_private_network`] for more details. 
+ /// + /// [`CorsLayer::allow_private_network`]: super::CorsLayer::allow_private_network + pub fn predicate(f: F) -> Self + where + F: Fn(&HeaderValue, &RequestParts) -> bool + Send + Sync + 'static, + { + Self(AllowPrivateNetworkInner::Predicate(Arc::new(f))) + } + + #[allow( + clippy::declare_interior_mutable_const, + clippy::borrow_interior_mutable_const + )] + pub(super) fn to_header( + &self, + origin: Option<&HeaderValue>, + parts: &RequestParts, + ) -> Option<(HeaderName, HeaderValue)> { + #[allow(clippy::declare_interior_mutable_const)] + const REQUEST_PRIVATE_NETWORK: HeaderName = + HeaderName::from_static("access-control-request-private-network"); + + #[allow(clippy::declare_interior_mutable_const)] + const ALLOW_PRIVATE_NETWORK: HeaderName = + HeaderName::from_static("access-control-allow-private-network"); + + const TRUE: HeaderValue = HeaderValue::from_static("true"); + + // Cheapest fallback: allow_private_network hasn't been set + if let AllowPrivateNetworkInner::No = &self.0 { + return None; + } + + // Access-Control-Allow-Private-Network is only relevant if the request + // has the Access-Control-Request-Private-Network header set, else skip + if parts.headers.get(REQUEST_PRIVATE_NETWORK) != Some(&TRUE) { + return None; + } + + let allow_private_network = match &self.0 { + AllowPrivateNetworkInner::Yes => true, + AllowPrivateNetworkInner::No => false, // unreachable, but not harmful + AllowPrivateNetworkInner::Predicate(c) => c(origin?, parts), + }; + + allow_private_network.then_some((ALLOW_PRIVATE_NETWORK, TRUE)) + } +} + +impl From for AllowPrivateNetwork { + fn from(v: bool) -> Self { + match v { + true => Self(AllowPrivateNetworkInner::Yes), + false => Self(AllowPrivateNetworkInner::No), + } + } +} + +impl fmt::Debug for AllowPrivateNetwork { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 { + AllowPrivateNetworkInner::Yes => f.debug_tuple("Yes").finish(), + AllowPrivateNetworkInner::No => 
f.debug_tuple("No").finish(), + AllowPrivateNetworkInner::Predicate(_) => f.debug_tuple("Predicate").finish(), + } + } +} + +#[derive(Clone)] +enum AllowPrivateNetworkInner { + Yes, + No, + Predicate( + Arc Fn(&'a HeaderValue, &'a RequestParts) -> bool + Send + Sync + 'static>, + ), +} + +impl Default for AllowPrivateNetworkInner { + fn default() -> Self { + Self::No + } +} + +#[cfg(test)] +mod tests { + #![allow( + clippy::declare_interior_mutable_const, + clippy::borrow_interior_mutable_const + )] + + use super::AllowPrivateNetwork; + use crate::cors::CorsLayer; + + use crate::test_helpers::Body; + use http::{header::ORIGIN, request::Parts, HeaderName, HeaderValue, Request, Response}; + use tower::{BoxError, ServiceBuilder, ServiceExt}; + use tower_service::Service; + + const REQUEST_PRIVATE_NETWORK: HeaderName = + HeaderName::from_static("access-control-request-private-network"); + + const ALLOW_PRIVATE_NETWORK: HeaderName = + HeaderName::from_static("access-control-allow-private-network"); + + const TRUE: HeaderValue = HeaderValue::from_static("true"); + + #[tokio::test] + async fn cors_private_network_header_is_added_correctly() { + let mut service = ServiceBuilder::new() + .layer(CorsLayer::new().allow_private_network(true)) + .service_fn(echo); + + let req = Request::builder() + .header(REQUEST_PRIVATE_NETWORK, TRUE) + .body(Body::empty()) + .unwrap(); + let res = service.ready().await.unwrap().call(req).await.unwrap(); + + assert_eq!(res.headers().get(ALLOW_PRIVATE_NETWORK).unwrap(), TRUE); + + let req = Request::builder().body(Body::empty()).unwrap(); + let res = service.ready().await.unwrap().call(req).await.unwrap(); + + assert!(res.headers().get(ALLOW_PRIVATE_NETWORK).is_none()); + } + + #[tokio::test] + async fn cors_private_network_header_is_added_correctly_with_predicate() { + let allow_private_network = + AllowPrivateNetwork::predicate(|origin: &HeaderValue, parts: &Parts| { + parts.uri.path() == "/allow-private" && origin == "localhost" + }); + let 
mut service = ServiceBuilder::new() + .layer(CorsLayer::new().allow_private_network(allow_private_network)) + .service_fn(echo); + + let req = Request::builder() + .header(ORIGIN, "localhost") + .header(REQUEST_PRIVATE_NETWORK, TRUE) + .uri("/allow-private") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(req).await.unwrap(); + assert_eq!(res.headers().get(ALLOW_PRIVATE_NETWORK).unwrap(), TRUE); + + let req = Request::builder() + .header(ORIGIN, "localhost") + .header(REQUEST_PRIVATE_NETWORK, TRUE) + .uri("/other") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(req).await.unwrap(); + + assert!(res.headers().get(ALLOW_PRIVATE_NETWORK).is_none()); + + let req = Request::builder() + .header(ORIGIN, "not-localhost") + .header(REQUEST_PRIVATE_NETWORK, TRUE) + .uri("/allow-private") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(req).await.unwrap(); + + assert!(res.headers().get(ALLOW_PRIVATE_NETWORK).is_none()); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/expose_headers.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/expose_headers.rs new file mode 100644 index 0000000000000000000000000000000000000000..9392746857d1f7c8a88b28c2dc36aaf20f88fb16 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/expose_headers.rs @@ -0,0 +1,93 @@ +use std::fmt; + +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; + +use super::{separated_by_commas, Any, WILDCARD}; + +/// Holds configuration for how to set the [`Access-Control-Expose-Headers`][mdn] header. +/// +/// See [`CorsLayer::expose_headers`] for more details. 
+/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers +/// [`CorsLayer::expose_headers`]: super::CorsLayer::expose_headers +#[derive(Clone, Default)] +#[must_use] +pub struct ExposeHeaders(ExposeHeadersInner); + +impl ExposeHeaders { + /// Expose any / all headers by sending a wildcard (`*`) + /// + /// See [`CorsLayer::expose_headers`] for more details. + /// + /// [`CorsLayer::expose_headers`]: super::CorsLayer::expose_headers + pub fn any() -> Self { + Self(ExposeHeadersInner::Const(Some(WILDCARD))) + } + + /// Set multiple exposed header names + /// + /// See [`CorsLayer::expose_headers`] for more details. + /// + /// [`CorsLayer::expose_headers`]: super::CorsLayer::expose_headers + pub fn list(headers: I) -> Self + where + I: IntoIterator, + { + Self(ExposeHeadersInner::Const(separated_by_commas( + headers.into_iter().map(Into::into), + ))) + } + + #[allow(clippy::borrow_interior_mutable_const)] + pub(super) fn is_wildcard(&self) -> bool { + matches!(&self.0, ExposeHeadersInner::Const(Some(v)) if v == WILDCARD) + } + + pub(super) fn to_header(&self, _parts: &RequestParts) -> Option<(HeaderName, HeaderValue)> { + let expose_headers = match &self.0 { + ExposeHeadersInner::Const(v) => v.clone()?, + }; + + Some((header::ACCESS_CONTROL_EXPOSE_HEADERS, expose_headers)) + } +} + +impl fmt::Debug for ExposeHeaders { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + ExposeHeadersInner::Const(inner) => f.debug_tuple("Const").field(inner).finish(), + } + } +} + +impl From for ExposeHeaders { + fn from(_: Any) -> Self { + Self::any() + } +} + +impl From<[HeaderName; N]> for ExposeHeaders { + fn from(arr: [HeaderName; N]) -> Self { + Self::list(arr) + } +} + +impl From> for ExposeHeaders { + fn from(vec: Vec) -> Self { + Self::list(vec) + } +} + +#[derive(Clone)] +enum ExposeHeadersInner { + Const(Option), +} + +impl Default for ExposeHeadersInner { + fn default() -> Self { + 
ExposeHeadersInner::Const(None) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/max_age.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/max_age.rs new file mode 100644 index 0000000000000000000000000000000000000000..981899263333ca4bead7a55dbac74dae6022b548 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/max_age.rs @@ -0,0 +1,74 @@ +use std::{fmt, sync::Arc, time::Duration}; + +use http::{ + header::{self, HeaderName, HeaderValue}, + request::Parts as RequestParts, +}; + +/// Holds configuration for how to set the [`Access-Control-Max-Age`][mdn] header. +/// +/// See [`CorsLayer::max_age`][super::CorsLayer::max_age] for more details. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age +#[derive(Clone, Default)] +#[must_use] +pub struct MaxAge(MaxAgeInner); + +impl MaxAge { + /// Set a static max-age value + /// + /// See [`CorsLayer::max_age`][super::CorsLayer::max_age] for more details. + pub fn exact(max_age: Duration) -> Self { + Self(MaxAgeInner::Exact(Some(max_age.as_secs().into()))) + } + + /// Set the max-age based on the preflight request parts + /// + /// See [`CorsLayer::max_age`][super::CorsLayer::max_age] for more details. 
+ pub fn dynamic(f: F) -> Self + where + F: Fn(&HeaderValue, &RequestParts) -> Duration + Send + Sync + 'static, + { + Self(MaxAgeInner::Fn(Arc::new(f))) + } + + pub(super) fn to_header( + &self, + origin: Option<&HeaderValue>, + parts: &RequestParts, + ) -> Option<(HeaderName, HeaderValue)> { + let max_age = match &self.0 { + MaxAgeInner::Exact(v) => v.clone()?, + MaxAgeInner::Fn(c) => c(origin?, parts).as_secs().into(), + }; + + Some((header::ACCESS_CONTROL_MAX_AGE, max_age)) + } +} + +impl fmt::Debug for MaxAge { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + MaxAgeInner::Exact(inner) => f.debug_tuple("Exact").field(inner).finish(), + MaxAgeInner::Fn(_) => f.debug_tuple("Fn").finish(), + } + } +} + +impl From for MaxAge { + fn from(max_age: Duration) -> Self { + Self::exact(max_age) + } +} + +#[derive(Clone)] +enum MaxAgeInner { + Exact(Option), + Fn(Arc Fn(&'a HeaderValue, &'a RequestParts) -> Duration + Send + Sync + 'static>), +} + +impl Default for MaxAgeInner { + fn default() -> Self { + Self::Exact(None) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..d40994d0b292b46895dba71b12b9a0d4c6145a4f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/mod.rs @@ -0,0 +1,820 @@ +//! Middleware which adds headers for [CORS][mdn]. +//! +//! # Example +//! +//! ``` +//! use http::{Request, Response, Method, header}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::{ServiceBuilder, ServiceExt, Service}; +//! use tower_http::cors::{Any, CorsLayer}; +//! use std::convert::Infallible; +//! +//! async fn handle(request: Request>) -> Result>, Infallible> { +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! 
# async fn main() -> Result<(), Box> { +//! let cors = CorsLayer::new() +//! // allow `GET` and `POST` when accessing the resource +//! .allow_methods([Method::GET, Method::POST]) +//! // allow requests from any origin +//! .allow_origin(Any); +//! +//! let mut service = ServiceBuilder::new() +//! .layer(cors) +//! .service_fn(handle); +//! +//! let request = Request::builder() +//! .header(header::ORIGIN, "https://example.com") +//! .body(Full::default()) +//! .unwrap(); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!( +//! response.headers().get(header::ACCESS_CONTROL_ALLOW_ORIGIN).unwrap(), +//! "*", +//! ); +//! # Ok(()) +//! # } +//! ``` +//! +//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS + +#![allow(clippy::enum_variant_names)] + +use allow_origin::AllowOriginFuture; +use bytes::{BufMut, BytesMut}; +use http::{ + header::{self, HeaderName}, + HeaderMap, HeaderValue, Method, Request, Response, +}; +use pin_project_lite::pin_project; +use std::{ + future::Future, + mem, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +mod allow_credentials; +mod allow_headers; +mod allow_methods; +mod allow_origin; +mod allow_private_network; +mod expose_headers; +mod max_age; +mod vary; + +#[cfg(test)] +mod tests; + +pub use self::{ + allow_credentials::AllowCredentials, allow_headers::AllowHeaders, allow_methods::AllowMethods, + allow_origin::AllowOrigin, allow_private_network::AllowPrivateNetwork, + expose_headers::ExposeHeaders, max_age::MaxAge, vary::Vary, +}; + +/// Layer that applies the [`Cors`] middleware which adds headers for [CORS][mdn]. +/// +/// See the [module docs](crate::cors) for an example. 
+/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS +#[derive(Debug, Clone)] +#[must_use] +pub struct CorsLayer { + allow_credentials: AllowCredentials, + allow_headers: AllowHeaders, + allow_methods: AllowMethods, + allow_origin: AllowOrigin, + allow_private_network: AllowPrivateNetwork, + expose_headers: ExposeHeaders, + max_age: MaxAge, + vary: Vary, +} + +#[allow(clippy::declare_interior_mutable_const)] +const WILDCARD: HeaderValue = HeaderValue::from_static("*"); + +impl CorsLayer { + /// Create a new `CorsLayer`. + /// + /// No headers are sent by default. Use the builder methods to customize + /// the behavior. + /// + /// You need to set at least an allowed origin for browsers to make + /// successful cross-origin requests to your service. + pub fn new() -> Self { + Self { + allow_credentials: Default::default(), + allow_headers: Default::default(), + allow_methods: Default::default(), + allow_origin: Default::default(), + allow_private_network: Default::default(), + expose_headers: Default::default(), + max_age: Default::default(), + vary: Default::default(), + } + } + + /// A permissive configuration: + /// + /// - All request headers allowed. + /// - All methods allowed. + /// - All origins allowed. + /// - All headers exposed. + pub fn permissive() -> Self { + Self::new() + .allow_headers(Any) + .allow_methods(Any) + .allow_origin(Any) + .expose_headers(Any) + } + + /// A very permissive configuration: + /// + /// - **Credentials allowed.** + /// - The method received in `Access-Control-Request-Method` is sent back + /// as an allowed method. + /// - The origin of the preflight request is sent back as an allowed origin. + /// - The header names received in `Access-Control-Request-Headers` are sent + /// back as allowed headers. + /// - No headers are currently exposed, but this may change in the future. 
+ pub fn very_permissive() -> Self { + Self::new() + .allow_credentials(true) + .allow_headers(AllowHeaders::mirror_request()) + .allow_methods(AllowMethods::mirror_request()) + .allow_origin(AllowOrigin::mirror_request()) + } + + /// Set the [`Access-Control-Allow-Credentials`][mdn] header. + /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// + /// let layer = CorsLayer::new().allow_credentials(true); + /// ``` + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials + pub fn allow_credentials(mut self, allow_credentials: T) -> Self + where + T: Into, + { + self.allow_credentials = allow_credentials.into(); + self + } + + /// Set the value of the [`Access-Control-Allow-Headers`][mdn] header. + /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// use http::header::{AUTHORIZATION, ACCEPT}; + /// + /// let layer = CorsLayer::new().allow_headers([AUTHORIZATION, ACCEPT]); + /// ``` + /// + /// All headers can be allowed with + /// + /// ``` + /// use tower_http::cors::{Any, CorsLayer}; + /// + /// let layer = CorsLayer::new().allow_headers(Any); + /// ``` + /// + /// Note that multiple calls to this method will override any previous + /// calls. + /// + /// Also note that `Access-Control-Allow-Headers` is required for requests that have + /// `Access-Control-Request-Headers`. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers + pub fn allow_headers(mut self, headers: T) -> Self + where + T: Into, + { + self.allow_headers = headers.into(); + self + } + + /// Set the value of the [`Access-Control-Max-Age`][mdn] header. + /// + /// ``` + /// use std::time::Duration; + /// use tower_http::cors::CorsLayer; + /// + /// let layer = CorsLayer::new().max_age(Duration::from_secs(60) * 10); + /// ``` + /// + /// By default the header will not be set which disables caching and will + /// require a preflight call for all requests. 
+ /// + /// Note that each browser has a maximum internal value that takes + /// precedence when the Access-Control-Max-Age is greater. For more details + /// see [mdn]. + /// + /// If you need more flexibility, you can use supply a function which can + /// dynamically decide the max-age based on the origin and other parts of + /// each preflight request: + /// + /// ``` + /// # struct MyServerConfig { cors_max_age: Duration } + /// use std::time::Duration; + /// + /// use http::{request::Parts as RequestParts, HeaderValue}; + /// use tower_http::cors::{CorsLayer, MaxAge}; + /// + /// let layer = CorsLayer::new().max_age(MaxAge::dynamic( + /// |_origin: &HeaderValue, parts: &RequestParts| -> Duration { + /// // Let's say you want to be able to reload your config at + /// // runtime and have another middleware that always inserts + /// // the current config into the request extensions + /// let config = parts.extensions.get::().unwrap(); + /// config.cors_max_age + /// }, + /// )); + /// ``` + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age + pub fn max_age(mut self, max_age: T) -> Self + where + T: Into, + { + self.max_age = max_age.into(); + self + } + + /// Set the value of the [`Access-Control-Allow-Methods`][mdn] header. + /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// use http::Method; + /// + /// let layer = CorsLayer::new().allow_methods([Method::GET, Method::POST]); + /// ``` + /// + /// All methods can be allowed with + /// + /// ``` + /// use tower_http::cors::{Any, CorsLayer}; + /// + /// let layer = CorsLayer::new().allow_methods(Any); + /// ``` + /// + /// Note that multiple calls to this method will override any previous + /// calls. 
+ /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Methods + pub fn allow_methods(mut self, methods: T) -> Self + where + T: Into, + { + self.allow_methods = methods.into(); + self + } + + /// Set the value of the [`Access-Control-Allow-Origin`][mdn] header. + /// + /// ``` + /// use http::HeaderValue; + /// use tower_http::cors::CorsLayer; + /// + /// let layer = CorsLayer::new().allow_origin( + /// "http://example.com".parse::().unwrap(), + /// ); + /// ``` + /// + /// Multiple origins can be allowed with + /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// + /// let origins = [ + /// "http://example.com".parse().unwrap(), + /// "http://api.example.com".parse().unwrap(), + /// ]; + /// + /// let layer = CorsLayer::new().allow_origin(origins); + /// ``` + /// + /// All origins can be allowed with + /// + /// ``` + /// use tower_http::cors::{Any, CorsLayer}; + /// + /// let layer = CorsLayer::new().allow_origin(Any); + /// ``` + /// + /// You can also use a closure + /// + /// ``` + /// use tower_http::cors::{CorsLayer, AllowOrigin}; + /// use http::{request::Parts as RequestParts, HeaderValue}; + /// + /// let layer = CorsLayer::new().allow_origin(AllowOrigin::predicate( + /// |origin: &HeaderValue, _request_parts: &RequestParts| { + /// origin.as_bytes().ends_with(b".rust-lang.org") + /// }, + /// )); + /// ``` + /// + /// You can also use an async closure: + /// + /// ``` + /// # #[derive(Clone)] + /// # struct Client; + /// # fn get_api_client() -> Client { + /// # Client + /// # } + /// # impl Client { + /// # async fn fetch_allowed_origins(&self) -> Vec { + /// # vec![HeaderValue::from_static("http://example.com")] + /// # } + /// # async fn fetch_allowed_origins_for_path(&self, _path: String) -> Vec { + /// # vec![HeaderValue::from_static("http://example.com")] + /// # } + /// # } + /// use tower_http::cors::{CorsLayer, AllowOrigin}; + /// use http::{request::Parts as RequestParts, HeaderValue}; + /// + 
/// let client = get_api_client(); + /// + /// let layer = CorsLayer::new().allow_origin(AllowOrigin::async_predicate( + /// |origin: HeaderValue, _request_parts: &RequestParts| async move { + /// // fetch list of origins that are allowed + /// let origins = client.fetch_allowed_origins().await; + /// origins.contains(&origin) + /// }, + /// )); + /// + /// let client = get_api_client(); + /// + /// // if using &RequestParts, make sure all the values are owned + /// // before passing into the future + /// let layer = CorsLayer::new().allow_origin(AllowOrigin::async_predicate( + /// |origin: HeaderValue, parts: &RequestParts| { + /// let path = parts.uri.path().to_owned(); + /// + /// async move { + /// // fetch list of origins that are allowed for this path + /// let origins = client.fetch_allowed_origins_for_path(path).await; + /// origins.contains(&origin) + /// } + /// }, + /// )); + /// ``` + /// + /// Note that multiple calls to this method will override any previous + /// calls. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin + pub fn allow_origin(mut self, origin: T) -> Self + where + T: Into, + { + self.allow_origin = origin.into(); + self + } + + /// Set the value of the [`Access-Control-Expose-Headers`][mdn] header. + /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// use http::header::CONTENT_ENCODING; + /// + /// let layer = CorsLayer::new().expose_headers([CONTENT_ENCODING]); + /// ``` + /// + /// All headers can be allowed with + /// + /// ``` + /// use tower_http::cors::{Any, CorsLayer}; + /// + /// let layer = CorsLayer::new().expose_headers(Any); + /// ``` + /// + /// Note that multiple calls to this method will override any previous + /// calls. 
+ /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers + pub fn expose_headers(mut self, headers: T) -> Self + where + T: Into, + { + self.expose_headers = headers.into(); + self + } + + /// Set the value of the [`Access-Control-Allow-Private-Network`][wicg] header. + /// + /// ``` + /// use tower_http::cors::CorsLayer; + /// + /// let layer = CorsLayer::new().allow_private_network(true); + /// ``` + /// + /// [wicg]: https://wicg.github.io/private-network-access/ + pub fn allow_private_network(mut self, allow_private_network: T) -> Self + where + T: Into, + { + self.allow_private_network = allow_private_network.into(); + self + } + + /// Set the value(s) of the [`Vary`][mdn] header. + /// + /// In contrast to the other headers, this one has a non-empty default of + /// [`preflight_request_headers()`]. + /// + /// You only need to set this is you want to remove some of these defaults, + /// or if you use a closure for one of the other headers and want to add a + /// vary header accordingly. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Vary + pub fn vary(mut self, headers: T) -> Self + where + T: Into, + { + self.vary = headers.into(); + self + } +} + +/// Represents a wildcard value (`*`) used with some CORS headers such as +/// [`CorsLayer::allow_methods`]. +#[derive(Debug, Clone, Copy)] +#[must_use] +pub struct Any; + +/// Represents a wildcard value (`*`) used with some CORS headers such as +/// [`CorsLayer::allow_methods`]. 
+#[deprecated = "Use Any as a unit struct literal instead"] +pub fn any() -> Any { + Any +} + +fn separated_by_commas(mut iter: I) -> Option +where + I: Iterator, +{ + match iter.next() { + Some(fst) => { + let mut result = BytesMut::from(fst.as_bytes()); + for val in iter { + result.reserve(val.len() + 1); + result.put_u8(b','); + result.extend_from_slice(val.as_bytes()); + } + + Some(HeaderValue::from_maybe_shared(result.freeze()).unwrap()) + } + None => None, + } +} + +impl Default for CorsLayer { + fn default() -> Self { + Self::new() + } +} + +impl Layer for CorsLayer { + type Service = Cors; + + fn layer(&self, inner: S) -> Self::Service { + ensure_usable_cors_rules(self); + + Cors { + inner, + layer: self.clone(), + } + } +} + +/// Middleware which adds headers for [CORS][mdn]. +/// +/// See the [module docs](crate::cors) for an example. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS +#[derive(Debug, Clone)] +#[must_use] +pub struct Cors { + inner: S, + layer: CorsLayer, +} + +impl Cors { + /// Create a new `Cors`. + /// + /// See [`CorsLayer::new`] for more details. + pub fn new(inner: S) -> Self { + Self { + inner, + layer: CorsLayer::new(), + } + } + + /// A permissive configuration. + /// + /// See [`CorsLayer::permissive`] for more details. + pub fn permissive(inner: S) -> Self { + Self { + inner, + layer: CorsLayer::permissive(), + } + } + + /// A very permissive configuration. + /// + /// See [`CorsLayer::very_permissive`] for more details. + pub fn very_permissive(inner: S) -> Self { + Self { + inner, + layer: CorsLayer::very_permissive(), + } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a [`Cors`] middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer() -> CorsLayer { + CorsLayer::new() + } + + /// Set the [`Access-Control-Allow-Credentials`][mdn] header. + /// + /// See [`CorsLayer::allow_credentials`] for more details. 
+ /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials + pub fn allow_credentials(self, allow_credentials: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.allow_credentials(allow_credentials)) + } + + /// Set the value of the [`Access-Control-Allow-Headers`][mdn] header. + /// + /// See [`CorsLayer::allow_headers`] for more details. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers + pub fn allow_headers(self, headers: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.allow_headers(headers)) + } + + /// Set the value of the [`Access-Control-Max-Age`][mdn] header. + /// + /// See [`CorsLayer::max_age`] for more details. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age + pub fn max_age(self, max_age: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.max_age(max_age)) + } + + /// Set the value of the [`Access-Control-Allow-Methods`][mdn] header. + /// + /// See [`CorsLayer::allow_methods`] for more details. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Methods + pub fn allow_methods(self, methods: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.allow_methods(methods)) + } + + /// Set the value of the [`Access-Control-Allow-Origin`][mdn] header. + /// + /// See [`CorsLayer::allow_origin`] for more details. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin + pub fn allow_origin(self, origin: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.allow_origin(origin)) + } + + /// Set the value of the [`Access-Control-Expose-Headers`][mdn] header. + /// + /// See [`CorsLayer::expose_headers`] for more details. 
+ /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers + pub fn expose_headers(self, headers: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.expose_headers(headers)) + } + + /// Set the value of the [`Access-Control-Allow-Private-Network`][wicg] header. + /// + /// See [`CorsLayer::allow_private_network`] for more details. + /// + /// [wicg]: https://wicg.github.io/private-network-access/ + pub fn allow_private_network(self, allow_private_network: T) -> Self + where + T: Into, + { + self.map_layer(|layer| layer.allow_private_network(allow_private_network)) + } + + fn map_layer(mut self, f: F) -> Self + where + F: FnOnce(CorsLayer) -> CorsLayer, + { + self.layer = f(self.layer); + self + } +} + +impl Service> for Cors +where + S: Service, Response = Response>, + ResBody: Default, +{ + type Response = S::Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + ensure_usable_cors_rules(&self.layer); + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let (parts, body) = req.into_parts(); + let origin = parts.headers.get(&header::ORIGIN); + + let mut headers = HeaderMap::new(); + + // These headers are applied to both preflight and subsequent regular CORS requests: + // https://fetch.spec.whatwg.org/#http-responses + + headers.extend(self.layer.allow_credentials.to_header(origin, &parts)); + headers.extend(self.layer.allow_private_network.to_header(origin, &parts)); + headers.extend(self.layer.vary.to_header()); + + let allow_origin_future = self.layer.allow_origin.to_future(origin, &parts); + + // Return results immediately upon preflight request + if parts.method == Method::OPTIONS { + // These headers are applied only to preflight requests + headers.extend(self.layer.allow_methods.to_header(&parts)); + headers.extend(self.layer.allow_headers.to_header(&parts)); + 
headers.extend(self.layer.max_age.to_header(origin, &parts)); + + ResponseFuture { + inner: Kind::PreflightCall { + allow_origin_future, + headers, + }, + } + } else { + // This header is applied only to non-preflight requests + headers.extend(self.layer.expose_headers.to_header(&parts)); + + let req = Request::from_parts(parts, body); + ResponseFuture { + inner: Kind::CorsCall { + allow_origin_future, + allow_origin_complete: false, + future: self.inner.call(req), + headers, + }, + } + } + } +} + +pin_project! { + /// Response future for [`Cors`]. + pub struct ResponseFuture { + #[pin] + inner: Kind, + } +} + +pin_project! { + #[project = KindProj] + enum Kind { + CorsCall { + #[pin] + allow_origin_future: AllowOriginFuture, + allow_origin_complete: bool, + #[pin] + future: F, + headers: HeaderMap, + }, + PreflightCall { + #[pin] + allow_origin_future: AllowOriginFuture, + headers: HeaderMap, + }, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + B: Default, +{ + type Output = Result, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.project().inner.project() { + KindProj::CorsCall { + allow_origin_future, + allow_origin_complete, + future, + headers, + } => { + if !*allow_origin_complete { + headers.extend(ready!(allow_origin_future.poll(cx))); + *allow_origin_complete = true; + } + + let mut response: Response = ready!(future.poll(cx))?; + + let response_headers = response.headers_mut(); + + // vary header can have multiple values, don't overwrite + // previously-set value(s). 
+ if let Some(vary) = headers.remove(header::VARY) { + response_headers.append(header::VARY, vary); + } + // extend will overwrite previous headers of remaining names + response_headers.extend(headers.drain()); + + Poll::Ready(Ok(response)) + } + KindProj::PreflightCall { + allow_origin_future, + headers, + } => { + headers.extend(ready!(allow_origin_future.poll(cx))); + + let mut response = Response::new(B::default()); + mem::swap(response.headers_mut(), headers); + + Poll::Ready(Ok(response)) + } + } + } +} + +fn ensure_usable_cors_rules(layer: &CorsLayer) { + if layer.allow_credentials.is_true() { + assert!( + !layer.allow_headers.is_wildcard(), + "Invalid CORS configuration: Cannot combine `Access-Control-Allow-Credentials: true` \ + with `Access-Control-Allow-Headers: *`" + ); + + assert!( + !layer.allow_methods.is_wildcard(), + "Invalid CORS configuration: Cannot combine `Access-Control-Allow-Credentials: true` \ + with `Access-Control-Allow-Methods: *`" + ); + + assert!( + !layer.allow_origin.is_wildcard(), + "Invalid CORS configuration: Cannot combine `Access-Control-Allow-Credentials: true` \ + with `Access-Control-Allow-Origin: *`" + ); + + assert!( + !layer.expose_headers.is_wildcard(), + "Invalid CORS configuration: Cannot combine `Access-Control-Allow-Credentials: true` \ + with `Access-Control-Expose-Headers: *`" + ); + } +} + +/// Returns an iterator over the three request headers that may be involved in a CORS preflight request. 
+/// +/// This is the default set of header names returned in the `vary` header +pub fn preflight_request_headers() -> impl Iterator { + IntoIterator::into_iter([ + header::ORIGIN, + header::ACCESS_CONTROL_REQUEST_METHOD, + header::ACCESS_CONTROL_REQUEST_HEADERS, + ]) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/tests.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..8f3f4acb1ac0b30a650acc266069179bd9bfcaac --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/tests.rs @@ -0,0 +1,73 @@ +use std::convert::Infallible; + +use crate::test_helpers::Body; +use http::{header, HeaderValue, Request, Response}; +use tower::{service_fn, util::ServiceExt, Layer}; + +use crate::cors::{AllowOrigin, CorsLayer}; + +#[tokio::test] +#[allow( + clippy::declare_interior_mutable_const, + clippy::borrow_interior_mutable_const +)] +async fn vary_set_by_inner_service() { + const CUSTOM_VARY_HEADERS: HeaderValue = HeaderValue::from_static("accept, accept-encoding"); + const PERMISSIVE_CORS_VARY_HEADERS: HeaderValue = HeaderValue::from_static( + "origin, access-control-request-method, access-control-request-headers", + ); + + async fn inner_svc(_: Request) -> Result, Infallible> { + Ok(Response::builder() + .header(header::VARY, CUSTOM_VARY_HEADERS) + .body(Body::empty()) + .unwrap()) + } + + let svc = CorsLayer::permissive().layer(service_fn(inner_svc)); + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + let mut vary_headers = res.headers().get_all(header::VARY).into_iter(); + assert_eq!(vary_headers.next(), Some(&CUSTOM_VARY_HEADERS)); + assert_eq!(vary_headers.next(), Some(&PERMISSIVE_CORS_VARY_HEADERS)); + assert_eq!(vary_headers.next(), None); +} + +#[tokio::test] +async fn test_allow_origin_async_predicate() { + #[derive(Clone)] + struct Client; + + impl Client 
{ + async fn fetch_allowed_origins_for_path(&self, _path: String) -> Vec { + vec![HeaderValue::from_static("http://example.com")] + } + } + + let client = Client; + + let allow_origin = AllowOrigin::async_predicate(|origin, parts| { + let path = parts.uri.path().to_owned(); + + async move { + let origins = client.fetch_allowed_origins_for_path(path).await; + + origins.contains(&origin) + } + }); + + let valid_origin = HeaderValue::from_static("http://example.com"); + let parts = http::Request::new("hello world").into_parts().0; + + let header = allow_origin + .to_future(Some(&valid_origin), &parts) + .await + .unwrap(); + assert_eq!(header.0, header::ACCESS_CONTROL_ALLOW_ORIGIN); + assert_eq!(header.1, valid_origin); + + let invalid_origin = HeaderValue::from_static("http://example.org"); + let parts = http::Request::new("hello world").into_parts().0; + + let res = allow_origin.to_future(Some(&invalid_origin), &parts).await; + assert!(res.is_none()); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/vary.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/vary.rs new file mode 100644 index 0000000000000000000000000000000000000000..3ebe4a27533cbac234c536640c3e8751e7dfbb9e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/cors/vary.rs @@ -0,0 +1,57 @@ +use http::header::{self, HeaderName, HeaderValue}; + +use super::preflight_request_headers; + +/// Holds configuration for how to set the [`Vary`][mdn] header. +/// +/// See [`CorsLayer::vary`] for more details. +/// +/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Vary +/// [`CorsLayer::vary`]: super::CorsLayer::vary +#[derive(Clone, Debug)] +pub struct Vary(Vec); + +impl Vary { + /// Set the list of header names to return as vary header values + /// + /// See [`CorsLayer::vary`] for more details. 
+ /// + /// [`CorsLayer::vary`]: super::CorsLayer::vary + pub fn list(headers: I) -> Self + where + I: IntoIterator, + { + Self(headers.into_iter().map(Into::into).collect()) + } + + pub(super) fn to_header(&self) -> Option<(HeaderName, HeaderValue)> { + let values = &self.0; + let mut res = values.first()?.as_bytes().to_owned(); + for val in &values[1..] { + res.extend_from_slice(b", "); + res.extend_from_slice(val.as_bytes()); + } + + let header_val = HeaderValue::from_bytes(&res) + .expect("comma-separated list of HeaderValues is always a valid HeaderValue"); + Some((header::VARY, header_val)) + } +} + +impl Default for Vary { + fn default() -> Self { + Self::list(preflight_request_headers()) + } +} + +impl From<[HeaderName; N]> for Vary { + fn from(arr: [HeaderName; N]) -> Self { + Self::list(arr) + } +} + +impl From> for Vary { + fn from(vec: Vec) -> Self { + Self::list(vec) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/body.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/body.rs new file mode 100644 index 0000000000000000000000000000000000000000..739490487b963704c39b16b7d852dbeb85477f59 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/body.rs @@ -0,0 +1,406 @@ +#![allow(unused_imports)] + +use crate::compression_utils::CompressionLevel; +use crate::{ + compression_utils::{AsyncReadBody, BodyIntoStream, DecorateAsyncRead, WrapBody}, + BoxError, +}; +#[cfg(feature = "decompression-br")] +use async_compression::tokio::bufread::BrotliDecoder; +#[cfg(feature = "decompression-gzip")] +use async_compression::tokio::bufread::GzipDecoder; +#[cfg(feature = "decompression-deflate")] +use async_compression::tokio::bufread::ZlibDecoder; +#[cfg(feature = "decompression-zstd")] +use async_compression::tokio::bufread::ZstdDecoder; +use bytes::{Buf, Bytes}; +use http::HeaderMap; +use http_body::{Body, SizeHint}; 
+use pin_project_lite::pin_project; +use std::task::Context; +use std::{ + io, + marker::PhantomData, + pin::Pin, + task::{ready, Poll}, +}; +use tokio_util::io::StreamReader; + +pin_project! { + /// Response body of [`RequestDecompression`] and [`Decompression`]. + /// + /// [`RequestDecompression`]: super::RequestDecompression + /// [`Decompression`]: super::Decompression + pub struct DecompressionBody + where + B: Body + { + #[pin] + pub(crate) inner: BodyInner, + } +} + +impl Default for DecompressionBody +where + B: Body + Default, +{ + fn default() -> Self { + Self { + inner: BodyInner::Identity { + inner: B::default(), + }, + } + } +} + +impl DecompressionBody +where + B: Body, +{ + pub(crate) fn new(inner: BodyInner) -> Self { + Self { inner } + } + + /// Get a reference to the inner body + pub fn get_ref(&self) -> &B { + match &self.inner { + #[cfg(feature = "decompression-gzip")] + BodyInner::Gzip { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "decompression-deflate")] + BodyInner::Deflate { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "decompression-br")] + BodyInner::Brotli { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + #[cfg(feature = "decompression-zstd")] + BodyInner::Zstd { inner } => inner.read.get_ref().get_ref().get_ref().get_ref(), + BodyInner::Identity { inner } => inner, + + // FIXME: Remove once possible; see https://github.com/rust-lang/rust/issues/51085 + #[cfg(not(feature = "decompression-gzip"))] + BodyInner::Gzip { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-deflate"))] + BodyInner::Deflate { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-br"))] + BodyInner::Brotli { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-zstd"))] + BodyInner::Zstd { inner } => match inner.0 {}, + } + } + + /// Get a mutable reference to the inner body + pub fn get_mut(&mut self) -> &mut B { + match &mut 
self.inner { + #[cfg(feature = "decompression-gzip")] + BodyInner::Gzip { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "decompression-deflate")] + BodyInner::Deflate { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "decompression-br")] + BodyInner::Brotli { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + #[cfg(feature = "decompression-zstd")] + BodyInner::Zstd { inner } => inner.read.get_mut().get_mut().get_mut().get_mut(), + BodyInner::Identity { inner } => inner, + + #[cfg(not(feature = "decompression-gzip"))] + BodyInner::Gzip { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-deflate"))] + BodyInner::Deflate { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-br"))] + BodyInner::Brotli { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-zstd"))] + BodyInner::Zstd { inner } => match inner.0 {}, + } + } + + /// Get a pinned mutable reference to the inner body + pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut B> { + match self.project().inner.project() { + #[cfg(feature = "decompression-gzip")] + BodyInnerProj::Gzip { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "decompression-deflate")] + BodyInnerProj::Deflate { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "decompression-br")] + BodyInnerProj::Brotli { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + #[cfg(feature = "decompression-zstd")] + BodyInnerProj::Zstd { inner } => inner + .project() + .read + .get_pin_mut() + .get_pin_mut() + .get_pin_mut() + .get_pin_mut(), + BodyInnerProj::Identity { inner } => inner, + + #[cfg(not(feature = "decompression-gzip"))] + BodyInnerProj::Gzip { inner } => match inner.0 {}, + #[cfg(not(feature = 
"decompression-deflate"))] + BodyInnerProj::Deflate { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-br"))] + BodyInnerProj::Brotli { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-zstd"))] + BodyInnerProj::Zstd { inner } => match inner.0 {}, + } + } + + /// Consume `self`, returning the inner body + pub fn into_inner(self) -> B { + match self.inner { + #[cfg(feature = "decompression-gzip")] + BodyInner::Gzip { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "decompression-deflate")] + BodyInner::Deflate { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "decompression-br")] + BodyInner::Brotli { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + #[cfg(feature = "decompression-zstd")] + BodyInner::Zstd { inner } => inner + .read + .into_inner() + .into_inner() + .into_inner() + .into_inner(), + BodyInner::Identity { inner } => inner, + + #[cfg(not(feature = "decompression-gzip"))] + BodyInner::Gzip { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-deflate"))] + BodyInner::Deflate { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-br"))] + BodyInner::Brotli { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-zstd"))] + BodyInner::Zstd { inner } => match inner.0 {}, + } + } +} + +#[cfg(any( + not(feature = "decompression-gzip"), + not(feature = "decompression-deflate"), + not(feature = "decompression-br"), + not(feature = "decompression-zstd") +))] +pub(crate) enum Never {} + +#[cfg(feature = "decompression-gzip")] +type GzipBody = WrapBody>; +#[cfg(not(feature = "decompression-gzip"))] +type GzipBody = (Never, PhantomData); + +#[cfg(feature = "decompression-deflate")] +type DeflateBody = WrapBody>; +#[cfg(not(feature = "decompression-deflate"))] +type DeflateBody = (Never, PhantomData); + +#[cfg(feature = 
"decompression-br")] +type BrotliBody = WrapBody>; +#[cfg(not(feature = "decompression-br"))] +type BrotliBody = (Never, PhantomData); + +#[cfg(feature = "decompression-zstd")] +type ZstdBody = WrapBody>; +#[cfg(not(feature = "decompression-zstd"))] +type ZstdBody = (Never, PhantomData); + +pin_project! { + #[project = BodyInnerProj] + pub(crate) enum BodyInner + where + B: Body, + { + Gzip { + #[pin] + inner: GzipBody, + }, + Deflate { + #[pin] + inner: DeflateBody, + }, + Brotli { + #[pin] + inner: BrotliBody, + }, + Zstd { + #[pin] + inner: ZstdBody, + }, + Identity { + #[pin] + inner: B, + }, + } +} + +impl BodyInner { + #[cfg(feature = "decompression-gzip")] + pub(crate) fn gzip(inner: WrapBody>) -> Self { + Self::Gzip { inner } + } + + #[cfg(feature = "decompression-deflate")] + pub(crate) fn deflate(inner: WrapBody>) -> Self { + Self::Deflate { inner } + } + + #[cfg(feature = "decompression-br")] + pub(crate) fn brotli(inner: WrapBody>) -> Self { + Self::Brotli { inner } + } + + #[cfg(feature = "decompression-zstd")] + pub(crate) fn zstd(inner: WrapBody>) -> Self { + Self::Zstd { inner } + } + + pub(crate) fn identity(inner: B) -> Self { + Self::Identity { inner } + } +} + +impl Body for DecompressionBody +where + B: Body, + B::Error: Into, +{ + type Data = Bytes; + type Error = BoxError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match self.project().inner.project() { + #[cfg(feature = "decompression-gzip")] + BodyInnerProj::Gzip { inner } => inner.poll_frame(cx), + #[cfg(feature = "decompression-deflate")] + BodyInnerProj::Deflate { inner } => inner.poll_frame(cx), + #[cfg(feature = "decompression-br")] + BodyInnerProj::Brotli { inner } => inner.poll_frame(cx), + #[cfg(feature = "decompression-zstd")] + BodyInnerProj::Zstd { inner } => inner.poll_frame(cx), + BodyInnerProj::Identity { inner } => match ready!(inner.poll_frame(cx)) { + Some(Ok(frame)) => { + let frame = frame.map_data(|mut buf| 
buf.copy_to_bytes(buf.remaining())); + Poll::Ready(Some(Ok(frame))) + } + Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), + None => Poll::Ready(None), + }, + + #[cfg(not(feature = "decompression-gzip"))] + BodyInnerProj::Gzip { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-deflate"))] + BodyInnerProj::Deflate { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-br"))] + BodyInnerProj::Brotli { inner } => match inner.0 {}, + #[cfg(not(feature = "decompression-zstd"))] + BodyInnerProj::Zstd { inner } => match inner.0 {}, + } + } + + fn size_hint(&self) -> SizeHint { + match self.inner { + BodyInner::Identity { ref inner } => inner.size_hint(), + _ => SizeHint::default(), + } + } +} + +#[cfg(feature = "decompression-gzip")] +impl DecorateAsyncRead for GzipDecoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = GzipDecoder; + + fn apply(input: Self::Input, _quality: CompressionLevel) -> Self::Output { + GzipDecoder::new(input) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "decompression-deflate")] +impl DecorateAsyncRead for ZlibDecoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = ZlibDecoder; + + fn apply(input: Self::Input, _quality: CompressionLevel) -> Self::Output { + ZlibDecoder::new(input) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "decompression-br")] +impl DecorateAsyncRead for BrotliDecoder +where + B: Body, +{ + type Input = AsyncReadBody; + type Output = BrotliDecoder; + + fn apply(input: Self::Input, _quality: CompressionLevel) -> Self::Output { + BrotliDecoder::new(input) + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} + +#[cfg(feature = "decompression-zstd")] +impl DecorateAsyncRead for ZstdDecoder +where + B: Body, +{ + type Input = 
AsyncReadBody; + type Output = ZstdDecoder; + + fn apply(input: Self::Input, _quality: CompressionLevel) -> Self::Output { + let mut decoder = ZstdDecoder::new(input); + decoder.multiple_members(true); + decoder + } + + fn get_pin_mut(pinned: Pin<&mut Self::Output>) -> Pin<&mut Self::Input> { + pinned.get_pin_mut() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/future.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/future.rs new file mode 100644 index 0000000000000000000000000000000000000000..36867e974a87cf678412f6ed2b4393f7acb5f975 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/future.rs @@ -0,0 +1,80 @@ +#![allow(unused_imports)] + +use super::{body::BodyInner, DecompressionBody}; +use crate::compression_utils::{AcceptEncoding, CompressionLevel, WrapBody}; +use crate::content_encoding::SupportedEncodings; +use http::{header, Response}; +use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; + +pin_project! { + /// Response future of [`Decompression`]. 
+ /// + /// [`Decompression`]: super::Decompression + #[derive(Debug)] + pub struct ResponseFuture { + #[pin] + pub(crate) inner: F, + pub(crate) accept: AcceptEncoding, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + B: Body, +{ + type Output = Result>, E>; + + #[allow(unreachable_code, unused_mut, unused_variables)] + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let res = ready!(self.as_mut().project().inner.poll(cx)?); + let (mut parts, body) = res.into_parts(); + + let res = + if let header::Entry::Occupied(entry) = parts.headers.entry(header::CONTENT_ENCODING) { + let body = match entry.get().as_bytes() { + #[cfg(feature = "decompression-gzip")] + b"gzip" if self.accept.gzip() => DecompressionBody::new(BodyInner::gzip( + WrapBody::new(body, CompressionLevel::default()), + )), + + #[cfg(feature = "decompression-deflate")] + b"deflate" if self.accept.deflate() => DecompressionBody::new( + BodyInner::deflate(WrapBody::new(body, CompressionLevel::default())), + ), + + #[cfg(feature = "decompression-br")] + b"br" if self.accept.br() => DecompressionBody::new(BodyInner::brotli( + WrapBody::new(body, CompressionLevel::default()), + )), + + #[cfg(feature = "decompression-zstd")] + b"zstd" if self.accept.zstd() => DecompressionBody::new(BodyInner::zstd( + WrapBody::new(body, CompressionLevel::default()), + )), + + _ => { + return Poll::Ready(Ok(Response::from_parts( + parts, + DecompressionBody::new(BodyInner::identity(body)), + ))) + } + }; + + entry.remove(); + parts.headers.remove(header::CONTENT_LENGTH); + + Response::from_parts(parts, body) + } else { + Response::from_parts(parts, DecompressionBody::new(BodyInner::identity(body))) + }; + + Poll::Ready(Ok(res)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/layer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/layer.rs new file mode 100644 index 
0000000000000000000000000000000000000000..4a184c166bae6f7948434bf1e52fd489908660ac --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/layer.rs @@ -0,0 +1,92 @@ +use super::Decompression; +use crate::compression_utils::AcceptEncoding; +use tower_layer::Layer; + +/// Decompresses response bodies of the underlying service. +/// +/// This adds the `Accept-Encoding` header to requests and transparently decompresses response +/// bodies based on the `Content-Encoding` header. +/// +/// See the [module docs](crate::decompression) for more details. +#[derive(Debug, Default, Clone)] +pub struct DecompressionLayer { + accept: AcceptEncoding, +} + +impl Layer for DecompressionLayer { + type Service = Decompression; + + fn layer(&self, service: S) -> Self::Service { + Decompression { + inner: service, + accept: self.accept, + } + } +} + +impl DecompressionLayer { + /// Creates a new `DecompressionLayer`. + pub fn new() -> Self { + Default::default() + } + + /// Sets whether to request the gzip encoding. + #[cfg(feature = "decompression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to request the Deflate encoding. + #[cfg(feature = "decompression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to request the Brotli encoding. + #[cfg(feature = "decompression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to request the Zstd encoding. + #[cfg(feature = "decompression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Disables the gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. + pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables the Deflate encoding. 
+ /// + /// This method is available even if the `deflate` crate feature is disabled. + pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables the Brotli encoding. + /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables the Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. + pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..ef37fc3fe34495c88373664449175afbb2be4478 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/mod.rs @@ -0,0 +1,255 @@ +//! Middleware that decompresses request and response bodies. +//! +//! # Examples +//! +//! #### Request +//! +//! ```rust +//! use bytes::Bytes; +//! use flate2::{write::GzEncoder, Compression}; +//! use http::{header, HeaderValue, Request, Response}; +//! use http_body_util::{Full, BodyExt}; +//! use std::{error::Error, io::Write}; +//! use tower::{Service, ServiceBuilder, service_fn, ServiceExt}; +//! use tower_http::{BoxError, decompression::{DecompressionBody, RequestDecompressionLayer}}; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! // A request encoded with gzip coming from some HTTP client. +//! let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); +//! encoder.write_all(b"Hello?")?; +//! let request = Request::builder() +//! .header(header::CONTENT_ENCODING, "gzip") +//! .body(Full::from(encoder.finish()?))?; +//! +//! // Our HTTP server +//! let mut server = ServiceBuilder::new() +//! 
// Automatically decompress request bodies. +//! .layer(RequestDecompressionLayer::new()) +//! .service(service_fn(handler)); +//! +//! // Send the request, with the gzip encoded body, to our server. +//! let _response = server.ready().await?.call(request).await?; +//! +//! // Handler receives request whose body is decoded when read +//! async fn handler( +//! mut req: Request>>, +//! ) -> Result>, BoxError>{ +//! let data = req.into_body().collect().await?.to_bytes(); +//! assert_eq!(&data[..], b"Hello?"); +//! Ok(Response::new(Full::from("Hello, World!"))) +//! } +//! # Ok(()) +//! # } +//! ``` +//! +//! #### Response +//! +//! ```rust +//! use bytes::Bytes; +//! use http::{Request, Response}; +//! use http_body_util::{Full, BodyExt}; +//! use std::convert::Infallible; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use tower_http::{compression::Compression, decompression::DecompressionLayer, BoxError}; +//! # +//! # #[tokio::main] +//! # async fn main() -> Result<(), tower_http::BoxError> { +//! # async fn handle(req: Request>) -> Result>, Infallible> { +//! # let body = Full::from("Hello, World!"); +//! # Ok(Response::new(body)) +//! # } +//! +//! // Some opaque service that applies compression. +//! let service = Compression::new(service_fn(handle)); +//! +//! // Our HTTP client. +//! let mut client = ServiceBuilder::new() +//! // Automatically decompress response bodies. +//! .layer(DecompressionLayer::new()) +//! .service(service); +//! +//! // Call the service. +//! // +//! // `DecompressionLayer` takes care of setting `Accept-Encoding`. +//! let request = Request::new(Full::::default()); +//! +//! let response = client +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! // Read the body +//! let body = response.into_body(); +//! let bytes = body.collect().await?.to_bytes().to_vec(); +//! let body = String::from_utf8(bytes).map_err(Into::::into)?; +//! +//! assert_eq!(body, "Hello, World!"); +//! # +//! 
# Ok(()) +//! # } +//! ``` + +mod request; + +mod body; +mod future; +mod layer; +mod service; + +pub use self::{ + body::DecompressionBody, future::ResponseFuture, layer::DecompressionLayer, + service::Decompression, +}; + +pub use self::request::future::RequestDecompressionFuture; +pub use self::request::layer::RequestDecompressionLayer; +pub use self::request::service::RequestDecompression; + +#[cfg(test)] +mod tests { + use std::convert::Infallible; + use std::io::Write; + + use super::*; + use crate::test_helpers::Body; + use crate::{compression::Compression, test_helpers::WithTrailers}; + use flate2::write::GzEncoder; + use http::Response; + use http::{HeaderMap, HeaderName, Request}; + use http_body_util::BodyExt; + use tower::{service_fn, Service, ServiceExt}; + + #[tokio::test] + async fn works() { + let mut client = Decompression::new(Compression::new(service_fn(handle))); + + let req = Request::builder() + .header("accept-encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = client.ready().await.unwrap().call(req).await.unwrap(); + + // read the body, it will be decompressed automatically + let body = res.into_body(); + let collected = body.collect().await.unwrap(); + let trailers = collected.trailers().cloned().unwrap(); + let decompressed_data = String::from_utf8(collected.to_bytes().to_vec()).unwrap(); + + assert_eq!(decompressed_data, "Hello, World!"); + + // maintains trailers + assert_eq!(trailers["foo"], "bar"); + } + + async fn handle(_req: Request) -> Result>, Infallible> { + let mut trailers = HeaderMap::new(); + trailers.insert(HeaderName::from_static("foo"), "bar".parse().unwrap()); + let body = Body::from("Hello, World!").with_trailers(trailers); + Ok(Response::builder().body(body).unwrap()) + } + + #[tokio::test] + async fn decompress_multi_zstd() { + let mut client = Decompression::new(service_fn(handle_multi_zstd)); + + let req = Request::builder() + .header("accept-encoding", "zstd") + .body(Body::empty()) + .unwrap(); + let 
res = client.ready().await.unwrap().call(req).await.unwrap(); + + // read the body, it will be decompressed automatically + let body = res.into_body(); + let decompressed_data = + String::from_utf8(body.collect().await.unwrap().to_bytes().to_vec()).unwrap(); + + assert_eq!(decompressed_data, "Hello, World!"); + } + + async fn handle_multi_zstd(_req: Request) -> Result, Infallible> { + let mut buf = Vec::new(); + let mut enc1 = zstd::Encoder::new(&mut buf, Default::default()).unwrap(); + enc1.write_all(b"Hello, ").unwrap(); + enc1.finish().unwrap(); + + let mut enc2 = zstd::Encoder::new(&mut buf, Default::default()).unwrap(); + enc2.write_all(b"World!").unwrap(); + enc2.finish().unwrap(); + + let mut res = Response::new(Body::from(buf)); + res.headers_mut() + .insert("content-encoding", "zstd".parse().unwrap()); + Ok(res) + } + + #[allow(dead_code)] + async fn is_compatible_with_hyper() { + let client = + hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new()) + .build_http(); + let mut client = Decompression::new(client); + + let req = Request::new(Body::empty()); + + let _: Response> = + client.ready().await.unwrap().call(req).await.unwrap(); + } + + #[tokio::test] + async fn decompress_empty() { + let mut client = Decompression::new(Compression::new(service_fn(handle_empty))); + + let req = Request::builder() + .header("accept-encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = client.ready().await.unwrap().call(req).await.unwrap(); + + let body = res.into_body(); + let decompressed_data = + String::from_utf8(body.collect().await.unwrap().to_bytes().to_vec()).unwrap(); + + assert_eq!(decompressed_data, ""); + } + + async fn handle_empty(_req: Request) -> Result, Infallible> { + let mut res = Response::new(Body::empty()); + res.headers_mut() + .insert("content-encoding", "gzip".parse().unwrap()); + Ok(res) + } + + #[tokio::test] + async fn decompress_empty_with_trailers() { + let mut client = + 
Decompression::new(Compression::new(service_fn(handle_empty_with_trailers))); + + let req = Request::builder() + .header("accept-encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = client.ready().await.unwrap().call(req).await.unwrap(); + + let body = res.into_body(); + let collected = body.collect().await.unwrap(); + let trailers = collected.trailers().cloned().unwrap(); + let decompressed_data = String::from_utf8(collected.to_bytes().to_vec()).unwrap(); + + assert_eq!(decompressed_data, ""); + assert_eq!(trailers["foo"], "bar"); + } + + async fn handle_empty_with_trailers( + _req: Request, + ) -> Result>, Infallible> { + let mut trailers = HeaderMap::new(); + trailers.insert(HeaderName::from_static("foo"), "bar".parse().unwrap()); + let body = Body::empty().with_trailers(trailers); + Ok(Response::builder() + .header("content-encoding", "gzip") + .body(body) + .unwrap()) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/future.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/future.rs new file mode 100644 index 0000000000000000000000000000000000000000..bdb22f8b40914b6ebd1f18bdfed9a2028ee2ade2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/future.rs @@ -0,0 +1,98 @@ +use crate::body::UnsyncBoxBody; +use crate::compression_utils::AcceptEncoding; +use crate::BoxError; +use bytes::Buf; +use http::{header, HeaderValue, Response, StatusCode}; +use http_body::Body; +use http_body_util::BodyExt; +use http_body_util::Empty; +use pin_project_lite::pin_project; +use std::future::Future; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; + +pin_project! { + #[derive(Debug)] + /// Response future of [`RequestDecompression`] + pub struct RequestDecompressionFuture + where + F: Future, E>>, + B: Body + { + #[pin] + kind: Kind, + } +} + +pin_project! 
{ + #[derive(Debug)] + #[project = StateProj] + enum Kind + where + F: Future, E>>, + B: Body + { + Inner { + #[pin] + fut: F + }, + Unsupported { + #[pin] + accept: AcceptEncoding + }, + } +} + +impl RequestDecompressionFuture +where + F: Future, E>>, + B: Body, +{ + #[must_use] + pub(super) fn unsupported_encoding(accept: AcceptEncoding) -> Self { + Self { + kind: Kind::Unsupported { accept }, + } + } + + #[must_use] + pub(super) fn inner(fut: F) -> Self { + Self { + kind: Kind::Inner { fut }, + } + } +} + +impl Future for RequestDecompressionFuture +where + F: Future, E>>, + B: Body + Send + 'static, + B::Data: Buf + 'static, + B::Error: Into + 'static, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.project().kind.project() { + StateProj::Inner { fut } => fut.poll(cx).map_ok(|res| { + res.map(|body| UnsyncBoxBody::new(body.map_err(Into::into).boxed_unsync())) + }), + StateProj::Unsupported { accept } => { + let res = Response::builder() + .header( + header::ACCEPT_ENCODING, + accept + .to_header_value() + .unwrap_or(HeaderValue::from_static("identity")), + ) + .status(StatusCode::UNSUPPORTED_MEDIA_TYPE) + .body(UnsyncBoxBody::new( + Empty::new().map_err(Into::into).boxed_unsync(), + )) + .unwrap(); + Poll::Ready(Ok(res)) + } + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/layer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/layer.rs new file mode 100644 index 0000000000000000000000000000000000000000..71200960edd4412e9aad8c7fee0bde75228257ed --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/layer.rs @@ -0,0 +1,105 @@ +use super::service::RequestDecompression; +use crate::compression_utils::AcceptEncoding; +use tower_layer::Layer; + +/// Decompresses request bodies and calls its underlying service. 
+/// +/// Transparently decompresses request bodies based on the `Content-Encoding` header. +/// When the encoding in the `Content-Encoding` header is not accepted an `Unsupported Media Type` +/// status code will be returned with the accepted encodings in the `Accept-Encoding` header. +/// +/// Enabling pass-through of unaccepted encodings will not return an `Unsupported Media Type`. But +/// will call the underlying service with the unmodified request if the encoding is not supported. +/// This is disabled by default. +/// +/// See the [module docs](crate::decompression) for more details. +#[derive(Debug, Default, Clone)] +pub struct RequestDecompressionLayer { + accept: AcceptEncoding, + pass_through_unaccepted: bool, +} + +impl Layer for RequestDecompressionLayer { + type Service = RequestDecompression; + + fn layer(&self, service: S) -> Self::Service { + RequestDecompression { + inner: service, + accept: self.accept, + pass_through_unaccepted: self.pass_through_unaccepted, + } + } +} + +impl RequestDecompressionLayer { + /// Creates a new `RequestDecompressionLayer`. + pub fn new() -> Self { + Default::default() + } + + /// Sets whether to support gzip encoding. + #[cfg(feature = "decompression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to support Deflate encoding. + #[cfg(feature = "decompression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to support Brotli encoding. + #[cfg(feature = "decompression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to support Zstd encoding. + #[cfg(feature = "decompression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Disables support for gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. 
+ pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables support for Deflate encoding. + /// + /// This method is available even if the `deflate` crate feature is disabled. + pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables support for Brotli encoding. + /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables support for Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. + pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } + + /// Sets whether to pass through the request even when the encoding is not supported. + pub fn pass_through_unaccepted(mut self, enable: bool) -> Self { + self.pass_through_unaccepted = enable; + self + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..da3d94093cbd272ced530e36e7d815d3d826bb51 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/mod.rs @@ -0,0 +1,90 @@ +pub(super) mod future; +pub(super) mod layer; +pub(super) mod service; + +#[cfg(test)] +mod tests { + use super::service::RequestDecompression; + use crate::decompression::DecompressionBody; + use crate::test_helpers::Body; + use flate2::{write::GzEncoder, Compression}; + use http::{header, Request, Response, StatusCode}; + use http_body_util::BodyExt; + use std::{convert::Infallible, io::Write}; + use tower::{service_fn, Service, ServiceExt}; + + #[tokio::test] + async fn decompress_accepted_encoding() { + let req = request_gzip(); + let mut svc = 
RequestDecompression::new(service_fn(assert_request_is_decompressed)); + let _ = svc.ready().await.unwrap().call(req).await.unwrap(); + } + + #[tokio::test] + async fn support_unencoded_body() { + let req = Request::builder().body(Body::from("Hello?")).unwrap(); + let mut svc = RequestDecompression::new(service_fn(assert_request_is_decompressed)); + let _ = svc.ready().await.unwrap().call(req).await.unwrap(); + } + + #[tokio::test] + async fn unaccepted_content_encoding_returns_unsupported_media_type() { + let req = request_gzip(); + let mut svc = RequestDecompression::new(service_fn(should_not_be_called)).gzip(false); + let res = svc.ready().await.unwrap().call(req).await.unwrap(); + assert_eq!(StatusCode::UNSUPPORTED_MEDIA_TYPE, res.status()); + } + + #[tokio::test] + async fn pass_through_unsupported_encoding_when_enabled() { + let req = request_gzip(); + let mut svc = RequestDecompression::new(service_fn(assert_request_is_passed_through)) + .pass_through_unaccepted(true) + .gzip(false); + let _ = svc.ready().await.unwrap().call(req).await.unwrap(); + } + + async fn assert_request_is_decompressed( + req: Request>, + ) -> Result, Infallible> { + let (parts, mut body) = req.into_parts(); + let body = read_body(&mut body).await; + + assert_eq!(body, b"Hello?"); + assert!(!parts.headers.contains_key(header::CONTENT_ENCODING)); + + Ok(Response::new(Body::from("Hello, World!"))) + } + + async fn assert_request_is_passed_through( + req: Request>, + ) -> Result, Infallible> { + let (parts, mut body) = req.into_parts(); + let body = read_body(&mut body).await; + + assert_ne!(body, b"Hello?"); + assert!(parts.headers.contains_key(header::CONTENT_ENCODING)); + + Ok(Response::new(Body::empty())) + } + + async fn should_not_be_called( + _: Request>, + ) -> Result, Infallible> { + panic!("Inner service should not be called"); + } + + fn request_gzip() -> Request { + let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); + encoder.write_all(b"Hello?").unwrap(); 
+ let body = encoder.finish().unwrap(); + Request::builder() + .header(header::CONTENT_ENCODING, "gzip") + .body(Body::from(body)) + .unwrap() + } + + async fn read_body(body: &mut DecompressionBody) -> Vec { + body.collect().await.unwrap().to_bytes().to_vec() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/service.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/service.rs new file mode 100644 index 0000000000000000000000000000000000000000..663436e5d1d6068e04d01c40d11d0fc7d8c89343 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/request/service.rs @@ -0,0 +1,198 @@ +use super::future::RequestDecompressionFuture as ResponseFuture; +use super::layer::RequestDecompressionLayer; +use crate::body::UnsyncBoxBody; +use crate::compression_utils::CompressionLevel; +use crate::{ + compression_utils::AcceptEncoding, decompression::body::BodyInner, + decompression::DecompressionBody, BoxError, +}; +use bytes::Buf; +use http::{header, Request, Response}; +use http_body::Body; +use std::task::{Context, Poll}; +use tower_service::Service; + +#[cfg(any( + feature = "decompression-gzip", + feature = "decompression-deflate", + feature = "decompression-br", + feature = "decompression-zstd", +))] +use crate::content_encoding::SupportedEncodings; + +/// Decompresses request bodies and calls its underlying service. +/// +/// Transparently decompresses request bodies based on the `Content-Encoding` header. +/// When the encoding in the `Content-Encoding` header is not accepted an `Unsupported Media Type` +/// status code will be returned with the accepted encodings in the `Accept-Encoding` header. +/// +/// Enabling pass-through of unaccepted encodings will not return an `Unsupported Media Type` but +/// will call the underlying service with the unmodified request if the encoding is not supported. 
+/// This is disabled by default. +/// +/// See the [module docs](crate::decompression) for more details. +#[derive(Debug, Clone)] +pub struct RequestDecompression { + pub(super) inner: S, + pub(super) accept: AcceptEncoding, + pub(super) pass_through_unaccepted: bool, +} + +impl Service> for RequestDecompression +where + S: Service>, Response = Response>, + ReqBody: Body, + ResBody: Body + Send + 'static, + ::Error: Into, + D: Buf + 'static, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let (mut parts, body) = req.into_parts(); + + let body = + if let header::Entry::Occupied(entry) = parts.headers.entry(header::CONTENT_ENCODING) { + match entry.get().as_bytes() { + #[cfg(feature = "decompression-gzip")] + b"gzip" if self.accept.gzip() => { + entry.remove(); + parts.headers.remove(header::CONTENT_LENGTH); + BodyInner::gzip(crate::compression_utils::WrapBody::new( + body, + CompressionLevel::default(), + )) + } + #[cfg(feature = "decompression-deflate")] + b"deflate" if self.accept.deflate() => { + entry.remove(); + parts.headers.remove(header::CONTENT_LENGTH); + BodyInner::deflate(crate::compression_utils::WrapBody::new( + body, + CompressionLevel::default(), + )) + } + #[cfg(feature = "decompression-br")] + b"br" if self.accept.br() => { + entry.remove(); + parts.headers.remove(header::CONTENT_LENGTH); + BodyInner::brotli(crate::compression_utils::WrapBody::new( + body, + CompressionLevel::default(), + )) + } + #[cfg(feature = "decompression-zstd")] + b"zstd" if self.accept.zstd() => { + entry.remove(); + parts.headers.remove(header::CONTENT_LENGTH); + BodyInner::zstd(crate::compression_utils::WrapBody::new( + body, + CompressionLevel::default(), + )) + } + b"identity" => BodyInner::identity(body), + _ if self.pass_through_unaccepted => BodyInner::identity(body), + _ => 
return ResponseFuture::unsupported_encoding(self.accept), + } + } else { + BodyInner::identity(body) + }; + let body = DecompressionBody::new(body); + let req = Request::from_parts(parts, body); + ResponseFuture::inner(self.inner.call(req)) + } +} + +impl RequestDecompression { + /// Creates a new `RequestDecompression` wrapping the `service`. + pub fn new(service: S) -> Self { + Self { + inner: service, + accept: AcceptEncoding::default(), + pass_through_unaccepted: false, + } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `RequestDecompression` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer() -> RequestDecompressionLayer { + RequestDecompressionLayer::new() + } + + /// Passes through the request even when the encoding is not supported. + /// + /// By default pass-through is disabled. + pub fn pass_through_unaccepted(mut self, enabled: bool) -> Self { + self.pass_through_unaccepted = enabled; + self + } + + /// Sets whether to support gzip encoding. + #[cfg(feature = "decompression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to support Deflate encoding. + #[cfg(feature = "decompression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to support Brotli encoding. + #[cfg(feature = "decompression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to support Zstd encoding. + #[cfg(feature = "decompression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Disables support for gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. + pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables support for Deflate encoding. 
+ /// + /// This method is available even if the `deflate` crate feature is disabled. + pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables support for Brotli encoding. + /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables support for Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. + pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/service.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/service.rs new file mode 100644 index 0000000000000000000000000000000000000000..50e8ead5fd379260718ffccd667527215b78cbf9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/decompression/service.rs @@ -0,0 +1,127 @@ +use super::{DecompressionBody, DecompressionLayer, ResponseFuture}; +use crate::compression_utils::AcceptEncoding; +use http::{ + header::{self, ACCEPT_ENCODING}, + Request, Response, +}; +use http_body::Body; +use std::task::{Context, Poll}; +use tower_service::Service; + +/// Decompresses response bodies of the underlying service. +/// +/// This adds the `Accept-Encoding` header to requests and transparently decompresses response +/// bodies based on the `Content-Encoding` header. +/// +/// See the [module docs](crate::decompression) for more details. +#[derive(Debug, Clone)] +pub struct Decompression { + pub(crate) inner: S, + pub(crate) accept: AcceptEncoding, +} + +impl Decompression { + /// Creates a new `Decompression` wrapping the `service`. 
+ pub fn new(service: S) -> Self { + Self { + inner: service, + accept: AcceptEncoding::default(), + } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `Decompression` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer() -> DecompressionLayer { + DecompressionLayer::new() + } + + /// Sets whether to request the gzip encoding. + #[cfg(feature = "decompression-gzip")] + pub fn gzip(mut self, enable: bool) -> Self { + self.accept.set_gzip(enable); + self + } + + /// Sets whether to request the Deflate encoding. + #[cfg(feature = "decompression-deflate")] + pub fn deflate(mut self, enable: bool) -> Self { + self.accept.set_deflate(enable); + self + } + + /// Sets whether to request the Brotli encoding. + #[cfg(feature = "decompression-br")] + pub fn br(mut self, enable: bool) -> Self { + self.accept.set_br(enable); + self + } + + /// Sets whether to request the Zstd encoding. + #[cfg(feature = "decompression-zstd")] + pub fn zstd(mut self, enable: bool) -> Self { + self.accept.set_zstd(enable); + self + } + + /// Disables the gzip encoding. + /// + /// This method is available even if the `gzip` crate feature is disabled. + pub fn no_gzip(mut self) -> Self { + self.accept.set_gzip(false); + self + } + + /// Disables the Deflate encoding. + /// + /// This method is available even if the `deflate` crate feature is disabled. + pub fn no_deflate(mut self) -> Self { + self.accept.set_deflate(false); + self + } + + /// Disables the Brotli encoding. + /// + /// This method is available even if the `br` crate feature is disabled. + pub fn no_br(mut self) -> Self { + self.accept.set_br(false); + self + } + + /// Disables the Zstd encoding. + /// + /// This method is available even if the `zstd` crate feature is disabled. 
+ pub fn no_zstd(mut self) -> Self { + self.accept.set_zstd(false); + self + } +} + +impl Service> for Decompression +where + S: Service, Response = Response>, + ResBody: Body, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + if let header::Entry::Vacant(entry) = req.headers_mut().entry(ACCEPT_ENCODING) { + if let Some(accept) = self.accept.to_header_value() { + entry.insert(accept); + } + } + + ResponseFuture { + inner: self.inner.call(req), + accept: self.accept, + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..a90e082503754ab595c80f1da7a7c780b4d98609 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/mod.rs @@ -0,0 +1,476 @@ +//! Middleware for following redirections. +//! +//! # Overview +//! +//! The [`FollowRedirect`] middleware retries requests with the inner [`Service`] to follow HTTP +//! redirections. +//! +//! The middleware tries to clone the original [`Request`] when making a redirected request. +//! However, since [`Extensions`][http::Extensions] are `!Clone`, any extensions set by outer +//! middleware will be discarded. Also, the request body cannot always be cloned. When the +//! original body is known to be empty by [`Body::size_hint`], the middleware uses `Default` +//! implementation of the body type to create a new request body. If you know that the body can be +//! cloned in some way, you can tell the middleware to clone it by configuring a [`policy`]. +//! +//! # Examples +//! +//! ## Basic usage +//! +//! ``` +//! use http::{Request, Response}; +//! 
use bytes::Bytes; +//! use http_body_util::Full; +//! use tower::{Service, ServiceBuilder, ServiceExt}; +//! use tower_http::follow_redirect::{FollowRedirectLayer, RequestUri}; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), std::convert::Infallible> { +//! # let http_client = tower::service_fn(|req: Request<_>| async move { +//! # let dest = "https://www.rust-lang.org/"; +//! # let mut res = http::Response::builder(); +//! # if req.uri() != dest { +//! # res = res +//! # .status(http::StatusCode::MOVED_PERMANENTLY) +//! # .header(http::header::LOCATION, dest); +//! # } +//! # Ok::<_, std::convert::Infallible>(res.body(Full::::default()).unwrap()) +//! # }); +//! let mut client = ServiceBuilder::new() +//! .layer(FollowRedirectLayer::new()) +//! .service(http_client); +//! +//! let request = Request::builder() +//! .uri("https://rust-lang.org/") +//! .body(Full::::default()) +//! .unwrap(); +//! +//! let response = client.ready().await?.call(request).await?; +//! // Get the final request URI. +//! assert_eq!(response.extensions().get::().unwrap().0, "https://www.rust-lang.org/"); +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Customizing the `Policy` +//! +//! You can use a [`Policy`] value to customize how the middleware handles redirections. +//! +//! ``` +//! use http::{Request, Response}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::{Service, ServiceBuilder, ServiceExt}; +//! use tower_http::follow_redirect::{ +//! policy::{self, PolicyExt}, +//! FollowRedirectLayer, +//! }; +//! +//! #[derive(Debug)] +//! enum MyError { +//! TooManyRedirects, +//! Other(tower::BoxError), +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), MyError> { +//! # let http_client = +//! # tower::service_fn(|_: Request>| async { Ok(Response::new(Full::::default())) }); +//! let policy = policy::Limited::new(10) // Set the maximum number of redirections to 10. +//! // Return an error when the limit was reached. +//! 
.or::<_, (), _>(policy::redirect_fn(|_| Err(MyError::TooManyRedirects))) +//! // Do not follow cross-origin redirections, and return the redirection responses as-is. +//! .and::<_, (), _>(policy::SameOrigin::new()); +//! +//! let mut client = ServiceBuilder::new() +//! .layer(FollowRedirectLayer::with_policy(policy)) +//! .map_err(MyError::Other) +//! .service(http_client); +//! +//! // ... +//! # let _ = client.ready().await?.call(Request::default()).await?; +//! # Ok(()) +//! # } +//! ``` + +pub mod policy; + +use self::policy::{Action, Attempt, Policy, Standard}; +use futures_util::future::Either; +use http::{ + header::CONTENT_ENCODING, header::CONTENT_LENGTH, header::CONTENT_TYPE, header::LOCATION, + header::TRANSFER_ENCODING, HeaderMap, HeaderValue, Method, Request, Response, StatusCode, Uri, + Version, +}; +use http_body::Body; +use iri_string::types::{UriAbsoluteString, UriReferenceStr}; +use pin_project_lite::pin_project; +use std::{ + convert::TryFrom, + future::Future, + mem, + pin::Pin, + str, + task::{ready, Context, Poll}, +}; +use tower::util::Oneshot; +use tower_layer::Layer; +use tower_service::Service; + +/// [`Layer`] for retrying requests with a [`Service`] to follow redirection responses. +/// +/// See the [module docs](self) for more details. +#[derive(Clone, Copy, Debug, Default)] +pub struct FollowRedirectLayer

{ + policy: P, +} + +impl FollowRedirectLayer { + /// Create a new [`FollowRedirectLayer`] with a [`Standard`] redirection policy. + pub fn new() -> Self { + Self::default() + } +} + +impl

FollowRedirectLayer

{ + /// Create a new [`FollowRedirectLayer`] with the given redirection [`Policy`]. + pub fn with_policy(policy: P) -> Self { + FollowRedirectLayer { policy } + } +} + +impl Layer for FollowRedirectLayer

+where + S: Clone, + P: Clone, +{ + type Service = FollowRedirect; + + fn layer(&self, inner: S) -> Self::Service { + FollowRedirect::with_policy(inner, self.policy.clone()) + } +} + +/// Middleware that retries requests with a [`Service`] to follow redirection responses. +/// +/// See the [module docs](self) for more details. +#[derive(Clone, Copy, Debug)] +pub struct FollowRedirect { + inner: S, + policy: P, +} + +impl FollowRedirect { + /// Create a new [`FollowRedirect`] with a [`Standard`] redirection policy. + pub fn new(inner: S) -> Self { + Self::with_policy(inner, Standard::default()) + } + + /// Returns a new [`Layer`] that wraps services with a `FollowRedirect` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer() -> FollowRedirectLayer { + FollowRedirectLayer::new() + } +} + +impl FollowRedirect +where + P: Clone, +{ + /// Create a new [`FollowRedirect`] with the given redirection [`Policy`]. + pub fn with_policy(inner: S, policy: P) -> Self { + FollowRedirect { inner, policy } + } + + /// Returns a new [`Layer`] that wraps services with a `FollowRedirect` middleware + /// with the given redirection [`Policy`]. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer_with_policy(policy: P) -> FollowRedirectLayer

{ + FollowRedirectLayer::with_policy(policy) + } + + define_inner_service_accessors!(); +} + +impl Service> for FollowRedirect +where + S: Service, Response = Response> + Clone, + ReqBody: Body + Default, + P: Policy + Clone, +{ + type Response = Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + let service = self.inner.clone(); + let mut service = mem::replace(&mut self.inner, service); + let mut policy = self.policy.clone(); + let mut body = BodyRepr::None; + body.try_clone_from(req.body(), &policy); + policy.on_request(&mut req); + ResponseFuture { + method: req.method().clone(), + uri: req.uri().clone(), + version: req.version(), + headers: req.headers().clone(), + body, + future: Either::Left(service.call(req)), + service, + policy, + } + } +} + +pin_project! { + /// Response future for [`FollowRedirect`]. + #[derive(Debug)] + pub struct ResponseFuture + where + S: Service>, + { + #[pin] + future: Either>>, + service: S, + policy: P, + method: Method, + uri: Uri, + version: Version, + headers: HeaderMap, + body: BodyRepr, + } +} + +impl Future for ResponseFuture +where + S: Service, Response = Response> + Clone, + ReqBody: Body + Default, + P: Policy, +{ + type Output = Result, S::Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + let mut res = ready!(this.future.as_mut().poll(cx)?); + res.extensions_mut().insert(RequestUri(this.uri.clone())); + + let drop_payload_headers = |headers: &mut HeaderMap| { + for header in &[ + CONTENT_TYPE, + CONTENT_LENGTH, + CONTENT_ENCODING, + TRANSFER_ENCODING, + ] { + headers.remove(header); + } + }; + match res.status() { + StatusCode::MOVED_PERMANENTLY | StatusCode::FOUND => { + // User agents MAY change the request method from POST to GET + // (RFC 7231 section 6.4.2. and 6.4.3.). 
+ if *this.method == Method::POST { + *this.method = Method::GET; + *this.body = BodyRepr::Empty; + drop_payload_headers(this.headers); + } + } + StatusCode::SEE_OTHER => { + // A user agent can perform a GET or HEAD request (RFC 7231 section 6.4.4.). + if *this.method != Method::HEAD { + *this.method = Method::GET; + } + *this.body = BodyRepr::Empty; + drop_payload_headers(this.headers); + } + StatusCode::TEMPORARY_REDIRECT | StatusCode::PERMANENT_REDIRECT => {} + _ => return Poll::Ready(Ok(res)), + }; + + let body = if let Some(body) = this.body.take() { + body + } else { + return Poll::Ready(Ok(res)); + }; + + let location = res + .headers() + .get(&LOCATION) + .and_then(|loc| resolve_uri(str::from_utf8(loc.as_bytes()).ok()?, this.uri)); + let location = if let Some(loc) = location { + loc + } else { + return Poll::Ready(Ok(res)); + }; + + let attempt = Attempt { + status: res.status(), + location: &location, + previous: this.uri, + }; + match this.policy.redirect(&attempt)? { + Action::Follow => { + *this.uri = location; + this.body.try_clone_from(&body, &this.policy); + + let mut req = Request::new(body); + *req.uri_mut() = this.uri.clone(); + *req.method_mut() = this.method.clone(); + *req.version_mut() = *this.version; + *req.headers_mut() = this.headers.clone(); + this.policy.on_request(&mut req); + this.future + .set(Either::Right(Oneshot::new(this.service.clone(), req))); + + cx.waker().wake_by_ref(); + Poll::Pending + } + Action::Stop => Poll::Ready(Ok(res)), + } + } +} + +/// Response [`Extensions`][http::Extensions] value that represents the effective request URI of +/// a response returned by a [`FollowRedirect`] middleware. +/// +/// The value differs from the original request's effective URI if the middleware has followed +/// redirections. 
+#[derive(Clone)] +pub struct RequestUri(pub Uri); + +#[derive(Debug)] +enum BodyRepr { + Some(B), + Empty, + None, +} + +impl BodyRepr +where + B: Body + Default, +{ + fn take(&mut self) -> Option { + match mem::replace(self, BodyRepr::None) { + BodyRepr::Some(body) => Some(body), + BodyRepr::Empty => { + *self = BodyRepr::Empty; + Some(B::default()) + } + BodyRepr::None => None, + } + } + + fn try_clone_from(&mut self, body: &B, policy: &P) + where + P: Policy, + { + match self { + BodyRepr::Some(_) | BodyRepr::Empty => {} + BodyRepr::None => { + if let Some(body) = clone_body(policy, body) { + *self = BodyRepr::Some(body); + } + } + } + } +} + +fn clone_body(policy: &P, body: &B) -> Option +where + P: Policy, + B: Body + Default, +{ + if body.size_hint().exact() == Some(0) { + Some(B::default()) + } else { + policy.clone_body(body) + } +} + +/// Try to resolve a URI reference `relative` against a base URI `base`. +fn resolve_uri(relative: &str, base: &Uri) -> Option { + let relative = UriReferenceStr::new(relative).ok()?; + let base = UriAbsoluteString::try_from(base.to_string()).ok()?; + let uri = relative.resolve_against(&base).to_string(); + Uri::try_from(uri).ok() +} + +#[cfg(test)] +mod tests { + use super::{policy::*, *}; + use crate::test_helpers::Body; + use http::header::LOCATION; + use std::convert::Infallible; + use tower::{ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn follows() { + let svc = ServiceBuilder::new() + .layer(FollowRedirectLayer::with_policy(Action::Follow)) + .buffer(1) + .service_fn(handle); + let req = Request::builder() + .uri("http://example.com/42") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(*res.body(), 0); + assert_eq!( + res.extensions().get::().unwrap().0, + "http://example.com/0" + ); + } + + #[tokio::test] + async fn stops() { + let svc = ServiceBuilder::new() + .layer(FollowRedirectLayer::with_policy(Action::Stop)) + .buffer(1) + .service_fn(handle); + let req = 
Request::builder() + .uri("http://example.com/42") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(*res.body(), 42); + assert_eq!( + res.extensions().get::().unwrap().0, + "http://example.com/42" + ); + } + + #[tokio::test] + async fn limited() { + let svc = ServiceBuilder::new() + .layer(FollowRedirectLayer::with_policy(Limited::new(10))) + .buffer(1) + .service_fn(handle); + let req = Request::builder() + .uri("http://example.com/42") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(*res.body(), 42 - 10); + assert_eq!( + res.extensions().get::().unwrap().0, + "http://example.com/32" + ); + } + + /// A server with an endpoint `GET /{n}` which redirects to `/{n-1}` unless `n` equals zero, + /// returning `n` as the response body. + async fn handle(req: Request) -> Result, Infallible> { + let n: u64 = req.uri().path()[1..].parse().unwrap(); + let mut res = Response::builder(); + if n > 0 { + res = res + .status(StatusCode::MOVED_PERMANENTLY) + .header(LOCATION, format!("/{}", n - 1)); + } + Ok::<_, Infallible>(res.body(n).unwrap()) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/and.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/and.rs new file mode 100644 index 0000000000000000000000000000000000000000..69d2b7da4a354f8f9d247b5fb976ca020eba716d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/and.rs @@ -0,0 +1,118 @@ +use super::{Action, Attempt, Policy}; +use http::Request; + +/// A redirection [`Policy`] that combines the results of two `Policy`s. +/// +/// See [`PolicyExt::and`][super::PolicyExt::and] for more details. 
+#[derive(Clone, Copy, Debug, Default)] +pub struct And { + a: A, + b: B, +} + +impl And { + pub(crate) fn new(a: A, b: B) -> Self + where + A: Policy, + B: Policy, + { + And { a, b } + } +} + +impl Policy for And +where + A: Policy, + B: Policy, +{ + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + match self.a.redirect(attempt) { + Ok(Action::Follow) => self.b.redirect(attempt), + a => a, + } + } + + fn on_request(&mut self, request: &mut Request) { + self.a.on_request(request); + self.b.on_request(request); + } + + fn clone_body(&self, body: &Bd) -> Option { + self.a.clone_body(body).or_else(|| self.b.clone_body(body)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::Uri; + + struct Taint

{ + policy: P, + used: bool, + } + + impl

Taint

{ + fn new(policy: P) -> Self { + Taint { + policy, + used: false, + } + } + } + + impl Policy for Taint

+ where + P: Policy, + { + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + self.used = true; + self.policy.redirect(attempt) + } + } + + #[test] + fn redirect() { + let attempt = Attempt { + status: Default::default(), + location: &Uri::from_static("*"), + previous: &Uri::from_static("*"), + }; + + let mut a = Taint::new(Action::Follow); + let mut b = Taint::new(Action::Follow); + let mut policy = And::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + assert!(a.used); + assert!(b.used); + + let mut a = Taint::new(Action::Stop); + let mut b = Taint::new(Action::Follow); + let mut policy = And::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + assert!(a.used); + assert!(!b.used); // short-circuiting + + let mut a = Taint::new(Action::Follow); + let mut b = Taint::new(Action::Stop); + let mut policy = And::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + assert!(a.used); + assert!(b.used); + + let mut a = Taint::new(Action::Stop); + let mut b = Taint::new(Action::Stop); + let mut policy = And::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + assert!(a.used); + assert!(!b.used); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/clone_body_fn.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/clone_body_fn.rs new file mode 100644 index 0000000000000000000000000000000000000000..d7d7cb7c7f2f887b1e62b0939ee1fe77e0b59754 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/clone_body_fn.rs @@ -0,0 +1,42 @@ +use super::{Action, Attempt, Policy}; +use std::fmt; + +/// A redirection [`Policy`] created from a 
closure. +/// +/// See [`clone_body_fn`] for more details. +#[derive(Clone, Copy)] +pub struct CloneBodyFn { + f: F, +} + +impl fmt::Debug for CloneBodyFn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("CloneBodyFn") + .field("f", &std::any::type_name::()) + .finish() + } +} + +impl Policy for CloneBodyFn +where + F: Fn(&B) -> Option, +{ + fn redirect(&mut self, _: &Attempt<'_>) -> Result { + Ok(Action::Follow) + } + + fn clone_body(&self, body: &B) -> Option { + (self.f)(body) + } +} + +/// Create a new redirection [`Policy`] from a closure `F: Fn(&B) -> Option`. +/// +/// [`clone_body`][Policy::clone_body] method of the returned `Policy` delegates to the wrapped +/// closure and [`redirect`][Policy::redirect] method always returns [`Action::Follow`]. +pub fn clone_body_fn(f: F) -> CloneBodyFn +where + F: Fn(&B) -> Option, +{ + CloneBodyFn { f } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/filter_credentials.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/filter_credentials.rs new file mode 100644 index 0000000000000000000000000000000000000000..fea80f1198a77be76ed9ef17be340201b6cdd99b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/filter_credentials.rs @@ -0,0 +1,161 @@ +use super::{eq_origin, Action, Attempt, Policy}; +use http::{ + header::{self, HeaderName}, + Request, +}; + +/// A redirection [`Policy`] that removes credentials from requests in redirections. 
+#[derive(Clone, Debug)] +pub struct FilterCredentials { + block_cross_origin: bool, + block_any: bool, + remove_blocklisted: bool, + remove_all: bool, + blocked: bool, +} + +const BLOCKLIST: &[HeaderName] = &[ + header::AUTHORIZATION, + header::COOKIE, + header::PROXY_AUTHORIZATION, +]; + +impl FilterCredentials { + /// Create a new [`FilterCredentials`] that removes blocklisted request headers in cross-origin + /// redirections. + pub fn new() -> Self { + FilterCredentials { + block_cross_origin: true, + block_any: false, + remove_blocklisted: true, + remove_all: false, + blocked: false, + } + } + + /// Configure `self` to mark cross-origin redirections as "blocked". + pub fn block_cross_origin(mut self, enable: bool) -> Self { + self.block_cross_origin = enable; + self + } + + /// Configure `self` to mark every redirection as "blocked". + pub fn block_any(mut self) -> Self { + self.block_any = true; + self + } + + /// Configure `self` to mark no redirections as "blocked". + pub fn block_none(mut self) -> Self { + self.block_any = false; + self.block_cross_origin(false) + } + + /// Configure `self` to remove blocklisted headers in "blocked" redirections. + /// + /// The blocklist includes the following headers: + /// + /// - `Authorization` + /// - `Cookie` + /// - `Proxy-Authorization` + pub fn remove_blocklisted(mut self, enable: bool) -> Self { + self.remove_blocklisted = enable; + self + } + + /// Configure `self` to remove all headers in "blocked" redirections. + pub fn remove_all(mut self) -> Self { + self.remove_all = true; + self + } + + /// Configure `self` to remove no headers in "blocked" redirections. 
+ pub fn remove_none(mut self) -> Self { + self.remove_all = false; + self.remove_blocklisted(false) + } +} + +impl Default for FilterCredentials { + fn default() -> Self { + Self::new() + } +} + +impl Policy for FilterCredentials { + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + self.blocked = self.block_any + || (self.block_cross_origin && !eq_origin(attempt.previous(), attempt.location())); + Ok(Action::Follow) + } + + fn on_request(&mut self, request: &mut Request) { + if self.blocked { + let headers = request.headers_mut(); + if self.remove_all { + headers.clear(); + } else if self.remove_blocklisted { + for key in BLOCKLIST { + headers.remove(key); + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::Uri; + + #[test] + fn works() { + let mut policy = FilterCredentials::default(); + + let initial = Uri::from_static("http://example.com/old"); + let same_origin = Uri::from_static("http://example.com/new"); + let cross_origin = Uri::from_static("https://example.com/new"); + + let mut request = Request::builder() + .uri(initial) + .header(header::COOKIE, "42") + .body(()) + .unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + assert!(request.headers().contains_key(header::COOKIE)); + + let attempt = Attempt { + status: Default::default(), + location: &same_origin, + previous: request.uri(), + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + + let mut request = Request::builder() + .uri(same_origin) + .header(header::COOKIE, "42") + .body(()) + .unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + assert!(request.headers().contains_key(header::COOKIE)); + + let attempt = Attempt { + status: Default::default(), + location: &cross_origin, + previous: request.uri(), + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + + let mut request = Request::builder() + .uri(cross_origin) + .header(header::COOKIE, "42") 
+ .body(()) + .unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + assert!(!request.headers().contains_key(header::COOKIE)); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/limited.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/limited.rs new file mode 100644 index 0000000000000000000000000000000000000000..a81b0d7924ca583ad6b5e10f69a14475ce27e8e1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/limited.rs @@ -0,0 +1,74 @@ +use super::{Action, Attempt, Policy}; + +/// A redirection [`Policy`] that limits the number of successive redirections. +#[derive(Clone, Copy, Debug)] +pub struct Limited { + remaining: usize, +} + +impl Limited { + /// Create a new [`Limited`] with a limit of `max` redirections. + pub fn new(max: usize) -> Self { + Limited { remaining: max } + } +} + +impl Default for Limited { + /// Returns the default [`Limited`] with a limit of `20` redirections. + fn default() -> Self { + // This is the (default) limit of Firefox and the Fetch API. 
+ // https://hg.mozilla.org/mozilla-central/file/6264f13d54a1caa4f5b60303617a819efd91b8ee/modules/libpref/init/all.js#l1371 + // https://fetch.spec.whatwg.org/#http-redirect-fetch + Limited::new(20) + } +} + +impl Policy for Limited { + fn redirect(&mut self, _: &Attempt<'_>) -> Result { + if self.remaining > 0 { + self.remaining -= 1; + Ok(Action::Follow) + } else { + Ok(Action::Stop) + } + } +} + +#[cfg(test)] +mod tests { + use http::{Request, Uri}; + + use super::*; + + #[test] + fn works() { + let uri = Uri::from_static("https://example.com/"); + let mut policy = Limited::new(2); + + for _ in 0..2 { + let mut request = Request::builder().uri(uri.clone()).body(()).unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + + let attempt = Attempt { + status: Default::default(), + location: &uri, + previous: &uri, + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + } + + let mut request = Request::builder().uri(uri.clone()).body(()).unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + + let attempt = Attempt { + status: Default::default(), + location: &uri, + previous: &uri, + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..8e5d39ce04c0113757f7361ec8fe07887d1f5dcb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/mod.rs @@ -0,0 +1,316 @@ +//! Tools for customizing the behavior of a [`FollowRedirect`][super::FollowRedirect] middleware. 
+ +mod and; +mod clone_body_fn; +mod filter_credentials; +mod limited; +mod or; +mod redirect_fn; +mod same_origin; + +pub use self::{ + and::And, + clone_body_fn::{clone_body_fn, CloneBodyFn}, + filter_credentials::FilterCredentials, + limited::Limited, + or::Or, + redirect_fn::{redirect_fn, RedirectFn}, + same_origin::SameOrigin, +}; + +use http::{uri::Scheme, Request, StatusCode, Uri}; + +/// Trait for the policy on handling redirection responses. +/// +/// # Example +/// +/// Detecting a cyclic redirection: +/// +/// ``` +/// use http::{Request, Uri}; +/// use std::collections::HashSet; +/// use tower_http::follow_redirect::policy::{Action, Attempt, Policy}; +/// +/// #[derive(Clone)] +/// pub struct DetectCycle { +/// uris: HashSet, +/// } +/// +/// impl Policy for DetectCycle { +/// fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { +/// if self.uris.contains(attempt.location()) { +/// Ok(Action::Stop) +/// } else { +/// self.uris.insert(attempt.previous().clone()); +/// Ok(Action::Follow) +/// } +/// } +/// } +/// ``` +pub trait Policy { + /// Invoked when the service received a response with a redirection status code (`3xx`). + /// + /// This method returns an [`Action`] which indicates whether the service should follow + /// the redirection. + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result; + + /// Invoked right before the service makes a request, regardless of whether it is redirected + /// or not. + /// + /// This can for example be used to remove sensitive headers from the request + /// or prepare the request in other ways. + /// + /// The default implementation does nothing. + fn on_request(&mut self, _request: &mut Request) {} + + /// Try to clone a request body before the service makes a redirected request. + /// + /// If the request body cannot be cloned, return `None`. 
+ /// + /// This is not invoked when [`B::size_hint`][http_body::Body::size_hint] returns zero, + /// in which case `B::default()` will be used to create a new request body. + /// + /// The default implementation returns `None`. + fn clone_body(&self, _body: &B) -> Option { + None + } +} + +impl Policy for &mut P +where + P: Policy + ?Sized, +{ + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + (**self).redirect(attempt) + } + + fn on_request(&mut self, request: &mut Request) { + (**self).on_request(request) + } + + fn clone_body(&self, body: &B) -> Option { + (**self).clone_body(body) + } +} + +impl Policy for Box

+where + P: Policy + ?Sized, +{ + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + (**self).redirect(attempt) + } + + fn on_request(&mut self, request: &mut Request) { + (**self).on_request(request) + } + + fn clone_body(&self, body: &B) -> Option { + (**self).clone_body(body) + } +} + +/// An extension trait for `Policy` that provides additional adapters. +pub trait PolicyExt { + /// Create a new `Policy` that returns [`Action::Follow`] only if `self` and `other` return + /// `Action::Follow`. + /// + /// [`clone_body`][Policy::clone_body] method of the returned `Policy` tries to clone the body + /// with both policies. + /// + /// # Example + /// + /// ``` + /// use bytes::Bytes; + /// use http_body_util::Full; + /// use tower_http::follow_redirect::policy::{self, clone_body_fn, Limited, PolicyExt}; + /// + /// enum MyBody { + /// Bytes(Bytes), + /// Full(Full), + /// } + /// + /// let policy = Limited::default().and::<_, _, ()>(clone_body_fn(|body| { + /// if let MyBody::Bytes(buf) = body { + /// Some(MyBody::Bytes(buf.clone())) + /// } else { + /// None + /// } + /// })); + /// ``` + fn and(self, other: P) -> And + where + Self: Policy + Sized, + P: Policy; + + /// Create a new `Policy` that returns [`Action::Follow`] if either `self` or `other` returns + /// `Action::Follow`. + /// + /// [`clone_body`][Policy::clone_body] method of the returned `Policy` tries to clone the body + /// with both policies. + /// + /// # Example + /// + /// ``` + /// use tower_http::follow_redirect::policy::{self, Action, Limited, PolicyExt}; + /// + /// #[derive(Clone)] + /// enum MyError { + /// TooManyRedirects, + /// // ... 
+ /// } + /// + /// let policy = Limited::default().or::<_, (), _>(Err(MyError::TooManyRedirects)); + /// ``` + fn or(self, other: P) -> Or + where + Self: Policy + Sized, + P: Policy; +} + +impl PolicyExt for T +where + T: ?Sized, +{ + fn and(self, other: P) -> And + where + Self: Policy + Sized, + P: Policy, + { + And::new(self, other) + } + + fn or(self, other: P) -> Or + where + Self: Policy + Sized, + P: Policy, + { + Or::new(self, other) + } +} + +/// A redirection [`Policy`] with a reasonable set of standard behavior. +/// +/// This policy limits the number of successive redirections ([`Limited`]) +/// and removes credentials from requests in cross-origin redirections ([`FilterCredentials`]). +pub type Standard = And; + +/// A type that holds information on a redirection attempt. +pub struct Attempt<'a> { + pub(crate) status: StatusCode, + pub(crate) location: &'a Uri, + pub(crate) previous: &'a Uri, +} + +impl<'a> Attempt<'a> { + /// Returns the redirection response. + pub fn status(&self) -> StatusCode { + self.status + } + + /// Returns the destination URI of the redirection. + pub fn location(&self) -> &'a Uri { + self.location + } + + /// Returns the URI of the original request. + pub fn previous(&self) -> &'a Uri { + self.previous + } +} + +/// A value returned by [`Policy::redirect`] which indicates the action +/// [`FollowRedirect`][super::FollowRedirect] should take for a redirection response. +#[derive(Clone, Copy, Debug)] +pub enum Action { + /// Follow the redirection. + Follow, + /// Do not follow the redirection, and return the redirection response as-is. + Stop, +} + +impl Action { + /// Returns `true` if the `Action` is a `Follow` value. + pub fn is_follow(&self) -> bool { + if let Action::Follow = self { + true + } else { + false + } + } + + /// Returns `true` if the `Action` is a `Stop` value. 
+ pub fn is_stop(&self) -> bool { + if let Action::Stop = self { + true + } else { + false + } + } +} + +impl Policy for Action { + fn redirect(&mut self, _: &Attempt<'_>) -> Result { + Ok(*self) + } +} + +impl Policy for Result +where + E: Clone, +{ + fn redirect(&mut self, _: &Attempt<'_>) -> Result { + self.clone() + } +} + +/// Compares the origins of two URIs as per RFC 6454 sections 4. through 5. +fn eq_origin(lhs: &Uri, rhs: &Uri) -> bool { + let default_port = match (lhs.scheme(), rhs.scheme()) { + (Some(l), Some(r)) if l == r => { + if l == &Scheme::HTTP { + 80 + } else if l == &Scheme::HTTPS { + 443 + } else { + return false; + } + } + _ => return false, + }; + match (lhs.host(), rhs.host()) { + (Some(l), Some(r)) if l == r => {} + _ => return false, + } + lhs.port_u16().unwrap_or(default_port) == rhs.port_u16().unwrap_or(default_port) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn eq_origin_works() { + assert!(eq_origin( + &Uri::from_static("https://example.com/1"), + &Uri::from_static("https://example.com/2") + )); + assert!(eq_origin( + &Uri::from_static("https://example.com:443/"), + &Uri::from_static("https://example.com/") + )); + assert!(eq_origin( + &Uri::from_static("https://example.com/"), + &Uri::from_static("https://user@example.com/") + )); + + assert!(!eq_origin( + &Uri::from_static("https://example.com/"), + &Uri::from_static("https://www.example.com/") + )); + assert!(!eq_origin( + &Uri::from_static("https://example.com/"), + &Uri::from_static("http://example.com/") + )); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/or.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/or.rs new file mode 100644 index 0000000000000000000000000000000000000000..858e57bd87edd585aaed6ec2c4a772aa61f4f899 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/or.rs @@ -0,0 +1,118 @@ +use super::{Action, Attempt, Policy}; +use http::Request; + +/// A redirection [`Policy`] that combines the results of two `Policy`s. +/// +/// See [`PolicyExt::or`][super::PolicyExt::or] for more details. +#[derive(Clone, Copy, Debug, Default)] +pub struct Or { + a: A, + b: B, +} + +impl Or { + pub(crate) fn new(a: A, b: B) -> Self + where + A: Policy, + B: Policy, + { + Or { a, b } + } +} + +impl Policy for Or +where + A: Policy, + B: Policy, +{ + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + match self.a.redirect(attempt) { + Ok(Action::Stop) | Err(_) => self.b.redirect(attempt), + a => a, + } + } + + fn on_request(&mut self, request: &mut Request) { + self.a.on_request(request); + self.b.on_request(request); + } + + fn clone_body(&self, body: &Bd) -> Option { + self.a.clone_body(body).or_else(|| self.b.clone_body(body)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::Uri; + + struct Taint

{ + policy: P, + used: bool, + } + + impl

Taint

{ + fn new(policy: P) -> Self { + Taint { + policy, + used: false, + } + } + } + + impl Policy for Taint

+ where + P: Policy, + { + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + self.used = true; + self.policy.redirect(attempt) + } + } + + #[test] + fn redirect() { + let attempt = Attempt { + status: Default::default(), + location: &Uri::from_static("*"), + previous: &Uri::from_static("*"), + }; + + let mut a = Taint::new(Action::Follow); + let mut b = Taint::new(Action::Follow); + let mut policy = Or::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + assert!(a.used); + assert!(!b.used); // short-circuiting + + let mut a = Taint::new(Action::Stop); + let mut b = Taint::new(Action::Follow); + let mut policy = Or::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + assert!(a.used); + assert!(b.used); + + let mut a = Taint::new(Action::Follow); + let mut b = Taint::new(Action::Stop); + let mut policy = Or::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + assert!(a.used); + assert!(!b.used); + + let mut a = Taint::new(Action::Stop); + let mut b = Taint::new(Action::Stop); + let mut policy = Or::new::<(), ()>(&mut a, &mut b); + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + assert!(a.used); + assert!(b.used); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/redirect_fn.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/redirect_fn.rs new file mode 100644 index 0000000000000000000000000000000000000000..a16593aca9c57e070c292259fe088b23e5cd0eb7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/redirect_fn.rs @@ -0,0 +1,39 @@ +use super::{Action, Attempt, Policy}; +use std::fmt; + +/// A redirection [`Policy`] created from a 
closure. +/// +/// See [`redirect_fn`] for more details. +#[derive(Clone, Copy)] +pub struct RedirectFn { + f: F, +} + +impl fmt::Debug for RedirectFn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RedirectFn") + .field("f", &std::any::type_name::()) + .finish() + } +} + +impl Policy for RedirectFn +where + F: FnMut(&Attempt<'_>) -> Result, +{ + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + (self.f)(attempt) + } +} + +/// Create a new redirection [`Policy`] from a closure +/// `F: FnMut(&Attempt<'_>) -> Result`. +/// +/// [`redirect`][Policy::redirect] method of the returned `Policy` delegates to +/// the wrapped closure. +pub fn redirect_fn(f: F) -> RedirectFn +where + F: FnMut(&Attempt<'_>) -> Result, +{ + RedirectFn { f } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/same_origin.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/same_origin.rs new file mode 100644 index 0000000000000000000000000000000000000000..cf7b7b1935086b6d7be0db189659b915c78fd335 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/follow_redirect/policy/same_origin.rs @@ -0,0 +1,70 @@ +use super::{eq_origin, Action, Attempt, Policy}; +use std::fmt; + +/// A redirection [`Policy`] that stops cross-origin redirections. +#[derive(Clone, Copy, Default)] +pub struct SameOrigin { + _priv: (), +} + +impl SameOrigin { + /// Create a new [`SameOrigin`]. 
+ pub fn new() -> Self { + Self::default() + } +} + +impl fmt::Debug for SameOrigin { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SameOrigin").finish() + } +} + +impl Policy for SameOrigin { + fn redirect(&mut self, attempt: &Attempt<'_>) -> Result { + if eq_origin(attempt.previous(), attempt.location()) { + Ok(Action::Follow) + } else { + Ok(Action::Stop) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::{Request, Uri}; + + #[test] + fn works() { + let mut policy = SameOrigin::default(); + + let initial = Uri::from_static("http://example.com/old"); + let same_origin = Uri::from_static("http://example.com/new"); + let cross_origin = Uri::from_static("https://example.com/new"); + + let mut request = Request::builder().uri(initial).body(()).unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + + let attempt = Attempt { + status: Default::default(), + location: &same_origin, + previous: request.uri(), + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_follow()); + + let mut request = Request::builder().uri(same_origin).body(()).unwrap(); + Policy::<(), ()>::on_request(&mut policy, &mut request); + + let attempt = Attempt { + status: Default::default(), + location: &cross_origin, + previous: request.uri(), + }; + assert!(Policy::<(), ()>::redirect(&mut policy, &attempt) + .unwrap() + .is_stop()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..0760f51466dba7ce8cb9ef5ef76fbb202195f3c6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/lib.rs @@ -0,0 +1,373 @@ +//! `async fn(HttpRequest) -> Result` +//! +//! # Overview +//! +//! tower-http is a library that provides HTTP-specific middleware and utilities built on top of +//! 
[tower]. +//! +//! All middleware uses the [http] and [http-body] crates as the HTTP abstractions. That means +//! they're compatible with any library or framework that also uses those crates, such as +//! [hyper], [tonic], and [warp]. +//! +//! # Example server +//! +//! This example shows how to apply middleware from tower-http to a [`Service`] and then run +//! that service using [hyper]. +//! +//! ```rust,no_run +//! use tower_http::{ +//! add_extension::AddExtensionLayer, +//! compression::CompressionLayer, +//! propagate_header::PropagateHeaderLayer, +//! sensitive_headers::SetSensitiveRequestHeadersLayer, +//! set_header::SetResponseHeaderLayer, +//! trace::TraceLayer, +//! validate_request::ValidateRequestHeaderLayer, +//! }; +//! use tower::{ServiceBuilder, service_fn, BoxError}; +//! use http::{Request, Response, header::{HeaderName, CONTENT_TYPE, AUTHORIZATION}}; +//! use std::{sync::Arc, net::SocketAddr, convert::Infallible, iter::once}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! # struct DatabaseConnectionPool; +//! # impl DatabaseConnectionPool { +//! # fn new() -> DatabaseConnectionPool { DatabaseConnectionPool } +//! # } +//! # fn content_length_from_response(_: &http::Response) -> Option { None } +//! # async fn update_in_flight_requests_metric(count: usize) {} +//! +//! // Our request handler. This is where we would implement the application logic +//! // for responding to HTTP requests... +//! async fn handler(request: Request>) -> Result>, BoxError> { +//! // ... +//! # todo!() +//! } +//! +//! // Shared state across all request handlers --- in this case, a pool of database connections. +//! struct State { +//! pool: DatabaseConnectionPool, +//! } +//! +//! #[tokio::main] +//! async fn main() { +//! // Construct the shared state. +//! let state = State { +//! pool: DatabaseConnectionPool::new(), +//! }; +//! +//! // Use tower's `ServiceBuilder` API to build a stack of tower middleware +//! // wrapping our request handler. +//! 
let service = ServiceBuilder::new() +//! // Mark the `Authorization` request header as sensitive so it doesn't show in logs +//! .layer(SetSensitiveRequestHeadersLayer::new(once(AUTHORIZATION))) +//! // High level logging of requests and responses +//! .layer(TraceLayer::new_for_http()) +//! // Share an `Arc` with all requests +//! .layer(AddExtensionLayer::new(Arc::new(state))) +//! // Compress responses +//! .layer(CompressionLayer::new()) +//! // Propagate `X-Request-Id`s from requests to responses +//! .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) +//! // If the response has a known size set the `Content-Length` header +//! .layer(SetResponseHeaderLayer::overriding(CONTENT_TYPE, content_length_from_response)) +//! // Authorize requests using a token +//! .layer(ValidateRequestHeaderLayer::bearer("passwordlol")) +//! // Accept only application/json, application/* and */* in a request's ACCEPT header +//! .layer(ValidateRequestHeaderLayer::accept("application/json")) +//! // Wrap a `Service` in our middleware stack +//! .service_fn(handler); +//! # let mut service = service; +//! # tower::Service::call(&mut service, Request::new(Full::default())); +//! } +//! ``` +//! +//! Keep in mind that while this example uses [hyper], tower-http supports any HTTP +//! client/server implementation that uses the [http] and [http-body] crates. +//! +//! # Example client +//! +//! tower-http middleware can also be applied to HTTP clients: +//! +//! ```rust,no_run +//! use tower_http::{ +//! decompression::DecompressionLayer, +//! set_header::SetRequestHeaderLayer, +//! trace::TraceLayer, +//! classify::StatusInRangeAsFailures, +//! }; +//! use tower::{ServiceBuilder, Service, ServiceExt}; +//! use hyper_util::{rt::TokioExecutor, client::legacy::Client}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use http::{Request, HeaderValue, header::USER_AGENT}; +//! +//! #[tokio::main] +//! async fn main() { +//! 
let client = Client::builder(TokioExecutor::new()).build_http(); +//! let mut client = ServiceBuilder::new() +//! // Add tracing and consider server errors and client +//! // errors as failures. +//! .layer(TraceLayer::new( +//! StatusInRangeAsFailures::new(400..=599).into_make_classifier() +//! )) +//! // Set a `User-Agent` header on all requests. +//! .layer(SetRequestHeaderLayer::overriding( +//! USER_AGENT, +//! HeaderValue::from_static("tower-http demo") +//! )) +//! // Decompress response bodies +//! .layer(DecompressionLayer::new()) +//! // Wrap a `Client` in our middleware stack. +//! // This is possible because `Client` implements +//! // `tower::Service`. +//! .service(client); +//! +//! // Make a request +//! let request = Request::builder() +//! .uri("http://example.com") +//! .body(Full::::default()) +//! .unwrap(); +//! +//! let response = client +//! .ready() +//! .await +//! .unwrap() +//! .call(request) +//! .await +//! .unwrap(); +//! } +//! ``` +//! +//! # Feature Flags +//! +//! All middleware are disabled by default and can be enabled using [cargo features]. +//! +//! For example, to enable the [`Trace`] middleware, add the "trace" feature flag in +//! your `Cargo.toml`: +//! +//! ```toml +//! tower-http = { version = "0.1", features = ["trace"] } +//! ``` +//! +//! You can use `"full"` to enable everything: +//! +//! ```toml +//! tower-http = { version = "0.1", features = ["full"] } +//! ``` +//! +//! # Getting Help +//! +//! If you're new to tower its [guides] might help. In the tower-http repo we also have a [number +//! of examples][examples] showing how to put everything together. You're also welcome to ask in +//! the [`#tower` Discord channel][chat] or open an [issue] with your question. +//! +//! [tower]: https://crates.io/crates/tower +//! [http]: https://crates.io/crates/http +//! [http-body]: https://crates.io/crates/http-body +//! [hyper]: https://crates.io/crates/hyper +//! 
[guides]: https://github.com/tower-rs/tower/tree/master/guides +//! [tonic]: https://crates.io/crates/tonic +//! [warp]: https://crates.io/crates/warp +//! [cargo features]: https://doc.rust-lang.org/cargo/reference/features.html +//! [`AddExtension`]: crate::add_extension::AddExtension +//! [`Service`]: https://docs.rs/tower/latest/tower/trait.Service.html +//! [chat]: https://discord.gg/tokio +//! [issue]: https://github.com/tower-rs/tower-http/issues/new +//! [`Trace`]: crate::trace::Trace +//! [examples]: https://github.com/tower-rs/tower-http/tree/master/examples + +#![warn( + clippy::all, + clippy::dbg_macro, + clippy::todo, + clippy::empty_enum, + clippy::enum_glob_use, + clippy::mem_forget, + clippy::unused_self, + clippy::filter_map_next, + clippy::needless_continue, + clippy::needless_borrow, + clippy::match_wildcard_for_single_variants, + clippy::if_let_mutex, + clippy::await_holding_lock, + clippy::match_on_vec_items, + clippy::imprecise_flops, + clippy::suboptimal_flops, + clippy::lossy_float_literal, + clippy::rest_pat_in_fully_bound_structs, + clippy::fn_params_excessive_bools, + clippy::exit, + clippy::inefficient_to_string, + clippy::linkedlist, + clippy::macro_use_imports, + clippy::option_option, + clippy::verbose_file_reads, + clippy::unnested_or_patterns, + rust_2018_idioms, + future_incompatible, + nonstandard_style, + missing_docs +)] +#![deny(unreachable_pub)] +#![allow( + elided_lifetimes_in_paths, + // TODO: Remove this once the MSRV bumps to 1.42.0 or above. 
+ clippy::match_like_matches_macro, + clippy::type_complexity +)] +#![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(test, allow(clippy::float_cmp))] + +#[macro_use] +pub(crate) mod macros; + +#[cfg(test)] +mod test_helpers; + +#[cfg(feature = "auth")] +pub mod auth; + +#[cfg(feature = "set-header")] +pub mod set_header; + +#[cfg(feature = "propagate-header")] +pub mod propagate_header; + +#[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", +))] +pub mod compression; + +#[cfg(feature = "add-extension")] +pub mod add_extension; + +#[cfg(feature = "sensitive-headers")] +pub mod sensitive_headers; + +#[cfg(any( + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", +))] +pub mod decompression; + +#[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", + feature = "fs" // Used for serving precompressed static files as well +))] +mod content_encoding; + +#[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", +))] +mod compression_utils; + +#[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", +))] +pub use compression_utils::CompressionLevel; + +#[cfg(feature = "map-response-body")] +pub mod map_response_body; + 
+#[cfg(feature = "map-request-body")] +pub mod map_request_body; + +#[cfg(feature = "trace")] +pub mod trace; + +#[cfg(feature = "follow-redirect")] +pub mod follow_redirect; + +#[cfg(feature = "limit")] +pub mod limit; + +#[cfg(feature = "metrics")] +pub mod metrics; + +#[cfg(feature = "cors")] +pub mod cors; + +#[cfg(feature = "request-id")] +pub mod request_id; + +#[cfg(feature = "catch-panic")] +pub mod catch_panic; + +#[cfg(feature = "set-status")] +pub mod set_status; + +#[cfg(feature = "timeout")] +pub mod timeout; + +#[cfg(feature = "normalize-path")] +pub mod normalize_path; + +pub mod classify; +pub mod services; + +#[cfg(feature = "util")] +mod builder; +#[cfg(feature = "util")] +mod service_ext; + +#[cfg(feature = "util")] +#[doc(inline)] +pub use self::{builder::ServiceBuilderExt, service_ext::ServiceExt}; + +#[cfg(feature = "validate-request")] +pub mod validate_request; + +#[cfg(any( + feature = "catch-panic", + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", + feature = "fs", + feature = "limit", +))] +pub mod body; + +/// The latency unit used to report latencies by middleware. +#[non_exhaustive] +#[derive(Copy, Clone, Debug)] +pub enum LatencyUnit { + /// Use seconds. + Seconds, + /// Use milliseconds. + Millis, + /// Use microseconds. + Micros, + /// Use nanoseconds. + Nanos, +} + +/// Alias for a type-erased error type. 
+pub type BoxError = Box; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/body.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/body.rs new file mode 100644 index 0000000000000000000000000000000000000000..4e540f8bea99cc3cff9e38f2051d34377c53894f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/body.rs @@ -0,0 +1,96 @@ +use bytes::Bytes; +use http::{HeaderValue, Response, StatusCode}; +use http_body::{Body, SizeHint}; +use http_body_util::Full; +use pin_project_lite::pin_project; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pin_project! { + /// Response body for [`RequestBodyLimit`]. + /// + /// [`RequestBodyLimit`]: super::RequestBodyLimit + pub struct ResponseBody { + #[pin] + inner: ResponseBodyInner + } +} + +impl ResponseBody { + fn payload_too_large() -> Self { + Self { + inner: ResponseBodyInner::PayloadTooLarge { + body: Full::from(BODY), + }, + } + } + + pub(crate) fn new(body: B) -> Self { + Self { + inner: ResponseBodyInner::Body { body }, + } + } +} + +pin_project! 
{ + #[project = BodyProj] + enum ResponseBodyInner { + PayloadTooLarge { + #[pin] + body: Full, + }, + Body { + #[pin] + body: B + } + } +} + +impl Body for ResponseBody +where + B: Body, +{ + type Data = Bytes; + type Error = B::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match self.project().inner.project() { + BodyProj::PayloadTooLarge { body } => body.poll_frame(cx).map_err(|err| match err {}), + BodyProj::Body { body } => body.poll_frame(cx), + } + } + + fn is_end_stream(&self) -> bool { + match &self.inner { + ResponseBodyInner::PayloadTooLarge { body } => body.is_end_stream(), + ResponseBodyInner::Body { body } => body.is_end_stream(), + } + } + + fn size_hint(&self) -> SizeHint { + match &self.inner { + ResponseBodyInner::PayloadTooLarge { body } => body.size_hint(), + ResponseBodyInner::Body { body } => body.size_hint(), + } + } +} + +const BODY: &[u8] = b"length limit exceeded"; + +pub(crate) fn create_error_response() -> Response> +where + B: Body, +{ + let mut res = Response::new(ResponseBody::payload_too_large()); + *res.status_mut() = StatusCode::PAYLOAD_TOO_LARGE; + + #[allow(clippy::declare_interior_mutable_const)] + const TEXT_PLAIN: HeaderValue = HeaderValue::from_static("text/plain; charset=utf-8"); + res.headers_mut() + .insert(http::header::CONTENT_TYPE, TEXT_PLAIN); + + res +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/future.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/future.rs new file mode 100644 index 0000000000000000000000000000000000000000..fd913c75de28834a1364b13f3fc38da27fa9e560 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/future.rs @@ -0,0 +1,60 @@ +use super::body::create_error_response; +use super::ResponseBody; +use http::Response; +use http_body::Body; +use pin_project_lite::pin_project; +use std::future::Future; +use 
std::pin::Pin; +use std::task::{ready, Context, Poll}; + +pin_project! { + /// Response future for [`RequestBodyLimit`]. + /// + /// [`RequestBodyLimit`]: super::RequestBodyLimit + pub struct ResponseFuture { + #[pin] + inner: ResponseFutureInner, + } +} + +impl ResponseFuture { + pub(crate) fn payload_too_large() -> Self { + Self { + inner: ResponseFutureInner::PayloadTooLarge, + } + } + + pub(crate) fn new(future: F) -> Self { + Self { + inner: ResponseFutureInner::Future { future }, + } + } +} + +pin_project! { + #[project = ResFutProj] + enum ResponseFutureInner { + PayloadTooLarge, + Future { + #[pin] + future: F, + } + } +} + +impl Future for ResponseFuture +where + ResBody: Body, + F: Future, E>>, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let res = match self.project().inner.project() { + ResFutProj::PayloadTooLarge => create_error_response(), + ResFutProj::Future { future } => ready!(future.poll(cx))?.map(ResponseBody::new), + }; + + Poll::Ready(Ok(res)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/layer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/layer.rs new file mode 100644 index 0000000000000000000000000000000000000000..2dcff71aace38cfd47aa3f7396ed135803daeae9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/layer.rs @@ -0,0 +1,32 @@ +use super::RequestBodyLimit; +use tower_layer::Layer; + +/// Layer that applies the [`RequestBodyLimit`] middleware that intercepts requests +/// with body lengths greater than the configured limit and converts them into +/// `413 Payload Too Large` responses. +/// +/// See the [module docs](crate::limit) for an example. 
+/// +/// [`RequestBodyLimit`]: super::RequestBodyLimit +#[derive(Clone, Copy, Debug)] +pub struct RequestBodyLimitLayer { + limit: usize, +} + +impl RequestBodyLimitLayer { + /// Create a new `RequestBodyLimitLayer` with the given body length limit. + pub fn new(limit: usize) -> Self { + Self { limit } + } +} + +impl Layer for RequestBodyLimitLayer { + type Service = RequestBodyLimit; + + fn layer(&self, inner: S) -> Self::Service { + RequestBodyLimit { + inner, + limit: self.limit, + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..3f2fede357dba4df7bef693ed1a40f9e3fc44bc2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/mod.rs @@ -0,0 +1,142 @@ +//! Middleware for limiting request bodies. +//! +//! This layer will also intercept requests with a `Content-Length` header +//! larger than the allowable limit and return an immediate error response +//! before reading any of the body. +//! +//! Note that payload length errors can be used by adversaries in an attempt +//! to smuggle requests. When an incoming stream is dropped due to an +//! over-sized payload, servers should close the connection or resynchronize +//! by optimistically consuming some data in an attempt to reach the end of +//! the current HTTP frame. If the incoming stream cannot be resynchronized, +//! then the connection should be closed. If you're using [hyper] this is +//! automatically handled for you. +//! +//! # Examples +//! +//! ## Limiting based on `Content-Length` +//! +//! If a `Content-Length` header is present and indicates a payload that is +//! larger than the acceptable limit, then the underlying service will not +//! be called and a `413 Payload Too Large` response will be generated. +//! +//! ```rust +//! 
use bytes::Bytes; +//! use std::convert::Infallible; +//! use http::{Request, Response, StatusCode, HeaderValue, header::CONTENT_LENGTH}; +//! use http_body_util::{LengthLimitError}; +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::{body::Limited, limit::RequestBodyLimitLayer}; +//! use http_body_util::Full; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! async fn handle(req: Request>>) -> Result>, Infallible> { +//! panic!("This should not be hit") +//! } +//! +//! let mut svc = ServiceBuilder::new() +//! // Limit incoming requests to 4096 bytes. +//! .layer(RequestBodyLimitLayer::new(4096)) +//! .service_fn(handle); +//! +//! // Call the service with a header that indicates the body is too large. +//! let mut request = Request::builder() +//! .header(CONTENT_LENGTH, HeaderValue::from_static("5000")) +//! .body(Full::::default()) +//! .unwrap(); +//! +//! // let response = svc.ready().await?.call(request).await?; +//! let response = svc.call(request).await?; +//! +//! assert_eq!(response.status(), StatusCode::PAYLOAD_TOO_LARGE); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Limiting without known `Content-Length` +//! +//! If a `Content-Length` header is not present, then the body will be read +//! until the configured limit has been reached. If the payload is larger than +//! the limit, the [`http_body_util::Limited`] body will return an error. This +//! error can be inspected to determine if it is a [`http_body_util::LengthLimitError`] +//! and return an appropriate response in such case. +//! +//! Note that no error will be generated if the body is never read. Similarly, +//! if the body _would be_ to large, but is never consumed beyond the length +//! limit, then no error is generated, and handling of the remaining incoming +//! data stream is left to the server implementation as described above. +//! +//! ```rust +//! # use bytes::Bytes; +//! # use std::convert::Infallible; +//! 
# use http::{Request, Response, StatusCode}; +//! # use http_body_util::LengthLimitError; +//! # use tower::{Service, ServiceExt, ServiceBuilder, BoxError}; +//! # use tower_http::{body::Limited, limit::RequestBodyLimitLayer}; +//! # use http_body_util::Full; +//! # use http_body_util::BodyExt; +//! # +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! async fn handle(req: Request>>) -> Result>, BoxError> { +//! let data = match req.into_body().collect().await { +//! Ok(collected) => collected.to_bytes(), +//! Err(err) => { +//! if let Some(_) = err.downcast_ref::() { +//! let mut resp = Response::new(Full::default()); +//! *resp.status_mut() = StatusCode::PAYLOAD_TOO_LARGE; +//! return Ok(resp); +//! } else { +//! return Err(err); +//! } +//! } +//! }; +//! +//! Ok(Response::new(Full::default())) +//! } +//! +//! let mut svc = ServiceBuilder::new() +//! // Limit incoming requests to 4096 bytes. +//! .layer(RequestBodyLimitLayer::new(4096)) +//! .service_fn(handle); +//! +//! // Call the service. +//! let request = Request::new(Full::::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.status(), StatusCode::OK); +//! +//! // Call the service with a body that is too large. +//! let request = Request::new(Full::::from(Bytes::from(vec![0u8; 4097]))); +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.status(), StatusCode::PAYLOAD_TOO_LARGE); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Limiting without `Content-Length` +//! +//! If enforcement of body size limits is desired without preemptively +//! handling requests with a `Content-Length` header indicating an over-sized +//! request, consider using [`MapRequestBody`] to wrap the request body with +//! [`http_body_util::Limited`] and checking for [`http_body_util::LengthLimitError`] +//! like in the previous example. +//! +//! [`MapRequestBody`]: crate::map_request_body +//! 
[hyper]: https://crates.io/crates/hyper + +mod body; +mod future; +mod layer; +mod service; + +pub use body::ResponseBody; +pub use future::ResponseFuture; +pub use layer::RequestBodyLimitLayer; +pub use service::RequestBodyLimit; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/service.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/service.rs new file mode 100644 index 0000000000000000000000000000000000000000..fdf65d256dccd8f72b20c2177eb4cd59276f9f8d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/limit/service.rs @@ -0,0 +1,64 @@ +use super::{RequestBodyLimitLayer, ResponseBody, ResponseFuture}; +use crate::body::Limited; +use http::{Request, Response}; +use http_body::Body; +use std::task::{Context, Poll}; +use tower_service::Service; + +/// Middleware that intercepts requests with body lengths greater than the +/// configured limit and converts them into `413 Payload Too Large` responses. +/// +/// See the [module docs](crate::limit) for an example. +#[derive(Clone, Copy, Debug)] +pub struct RequestBodyLimit { + pub(crate) inner: S, + pub(crate) limit: usize, +} + +impl RequestBodyLimit { + /// Create a new `RequestBodyLimit` with the given body length limit. + pub fn new(inner: S, limit: usize) -> Self { + Self { inner, limit } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `RequestBodyLimit` middleware. 
+ /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(limit: usize) -> RequestBodyLimitLayer { + RequestBodyLimitLayer::new(limit) + } +} + +impl Service> for RequestBodyLimit +where + ResBody: Body, + S: Service>, Response = Response>, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let content_length = req + .headers() + .get(http::header::CONTENT_LENGTH) + .and_then(|value| value.to_str().ok()?.parse::().ok()); + + let body_limit = match content_length { + Some(len) if len > self.limit => return ResponseFuture::payload_too_large(), + Some(len) => self.limit.min(len), + None => self.limit, + }; + + let req = req.map(|body| Limited::new(http_body_util::Limited::new(body, body_limit))); + + ResponseFuture::new(self.inner.call(req)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/macros.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/macros.rs new file mode 100644 index 0000000000000000000000000000000000000000..f58d34a6692be3929a2ba2ac0a6920dc8aa59618 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/macros.rs @@ -0,0 +1,105 @@ +#[allow(unused_macros)] +macro_rules! define_inner_service_accessors { + () => { + /// Gets a reference to the underlying service. + pub fn get_ref(&self) -> &S { + &self.inner + } + + /// Gets a mutable reference to the underlying service. + pub fn get_mut(&mut self) -> &mut S { + &mut self.inner + } + + /// Consumes `self`, returning the underlying service. + pub fn into_inner(self) -> S { + self.inner + } + }; +} + +#[allow(unused_macros)] +macro_rules! opaque_body { + ($(#[$m:meta])* pub type $name:ident = $actual:ty;) => { + opaque_body! 
{ + $(#[$m])* pub type $name<> = $actual; + } + }; + + ($(#[$m:meta])* pub type $name:ident<$($param:ident),*> = $actual:ty;) => { + pin_project_lite::pin_project! { + $(#[$m])* + pub struct $name<$($param),*> { + #[pin] + pub(crate) inner: $actual + } + } + + impl<$($param),*> $name<$($param),*> { + pub(crate) fn new(inner: $actual) -> Self { + Self { inner } + } + } + + impl<$($param),*> http_body::Body for $name<$($param),*> { + type Data = <$actual as http_body::Body>::Data; + type Error = <$actual as http_body::Body>::Error; + + #[inline] + fn poll_frame( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll, Self::Error>>> { + self.project().inner.poll_frame(cx) + } + + #[inline] + fn is_end_stream(&self) -> bool { + http_body::Body::is_end_stream(&self.inner) + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + http_body::Body::size_hint(&self.inner) + } + } + }; +} + +#[allow(unused_macros)] +macro_rules! opaque_future { + ($(#[$m:meta])* pub type $name:ident<$($param:ident),+> = $actual:ty;) => { + pin_project_lite::pin_project! 
{ + $(#[$m])* + pub struct $name<$($param),+> { + #[pin] + inner: $actual + } + } + + impl<$($param),+> $name<$($param),+> { + pub(crate) fn new(inner: $actual) -> Self { + Self { + inner + } + } + } + + impl<$($param),+> std::fmt::Debug for $name<$($param),+> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple(stringify!($name)).field(&format_args!("...")).finish() + } + } + + impl<$($param),+> std::future::Future for $name<$($param),+> + where + $actual: std::future::Future, + { + type Output = <$actual as std::future::Future>::Output; + #[inline] + fn poll(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll { + self.project().inner.poll(cx) + } + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/map_request_body.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/map_request_body.rs new file mode 100644 index 0000000000000000000000000000000000000000..dd067e924fff283840220aeda633e8ddcc9ad35e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/map_request_body.rs @@ -0,0 +1,157 @@ +//! Apply a transformation to the request body. +//! +//! # Example +//! +//! ``` +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use http::{Request, Response}; +//! use std::convert::Infallible; +//! use std::{pin::Pin, task::{ready, Context, Poll}}; +//! use tower::{ServiceBuilder, service_fn, ServiceExt, Service}; +//! use tower_http::map_request_body::MapRequestBodyLayer; +//! +//! // A wrapper for a `Full` +//! struct BodyWrapper { +//! inner: Full, +//! } +//! +//! impl BodyWrapper { +//! fn new(inner: Full) -> Self { +//! Self { inner } +//! } +//! } +//! +//! impl http_body::Body for BodyWrapper { +//! // ... +//! # type Data = Bytes; +//! # type Error = tower::BoxError; +//! # fn poll_frame( +//! # self: Pin<&mut Self>, +//! # cx: &mut Context<'_> +//! 
# ) -> Poll, Self::Error>>> { unimplemented!() } +//! # fn is_end_stream(&self) -> bool { unimplemented!() } +//! # fn size_hint(&self) -> http_body::SizeHint { unimplemented!() } +//! } +//! +//! async fn handle(_: Request) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let mut svc = ServiceBuilder::new() +//! // Wrap response bodies in `BodyWrapper` +//! .layer(MapRequestBodyLayer::new(BodyWrapper::new)) +//! .service_fn(handle); +//! +//! // Call the service +//! let request = Request::new(Full::default()); +//! +//! svc.ready().await?.call(request).await?; +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response}; +use std::{ + fmt, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Apply a transformation to the request body. +/// +/// See the [module docs](crate::map_request_body) for an example. +#[derive(Clone)] +pub struct MapRequestBodyLayer { + f: F, +} + +impl MapRequestBodyLayer { + /// Create a new [`MapRequestBodyLayer`]. + /// + /// `F` is expected to be a function that takes a body and returns another body. + pub fn new(f: F) -> Self { + Self { f } + } +} + +impl Layer for MapRequestBodyLayer +where + F: Clone, +{ + type Service = MapRequestBody; + + fn layer(&self, inner: S) -> Self::Service { + MapRequestBody::new(inner, self.f.clone()) + } +} + +impl fmt::Debug for MapRequestBodyLayer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapRequestBodyLayer") + .field("f", &std::any::type_name::()) + .finish() + } +} + +/// Apply a transformation to the request body. +/// +/// See the [module docs](crate::map_request_body) for an example. +#[derive(Clone)] +pub struct MapRequestBody { + inner: S, + f: F, +} + +impl MapRequestBody { + /// Create a new [`MapRequestBody`]. + /// + /// `F` is expected to be a function that takes a body and returns another body. 
+ pub fn new(service: S, f: F) -> Self { + Self { inner: service, f } + } + + /// Returns a new [`Layer`] that wraps services with a `MapRequestBodyLayer` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(f: F) -> MapRequestBodyLayer { + MapRequestBodyLayer::new(f) + } + + define_inner_service_accessors!(); +} + +impl Service> for MapRequestBody +where + S: Service, Response = Response>, + F: FnMut(ReqBody) -> NewReqBody, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let req = req.map(&mut self.f); + self.inner.call(req) + } +} + +impl fmt::Debug for MapRequestBody +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapRequestBody") + .field("inner", &self.inner) + .field("f", &std::any::type_name::()) + .finish() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/map_response_body.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/map_response_body.rs new file mode 100644 index 0000000000000000000000000000000000000000..5329e5d5ecb89eaba9f7273a53a65301916f1f46 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/map_response_body.rs @@ -0,0 +1,185 @@ +//! Apply a transformation to the response body. +//! +//! # Example +//! +//! ``` +//! use bytes::Bytes; +//! use http::{Request, Response}; +//! use http_body_util::Full; +//! use std::convert::Infallible; +//! use std::{pin::Pin, task::{ready, Context, Poll}}; +//! use tower::{ServiceBuilder, service_fn, ServiceExt, Service}; +//! use tower_http::map_response_body::MapResponseBodyLayer; +//! +//! // A wrapper for a `Full` +//! struct BodyWrapper { +//! inner: Full, +//! } +//! +//! impl BodyWrapper { +//! fn new(inner: Full) -> Self { +//! 
Self { inner } +//! } +//! } +//! +//! impl http_body::Body for BodyWrapper { +//! // ... +//! # type Data = Bytes; +//! # type Error = tower::BoxError; +//! # fn poll_frame( +//! # self: Pin<&mut Self>, +//! # cx: &mut Context<'_> +//! # ) -> Poll, Self::Error>>> { unimplemented!() } +//! # fn is_end_stream(&self) -> bool { unimplemented!() } +//! # fn size_hint(&self) -> http_body::SizeHint { unimplemented!() } +//! } +//! +//! async fn handle(_: Request) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let mut svc = ServiceBuilder::new() +//! // Wrap response bodies in `BodyWrapper` +//! .layer(MapResponseBodyLayer::new(BodyWrapper::new)) +//! .service_fn(handle); +//! +//! // Call the service +//! let request = Request::new(Full::::from("foobar")); +//! +//! svc.ready().await?.call(request).await?; +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response}; +use pin_project_lite::pin_project; +use std::future::Future; +use std::{ + fmt, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Apply a transformation to the response body. +/// +/// See the [module docs](crate::map_response_body) for an example. +#[derive(Clone)] +pub struct MapResponseBodyLayer { + f: F, +} + +impl MapResponseBodyLayer { + /// Create a new [`MapResponseBodyLayer`]. + /// + /// `F` is expected to be a function that takes a body and returns another body. 
+ pub fn new(f: F) -> Self { + Self { f } + } +} + +impl Layer for MapResponseBodyLayer +where + F: Clone, +{ + type Service = MapResponseBody; + + fn layer(&self, inner: S) -> Self::Service { + MapResponseBody::new(inner, self.f.clone()) + } +} + +impl fmt::Debug for MapResponseBodyLayer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapResponseBodyLayer") + .field("f", &std::any::type_name::()) + .finish() + } +} + +/// Apply a transformation to the response body. +/// +/// See the [module docs](crate::map_response_body) for an example. +#[derive(Clone)] +pub struct MapResponseBody { + inner: S, + f: F, +} + +impl MapResponseBody { + /// Create a new [`MapResponseBody`]. + /// + /// `F` is expected to be a function that takes a body and returns another body. + pub fn new(service: S, f: F) -> Self { + Self { inner: service, f } + } + + /// Returns a new [`Layer`] that wraps services with a `MapResponseBodyLayer` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(f: F) -> MapResponseBodyLayer { + MapResponseBodyLayer::new(f) + } + + define_inner_service_accessors!(); +} + +impl Service> for MapResponseBody +where + S: Service, Response = Response>, + F: FnMut(ResBody) -> NewResBody + Clone, +{ + type Response = Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + ResponseFuture { + inner: self.inner.call(req), + f: self.f.clone(), + } + } +} + +impl fmt::Debug for MapResponseBody +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MapResponseBody") + .field("inner", &self.inner) + .field("f", &std::any::type_name::()) + .finish() + } +} + +pin_project! { + /// Response future for [`MapResponseBody`]. 
+ pub struct ResponseFuture { + #[pin] + inner: Fut, + f: F, + } +} + +impl Future for ResponseFuture +where + Fut: Future, E>>, + F: FnMut(ResBody) -> NewResBody, +{ + type Output = Result, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let res = ready!(this.inner.poll(cx)?); + Poll::Ready(Ok(res.map(this.f))) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/metrics/in_flight_requests.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/metrics/in_flight_requests.rs new file mode 100644 index 0000000000000000000000000000000000000000..dbb5e2ffc244e7f7c9b85c0a2c3434fc0cc0c69a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/metrics/in_flight_requests.rs @@ -0,0 +1,327 @@ +//! Measure the number of in-flight requests. +//! +//! In-flight requests is the number of requests a service is currently processing. The processing +//! of a request starts when it is received by the service (`tower::Service::call` is called) and +//! is considered complete when the response body is consumed, dropped, or an error happens. +//! +//! # Example +//! +//! ``` +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::metrics::InFlightRequestsLayer; +//! use http::{Request, Response}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! use std::{time::Duration, convert::Infallible}; +//! +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! async fn update_in_flight_requests_metric(count: usize) { +//! // ... +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! // Create a `Layer` with an associated counter. +//! let (in_flight_requests_layer, counter) = InFlightRequestsLayer::pair(); +//! +//! // Spawn a task that will receive the number of in-flight requests every 10 seconds. +//! 
tokio::spawn( +//! counter.run_emitter(Duration::from_secs(10), |count| async move { +//! update_in_flight_requests_metric(count).await; +//! }), +//! ); +//! +//! let mut service = ServiceBuilder::new() +//! // Keep track of the number of in-flight requests. This will increment and decrement +//! // `counter` automatically. +//! .layer(in_flight_requests_layer) +//! .service_fn(handle); +//! +//! // Call the service. +//! let response = service +//! .ready() +//! .await? +//! .call(Request::new(Full::default())) +//! .await?; +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response}; +use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + task::{ready, Context, Poll}, + time::Duration, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer for applying [`InFlightRequests`] which counts the number of in-flight requests. +/// +/// See the [module docs](crate::metrics::in_flight_requests) for more details. +#[derive(Clone, Debug)] +pub struct InFlightRequestsLayer { + counter: InFlightRequestsCounter, +} + +impl InFlightRequestsLayer { + /// Create a new `InFlightRequestsLayer` and its associated counter. + pub fn pair() -> (Self, InFlightRequestsCounter) { + let counter = InFlightRequestsCounter::new(); + let layer = Self::new(counter.clone()); + (layer, counter) + } + + /// Create a new `InFlightRequestsLayer` that will update the given counter. + pub fn new(counter: InFlightRequestsCounter) -> Self { + Self { counter } + } +} + +impl Layer for InFlightRequestsLayer { + type Service = InFlightRequests; + + fn layer(&self, inner: S) -> Self::Service { + InFlightRequests { + inner, + counter: self.counter.clone(), + } + } +} + +/// Middleware that counts the number of in-flight requests. +/// +/// See the [module docs](crate::metrics::in_flight_requests) for more details. 
+#[derive(Clone, Debug)] +pub struct InFlightRequests { + inner: S, + counter: InFlightRequestsCounter, +} + +impl InFlightRequests { + /// Create a new `InFlightRequests` and its associated counter. + pub fn pair(inner: S) -> (Self, InFlightRequestsCounter) { + let counter = InFlightRequestsCounter::new(); + let service = Self::new(inner, counter.clone()); + (service, counter) + } + + /// Create a new `InFlightRequests` that will update the given counter. + pub fn new(inner: S, counter: InFlightRequestsCounter) -> Self { + Self { inner, counter } + } + + define_inner_service_accessors!(); +} + +/// An atomic counter that keeps track of the number of in-flight requests. +/// +/// This will normally combined with [`InFlightRequestsLayer`] or [`InFlightRequests`] which will +/// update the counter as requests arrive. +#[derive(Debug, Clone, Default)] +pub struct InFlightRequestsCounter { + count: Arc, +} + +impl InFlightRequestsCounter { + /// Create a new `InFlightRequestsCounter`. + pub fn new() -> Self { + Self::default() + } + + /// Get the current number of in-flight requests. + pub fn get(&self) -> usize { + self.count.load(Ordering::Relaxed) + } + + fn increment(&self) -> IncrementGuard { + self.count.fetch_add(1, Ordering::Relaxed); + IncrementGuard { + count: self.count.clone(), + } + } + + /// Run a future every `interval` which receives the current number of in-flight requests. + /// + /// This can be used to send the current count to your metrics system. + /// + /// This function will loop forever so normally it is called with [`tokio::spawn`]: + /// + /// ```rust,no_run + /// use tower_http::metrics::in_flight_requests::InFlightRequestsCounter; + /// use std::time::Duration; + /// + /// let counter = InFlightRequestsCounter::new(); + /// + /// tokio::spawn( + /// counter.run_emitter(Duration::from_secs(10), |count: usize| async move { + /// // Send `count` to metrics system. 
+ /// }), + /// ); + /// ``` + pub async fn run_emitter(mut self, interval: Duration, mut emit: F) + where + F: FnMut(usize) -> Fut + Send + 'static, + Fut: Future + Send, + { + let mut interval = tokio::time::interval(interval); + + loop { + // if all producers have gone away we don't need to emit anymore + match Arc::try_unwrap(self.count) { + Ok(_) => return, + Err(shared_count) => { + self = Self { + count: shared_count, + } + } + } + + interval.tick().await; + emit(self.get()).await; + } + } +} + +struct IncrementGuard { + count: Arc, +} + +impl Drop for IncrementGuard { + fn drop(&mut self) { + self.count.fetch_sub(1, Ordering::Relaxed); + } +} + +impl Service> for InFlightRequests +where + S: Service, Response = Response>, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let guard = self.counter.increment(); + ResponseFuture { + inner: self.inner.call(req), + guard: Some(guard), + } + } +} + +pin_project! { + /// Response future for [`InFlightRequests`]. + pub struct ResponseFuture { + #[pin] + inner: F, + guard: Option, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let response = ready!(this.inner.poll(cx))?; + let guard = this.guard.take().unwrap(); + let response = response.map(move |body| ResponseBody { inner: body, guard }); + + Poll::Ready(Ok(response)) + } +} + +pin_project! { + /// Response body for [`InFlightRequests`]. 
+ pub struct ResponseBody { + #[pin] + inner: B, + guard: IncrementGuard, + } +} + +impl Body for ResponseBody +where + B: Body, +{ + type Data = B::Data; + type Error = B::Error; + + #[inline] + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + self.project().inner.poll_frame(cx) + } + + #[inline] + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + self.inner.size_hint() + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use crate::test_helpers::Body; + use http::Request; + use tower::{BoxError, ServiceBuilder}; + + #[tokio::test] + async fn basic() { + let (in_flight_requests_layer, counter) = InFlightRequestsLayer::pair(); + + let mut service = ServiceBuilder::new() + .layer(in_flight_requests_layer) + .service_fn(echo); + assert_eq!(counter.get(), 0); + + // driving service to ready shouldn't increment the counter + std::future::poll_fn(|cx| service.poll_ready(cx)) + .await + .unwrap(); + assert_eq!(counter.get(), 0); + + // creating the response future should increment the count + let response_future = service.call(Request::new(Body::empty())); + assert_eq!(counter.get(), 1); + + // count shouldn't decrement until the full body has been comsumed + let response = response_future.await.unwrap(); + assert_eq!(counter.get(), 1); + + let body = response.into_body(); + crate::test_helpers::to_bytes(body).await.unwrap(); + assert_eq!(counter.get(), 0); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/metrics/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/metrics/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..317d17b8fef9aecbf776f39c99965a66530cd425 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/metrics/mod.rs @@ -0,0 +1,12 @@ +//! Middlewares for adding metrics to services. +//! +//! Supported metrics: +//! +//! - [In-flight requests][]: Measure the number of requests a service is currently processing. +//! +//! [In-flight requests]: in_flight_requests + +pub mod in_flight_requests; + +#[doc(inline)] +pub use self::in_flight_requests::{InFlightRequests, InFlightRequestsLayer}; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/normalize_path.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/normalize_path.rs new file mode 100644 index 0000000000000000000000000000000000000000..f9b9dd2e58f4fd09cc59c0e86cff560a74ab9535 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/normalize_path.rs @@ -0,0 +1,384 @@ +//! Middleware that normalizes paths. +//! +//! # Example +//! +//! ``` +//! use tower_http::normalize_path::NormalizePathLayer; +//! use http::{Request, Response, StatusCode}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use std::{iter::once, convert::Infallible}; +//! use tower::{ServiceBuilder, Service, ServiceExt}; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // `req.uri().path()` will not have trailing slashes +//! # Ok(Response::new(Full::default())) +//! } +//! +//! let mut service = ServiceBuilder::new() +//! // trim trailing slashes from paths +//! .layer(NormalizePathLayer::trim_trailing_slash()) +//! .service_fn(handle); +//! +//! // call the service +//! let request = Request::builder() +//! // `handle` will see `/foo` +//! .uri("/foo/") +//! .body(Full::default())?; +//! +//! service.ready().await?.call(request).await?; +//! # +//! # Ok(()) +//! # } +//! 
``` + +use http::{Request, Response, Uri}; +use std::{ + borrow::Cow, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Different modes of normalizing paths +#[derive(Debug, Copy, Clone)] +enum NormalizeMode { + /// Normalizes paths by trimming the trailing slashes, e.g. /foo/ -> /foo + Trim, + /// Normalizes paths by appending trailing slash, e.g. /foo -> /foo/ + Append, +} + +/// Layer that applies [`NormalizePath`] which normalizes paths. +/// +/// See the [module docs](self) for more details. +#[derive(Debug, Copy, Clone)] +pub struct NormalizePathLayer { + mode: NormalizeMode, +} + +impl NormalizePathLayer { + /// Create a new [`NormalizePathLayer`]. + /// + /// Any trailing slashes from request paths will be removed. For example, a request with `/foo/` + /// will be changed to `/foo` before reaching the inner service. + pub fn trim_trailing_slash() -> Self { + NormalizePathLayer { + mode: NormalizeMode::Trim, + } + } + + /// Create a new [`NormalizePathLayer`]. + /// + /// Request paths without trailing slash will be appended with a trailing slash. For example, a request with `/foo` + /// will be changed to `/foo/` before reaching the inner service. + pub fn append_trailing_slash() -> Self { + NormalizePathLayer { + mode: NormalizeMode::Append, + } + } +} + +impl Layer for NormalizePathLayer { + type Service = NormalizePath; + + fn layer(&self, inner: S) -> Self::Service { + NormalizePath { + mode: self.mode, + inner, + } + } +} + +/// Middleware that normalizes paths. +/// +/// See the [module docs](self) for more details. +#[derive(Debug, Copy, Clone)] +pub struct NormalizePath { + mode: NormalizeMode, + inner: S, +} + +impl NormalizePath { + /// Construct a new [`NormalizePath`] with trim mode. + pub fn trim_trailing_slash(inner: S) -> Self { + Self { + mode: NormalizeMode::Trim, + inner, + } + } + + /// Construct a new [`NormalizePath`] with append mode. 
+ pub fn append_trailing_slash(inner: S) -> Self { + Self { + mode: NormalizeMode::Append, + inner, + } + } + + define_inner_service_accessors!(); +} + +impl Service> for NormalizePath +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + match self.mode { + NormalizeMode::Trim => trim_trailing_slash(req.uri_mut()), + NormalizeMode::Append => append_trailing_slash(req.uri_mut()), + } + self.inner.call(req) + } +} + +fn trim_trailing_slash(uri: &mut Uri) { + if !uri.path().ends_with('/') && !uri.path().starts_with("//") { + return; + } + + let new_path = format!("/{}", uri.path().trim_matches('/')); + + let mut parts = uri.clone().into_parts(); + + let new_path_and_query = if let Some(path_and_query) = &parts.path_and_query { + let new_path_and_query = if let Some(query) = path_and_query.query() { + Cow::Owned(format!("{}?{}", new_path, query)) + } else { + new_path.into() + } + .parse() + .unwrap(); + + Some(new_path_and_query) + } else { + None + }; + + parts.path_and_query = new_path_and_query; + if let Ok(new_uri) = Uri::from_parts(parts) { + *uri = new_uri; + } +} + +fn append_trailing_slash(uri: &mut Uri) { + if uri.path().ends_with("/") && !uri.path().ends_with("//") { + return; + } + + let trimmed = uri.path().trim_matches('/'); + let new_path = if trimmed.is_empty() { + "/".to_string() + } else { + format!("/{trimmed}/") + }; + + let mut parts = uri.clone().into_parts(); + + let new_path_and_query = if let Some(path_and_query) = &parts.path_and_query { + let new_path_and_query = if let Some(query) = path_and_query.query() { + Cow::Owned(format!("{new_path}?{query}")) + } else { + new_path.into() + } + .parse() + .unwrap(); + + Some(new_path_and_query) + } else { + Some(new_path.parse().unwrap()) + }; + + 
parts.path_and_query = new_path_and_query; + if let Ok(new_uri) = Uri::from_parts(parts) { + *uri = new_uri; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::convert::Infallible; + use tower::{ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn trim_works() { + async fn handle(request: Request<()>) -> Result, Infallible> { + Ok(Response::new(request.uri().to_string())) + } + + let mut svc = ServiceBuilder::new() + .layer(NormalizePathLayer::trim_trailing_slash()) + .service_fn(handle); + + let body = svc + .ready() + .await + .unwrap() + .call(Request::builder().uri("/foo/").body(()).unwrap()) + .await + .unwrap() + .into_body(); + + assert_eq!(body, "/foo"); + } + + #[test] + fn is_noop_if_no_trailing_slash() { + let mut uri = "/foo".parse::().unwrap(); + trim_trailing_slash(&mut uri); + assert_eq!(uri, "/foo"); + } + + #[test] + fn maintains_query() { + let mut uri = "/foo/?a=a".parse::().unwrap(); + trim_trailing_slash(&mut uri); + assert_eq!(uri, "/foo?a=a"); + } + + #[test] + fn removes_multiple_trailing_slashes() { + let mut uri = "/foo////".parse::().unwrap(); + trim_trailing_slash(&mut uri); + assert_eq!(uri, "/foo"); + } + + #[test] + fn removes_multiple_trailing_slashes_even_with_query() { + let mut uri = "/foo////?a=a".parse::().unwrap(); + trim_trailing_slash(&mut uri); + assert_eq!(uri, "/foo?a=a"); + } + + #[test] + fn is_noop_on_index() { + let mut uri = "/".parse::().unwrap(); + trim_trailing_slash(&mut uri); + assert_eq!(uri, "/"); + } + + #[test] + fn removes_multiple_trailing_slashes_on_index() { + let mut uri = "////".parse::().unwrap(); + trim_trailing_slash(&mut uri); + assert_eq!(uri, "/"); + } + + #[test] + fn removes_multiple_trailing_slashes_on_index_even_with_query() { + let mut uri = "////?a=a".parse::().unwrap(); + trim_trailing_slash(&mut uri); + assert_eq!(uri, "/?a=a"); + } + + #[test] + fn removes_multiple_preceding_slashes_even_with_query() { + let mut uri = "///foo//?a=a".parse::().unwrap(); + 
trim_trailing_slash(&mut uri); + assert_eq!(uri, "/foo?a=a"); + } + + #[test] + fn removes_multiple_preceding_slashes() { + let mut uri = "///foo".parse::().unwrap(); + trim_trailing_slash(&mut uri); + assert_eq!(uri, "/foo"); + } + + #[tokio::test] + async fn append_works() { + async fn handle(request: Request<()>) -> Result, Infallible> { + Ok(Response::new(request.uri().to_string())) + } + + let mut svc = ServiceBuilder::new() + .layer(NormalizePathLayer::append_trailing_slash()) + .service_fn(handle); + + let body = svc + .ready() + .await + .unwrap() + .call(Request::builder().uri("/foo").body(()).unwrap()) + .await + .unwrap() + .into_body(); + + assert_eq!(body, "/foo/"); + } + + #[test] + fn is_noop_if_trailing_slash() { + let mut uri = "/foo/".parse::().unwrap(); + append_trailing_slash(&mut uri); + assert_eq!(uri, "/foo/"); + } + + #[test] + fn append_maintains_query() { + let mut uri = "/foo?a=a".parse::().unwrap(); + append_trailing_slash(&mut uri); + assert_eq!(uri, "/foo/?a=a"); + } + + #[test] + fn append_only_keeps_one_slash() { + let mut uri = "/foo////".parse::().unwrap(); + append_trailing_slash(&mut uri); + assert_eq!(uri, "/foo/"); + } + + #[test] + fn append_only_keeps_one_slash_even_with_query() { + let mut uri = "/foo////?a=a".parse::().unwrap(); + append_trailing_slash(&mut uri); + assert_eq!(uri, "/foo/?a=a"); + } + + #[test] + fn append_is_noop_on_index() { + let mut uri = "/".parse::().unwrap(); + append_trailing_slash(&mut uri); + assert_eq!(uri, "/"); + } + + #[test] + fn append_removes_multiple_trailing_slashes_on_index() { + let mut uri = "////".parse::().unwrap(); + append_trailing_slash(&mut uri); + assert_eq!(uri, "/"); + } + + #[test] + fn append_removes_multiple_trailing_slashes_on_index_even_with_query() { + let mut uri = "////?a=a".parse::().unwrap(); + append_trailing_slash(&mut uri); + assert_eq!(uri, "/?a=a"); + } + + #[test] + fn append_removes_multiple_preceding_slashes_even_with_query() { + let mut uri = 
"///foo//?a=a".parse::().unwrap(); + append_trailing_slash(&mut uri); + assert_eq!(uri, "/foo/?a=a"); + } + + #[test] + fn append_removes_multiple_preceding_slashes() { + let mut uri = "///foo".parse::().unwrap(); + append_trailing_slash(&mut uri); + assert_eq!(uri, "/foo/"); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/propagate_header.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/propagate_header.rs new file mode 100644 index 0000000000000000000000000000000000000000..6c77ec325cebbff120058df68a1a6bdc938b7a23 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/propagate_header.rs @@ -0,0 +1,154 @@ +//! Propagate a header from the request to the response. +//! +//! # Example +//! +//! ```rust +//! use http::{Request, Response, header::HeaderName}; +//! use std::convert::Infallible; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use tower_http::propagate_header::PropagateHeaderLayer; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! let mut svc = ServiceBuilder::new() +//! // This will copy `x-request-id` headers from requests onto responses. +//! .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) +//! .service_fn(handle); +//! +//! // Call the service. +//! let request = Request::builder() +//! .header("x-request-id", "1337") +//! .body(Full::default())?; +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.headers()["x-request-id"], "1337"); +//! # +//! # Ok(()) +//! # } +//! 
``` + +use http::{header::HeaderName, HeaderValue, Request, Response}; +use pin_project_lite::pin_project; +use std::future::Future; +use std::{ + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`PropagateHeader`] which propagates headers from requests to responses. +/// +/// If the header is present on the request it'll be applied to the response as well. This could +/// for example be used to propagate headers such as `X-Request-Id`. +/// +/// See the [module docs](crate::propagate_header) for more details. +#[derive(Clone, Debug)] +pub struct PropagateHeaderLayer { + header: HeaderName, +} + +impl PropagateHeaderLayer { + /// Create a new [`PropagateHeaderLayer`]. + pub fn new(header: HeaderName) -> Self { + Self { header } + } +} + +impl Layer for PropagateHeaderLayer { + type Service = PropagateHeader; + + fn layer(&self, inner: S) -> Self::Service { + PropagateHeader { + inner, + header: self.header.clone(), + } + } +} + +/// Middleware that propagates headers from requests to responses. +/// +/// If the header is present on the request it'll be applied to the response as well. This could +/// for example be used to propagate headers such as `X-Request-Id`. +/// +/// See the [module docs](crate::propagate_header) for more details. +#[derive(Clone, Debug)] +pub struct PropagateHeader { + inner: S, + header: HeaderName, +} + +impl PropagateHeader { + /// Create a new [`PropagateHeader`] that propagates the given header. + pub fn new(inner: S, header: HeaderName) -> Self { + Self { inner, header } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `PropagateHeader` middleware. 
+ /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(header: HeaderName) -> PropagateHeaderLayer { + PropagateHeaderLayer::new(header) + } +} + +impl Service> for PropagateHeader +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let value = req.headers().get(&self.header).cloned(); + + ResponseFuture { + future: self.inner.call(req), + header_and_value: Some(self.header.clone()).zip(value), + } + } +} + +pin_project! { + /// Response future for [`PropagateHeader`]. + #[derive(Debug)] + pub struct ResponseFuture { + #[pin] + future: F, + header_and_value: Option<(HeaderName, HeaderValue)>, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let mut res = ready!(this.future.poll(cx)?); + + if let Some((header, value)) = this.header_and_value.take() { + res.headers_mut().insert(header, value); + } + + Poll::Ready(Ok(res)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/request_id.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/request_id.rs new file mode 100644 index 0000000000000000000000000000000000000000..3c8c43fa315942c3eee944773695bf0498c391cb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/request_id.rs @@ -0,0 +1,604 @@ +//! Set and propagate request ids. +//! +//! # Example +//! +//! ``` +//! use http::{Request, Response, header::HeaderName}; +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::request_id::{ +//! SetRequestIdLayer, PropagateRequestIdLayer, MakeRequestId, RequestId, +//! }; +//! use http_body_util::Full; +//! 
use bytes::Bytes; +//! use std::sync::{Arc, atomic::{AtomicU64, Ordering}}; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # let handler = tower::service_fn(|request: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(request.into_body())) +//! # }); +//! # +//! // A `MakeRequestId` that increments an atomic counter +//! #[derive(Clone, Default)] +//! struct MyMakeRequestId { +//! counter: Arc, +//! } +//! +//! impl MakeRequestId for MyMakeRequestId { +//! fn make_request_id(&mut self, request: &Request) -> Option { +//! let request_id = self.counter +//! .fetch_add(1, Ordering::SeqCst) +//! .to_string() +//! .parse() +//! .unwrap(); +//! +//! Some(RequestId::new(request_id)) +//! } +//! } +//! +//! let x_request_id = HeaderName::from_static("x-request-id"); +//! +//! let mut svc = ServiceBuilder::new() +//! // set `x-request-id` header on all requests +//! .layer(SetRequestIdLayer::new( +//! x_request_id.clone(), +//! MyMakeRequestId::default(), +//! )) +//! // propagate `x-request-id` headers from request to response +//! .layer(PropagateRequestIdLayer::new(x_request_id)) +//! .service(handler); +//! +//! let request = Request::new(Full::default()); +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.headers()["x-request-id"], "0"); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! Additional convenience methods are available on [`ServiceBuilderExt`]: +//! +//! ``` +//! use tower_http::ServiceBuilderExt; +//! # use http::{Request, Response, header::HeaderName}; +//! # use tower::{Service, ServiceExt, ServiceBuilder}; +//! # use tower_http::request_id::{ +//! # SetRequestIdLayer, PropagateRequestIdLayer, MakeRequestId, RequestId, +//! # }; +//! # use bytes::Bytes; +//! # use http_body_util::Full; +//! # use std::sync::{Arc, atomic::{AtomicU64, Ordering}}; +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! 
# let handler = tower::service_fn(|request: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(request.into_body())) +//! # }); +//! # #[derive(Clone, Default)] +//! # struct MyMakeRequestId { +//! # counter: Arc, +//! # } +//! # impl MakeRequestId for MyMakeRequestId { +//! # fn make_request_id(&mut self, request: &Request) -> Option { +//! # let request_id = self.counter +//! # .fetch_add(1, Ordering::SeqCst) +//! # .to_string() +//! # .parse() +//! # .unwrap(); +//! # Some(RequestId::new(request_id)) +//! # } +//! # } +//! +//! let mut svc = ServiceBuilder::new() +//! .set_x_request_id(MyMakeRequestId::default()) +//! .propagate_x_request_id() +//! .service(handler); +//! +//! let request = Request::new(Full::default()); +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.headers()["x-request-id"], "0"); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! See [`SetRequestId`] and [`PropagateRequestId`] for more details. +//! +//! # Using `Trace` +//! +//! To have request ids show up correctly in logs produced by [`Trace`] you must apply the layers +//! in this order: +//! +//! ``` +//! use tower_http::{ +//! ServiceBuilderExt, +//! trace::{TraceLayer, DefaultMakeSpan, DefaultOnResponse}, +//! }; +//! # use http::{Request, Response, header::HeaderName}; +//! # use tower::{Service, ServiceExt, ServiceBuilder}; +//! # use tower_http::request_id::{ +//! # SetRequestIdLayer, PropagateRequestIdLayer, MakeRequestId, RequestId, +//! # }; +//! # use http_body_util::Full; +//! # use bytes::Bytes; +//! # use std::sync::{Arc, atomic::{AtomicU64, Ordering}}; +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # let handler = tower::service_fn(|request: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(request.into_body())) +//! # }); +//! # #[derive(Clone, Default)] +//! # struct MyMakeRequestId { +//! # counter: Arc, +//! # } +//! 
# impl MakeRequestId for MyMakeRequestId { +//! # fn make_request_id(&mut self, request: &Request) -> Option { +//! # let request_id = self.counter +//! # .fetch_add(1, Ordering::SeqCst) +//! # .to_string() +//! # .parse() +//! # .unwrap(); +//! # Some(RequestId::new(request_id)) +//! # } +//! # } +//! +//! let svc = ServiceBuilder::new() +//! // make sure to set request ids before the request reaches `TraceLayer` +//! .set_x_request_id(MyMakeRequestId::default()) +//! // log requests and responses +//! .layer( +//! TraceLayer::new_for_http() +//! .make_span_with(DefaultMakeSpan::new().include_headers(true)) +//! .on_response(DefaultOnResponse::new().include_headers(true)) +//! ) +//! // propagate the header to the response before the response reaches `TraceLayer` +//! .propagate_x_request_id() +//! .service(handler); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! # Doesn't override existing headers +//! +//! [`SetRequestId`] and [`PropagateRequestId`] wont override request ids if its already present on +//! requests or responses. Among other things, this allows other middleware to conditionally set +//! request ids and use the middleware in this module as a fallback. +//! +//! [`ServiceBuilderExt`]: crate::ServiceBuilderExt +//! [`Uuid`]: https://crates.io/crates/uuid +//! [`Trace`]: crate::trace::Trace + +use http::{ + header::{HeaderName, HeaderValue}, + Request, Response, +}; +use pin_project_lite::pin_project; +use std::task::{ready, Context, Poll}; +use std::{future::Future, pin::Pin}; +use tower_layer::Layer; +use tower_service::Service; +use uuid::Uuid; + +pub(crate) const X_REQUEST_ID: HeaderName = HeaderName::from_static("x-request-id"); + +/// Trait for producing [`RequestId`]s. +/// +/// Used by [`SetRequestId`]. +pub trait MakeRequestId { + /// Try and produce a [`RequestId`] from the request. + fn make_request_id(&mut self, request: &Request) -> Option; +} + +/// An identifier for a request. 
+#[derive(Debug, Clone)] +pub struct RequestId(HeaderValue); + +impl RequestId { + /// Create a new `RequestId` from a [`HeaderValue`]. + pub fn new(header_value: HeaderValue) -> Self { + Self(header_value) + } + + /// Gets a reference to the underlying [`HeaderValue`]. + pub fn header_value(&self) -> &HeaderValue { + &self.0 + } + + /// Consumes `self`, returning the underlying [`HeaderValue`]. + pub fn into_header_value(self) -> HeaderValue { + self.0 + } +} + +impl From for RequestId { + fn from(value: HeaderValue) -> Self { + Self::new(value) + } +} + +/// Set request id headers and extensions on requests. +/// +/// This layer applies the [`SetRequestId`] middleware. +/// +/// See the [module docs](self) and [`SetRequestId`] for more details. +#[derive(Debug, Clone)] +pub struct SetRequestIdLayer { + header_name: HeaderName, + make_request_id: M, +} + +impl SetRequestIdLayer { + /// Create a new `SetRequestIdLayer`. + pub fn new(header_name: HeaderName, make_request_id: M) -> Self + where + M: MakeRequestId, + { + SetRequestIdLayer { + header_name, + make_request_id, + } + } + + /// Create a new `SetRequestIdLayer` that uses `x-request-id` as the header name. + pub fn x_request_id(make_request_id: M) -> Self + where + M: MakeRequestId, + { + SetRequestIdLayer::new(X_REQUEST_ID, make_request_id) + } +} + +impl Layer for SetRequestIdLayer +where + M: Clone + MakeRequestId, +{ + type Service = SetRequestId; + + fn layer(&self, inner: S) -> Self::Service { + SetRequestId::new( + inner, + self.header_name.clone(), + self.make_request_id.clone(), + ) + } +} + +/// Set request id headers and extensions on requests. +/// +/// See the [module docs](self) for an example. +/// +/// If [`MakeRequestId::make_request_id`] returns `Some(_)` and the request doesn't already have a +/// header with the same name, then the header will be inserted. +/// +/// Additionally [`RequestId`] will be inserted into [`Request::extensions`] so other +/// services can access it. 
+#[derive(Debug, Clone)] +pub struct SetRequestId { + inner: S, + header_name: HeaderName, + make_request_id: M, +} + +impl SetRequestId { + /// Create a new `SetRequestId`. + pub fn new(inner: S, header_name: HeaderName, make_request_id: M) -> Self + where + M: MakeRequestId, + { + Self { + inner, + header_name, + make_request_id, + } + } + + /// Create a new `SetRequestId` that uses `x-request-id` as the header name. + pub fn x_request_id(inner: S, make_request_id: M) -> Self + where + M: MakeRequestId, + { + Self::new(inner, X_REQUEST_ID, make_request_id) + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `SetRequestId` middleware. + pub fn layer(header_name: HeaderName, make_request_id: M) -> SetRequestIdLayer + where + M: MakeRequestId, + { + SetRequestIdLayer::new(header_name, make_request_id) + } +} + +impl Service> for SetRequestId +where + S: Service, Response = Response>, + M: MakeRequestId, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + if let Some(request_id) = req.headers().get(&self.header_name) { + if req.extensions().get::().is_none() { + let request_id = request_id.clone(); + req.extensions_mut().insert(RequestId::new(request_id)); + } + } else if let Some(request_id) = self.make_request_id.make_request_id(&req) { + req.extensions_mut().insert(request_id.clone()); + req.headers_mut() + .insert(self.header_name.clone(), request_id.0); + } + + self.inner.call(req) + } +} + +/// Propagate request ids from requests to responses. +/// +/// This layer applies the [`PropagateRequestId`] middleware. +/// +/// See the [module docs](self) and [`PropagateRequestId`] for more details. 
+#[derive(Debug, Clone)] +pub struct PropagateRequestIdLayer { + header_name: HeaderName, +} + +impl PropagateRequestIdLayer { + /// Create a new `PropagateRequestIdLayer`. + pub fn new(header_name: HeaderName) -> Self { + PropagateRequestIdLayer { header_name } + } + + /// Create a new `PropagateRequestIdLayer` that uses `x-request-id` as the header name. + pub fn x_request_id() -> Self { + Self::new(X_REQUEST_ID) + } +} + +impl Layer for PropagateRequestIdLayer { + type Service = PropagateRequestId; + + fn layer(&self, inner: S) -> Self::Service { + PropagateRequestId::new(inner, self.header_name.clone()) + } +} + +/// Propagate request ids from requests to responses. +/// +/// See the [module docs](self) for an example. +/// +/// If the request contains a matching header that header will be applied to responses. If a +/// [`RequestId`] extension is also present it will be propagated as well. +#[derive(Debug, Clone)] +pub struct PropagateRequestId { + inner: S, + header_name: HeaderName, +} + +impl PropagateRequestId { + /// Create a new `PropagateRequestId`. + pub fn new(inner: S, header_name: HeaderName) -> Self { + Self { inner, header_name } + } + + /// Create a new `PropagateRequestId` that uses `x-request-id` as the header name. + pub fn x_request_id(inner: S) -> Self { + Self::new(inner, X_REQUEST_ID) + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `PropagateRequestId` middleware. 
+ pub fn layer(header_name: HeaderName) -> PropagateRequestIdLayer { + PropagateRequestIdLayer::new(header_name) + } +} + +impl Service> for PropagateRequestId +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = PropagateRequestIdResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let request_id = req + .headers() + .get(&self.header_name) + .cloned() + .map(RequestId::new); + + PropagateRequestIdResponseFuture { + inner: self.inner.call(req), + header_name: self.header_name.clone(), + request_id, + } + } +} + +pin_project! { + /// Response future for [`PropagateRequestId`]. + pub struct PropagateRequestIdResponseFuture { + #[pin] + inner: F, + header_name: HeaderName, + request_id: Option, + } +} + +impl Future for PropagateRequestIdResponseFuture +where + F: Future, E>>, +{ + type Output = Result, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let mut response = ready!(this.inner.poll(cx))?; + + if let Some(current_id) = response.headers().get(&*this.header_name) { + if response.extensions().get::().is_none() { + let current_id = current_id.clone(); + response.extensions_mut().insert(RequestId::new(current_id)); + } + } else if let Some(request_id) = this.request_id.take() { + response + .headers_mut() + .insert(this.header_name.clone(), request_id.0.clone()); + response.extensions_mut().insert(request_id); + } + + Poll::Ready(Ok(response)) + } +} + +/// A [`MakeRequestId`] that generates `UUID`s. 
+#[derive(Clone, Copy, Default)] +pub struct MakeRequestUuid; + +impl MakeRequestId for MakeRequestUuid { + fn make_request_id(&mut self, _request: &Request) -> Option { + let request_id = Uuid::new_v4().to_string().parse().unwrap(); + Some(RequestId::new(request_id)) + } +} + +#[cfg(test)] +mod tests { + use crate::test_helpers::Body; + use crate::ServiceBuilderExt as _; + use http::Response; + use std::{ + convert::Infallible, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + }; + use tower::{ServiceBuilder, ServiceExt}; + + #[allow(unused_imports)] + use super::*; + + #[tokio::test] + async fn basic() { + let svc = ServiceBuilder::new() + .set_x_request_id(Counter::default()) + .propagate_x_request_id() + .service_fn(handler); + + // header on response + let req = Request::builder().body(Body::empty()).unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.headers()["x-request-id"], "0"); + + let req = Request::builder().body(Body::empty()).unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.headers()["x-request-id"], "1"); + + // doesn't override if header is already there + let req = Request::builder() + .header("x-request-id", "foo") + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.headers()["x-request-id"], "foo"); + + // extension propagated + let req = Request::builder().body(Body::empty()).unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.extensions().get::().unwrap().0, "2"); + } + + #[tokio::test] + async fn other_middleware_setting_request_id() { + let svc = ServiceBuilder::new() + .override_request_header( + HeaderName::from_static("x-request-id"), + HeaderValue::from_str("foo").unwrap(), + ) + .set_x_request_id(Counter::default()) + .map_request(|request: Request<_>| { + // `set_x_request_id` should set the extension if its missing + assert_eq!(request.extensions().get::().unwrap().0, "foo"); + request + 
}) + .propagate_x_request_id() + .service_fn(handler); + + let req = Request::builder() + .header( + "x-request-id", + "this-will-be-overriden-by-override_request_header-middleware", + ) + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.headers()["x-request-id"], "foo"); + assert_eq!(res.extensions().get::().unwrap().0, "foo"); + } + + #[tokio::test] + async fn other_middleware_setting_request_id_on_response() { + let svc = ServiceBuilder::new() + .set_x_request_id(Counter::default()) + .propagate_x_request_id() + .override_response_header( + HeaderName::from_static("x-request-id"), + HeaderValue::from_str("foo").unwrap(), + ) + .service_fn(handler); + + let req = Request::builder() + .header("x-request-id", "foo") + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(req).await.unwrap(); + assert_eq!(res.headers()["x-request-id"], "foo"); + assert_eq!(res.extensions().get::().unwrap().0, "foo"); + } + + #[derive(Clone, Default)] + struct Counter(Arc); + + impl MakeRequestId for Counter { + fn make_request_id(&mut self, _request: &Request) -> Option { + let id = + HeaderValue::from_str(&self.0.fetch_add(1, Ordering::SeqCst).to_string()).unwrap(); + Some(RequestId::new(id)) + } + } + + async fn handler(_: Request) -> Result, Infallible> { + Ok(Response::new(Body::empty())) + } + + #[tokio::test] + async fn uuid() { + let svc = ServiceBuilder::new() + .set_x_request_id(MakeRequestUuid) + .propagate_x_request_id() + .service_fn(handler); + + // header on response + let req = Request::builder().body(Body::empty()).unwrap(); + let mut res = svc.clone().oneshot(req).await.unwrap(); + let id = res.headers_mut().remove("x-request-id").unwrap(); + id.to_str().unwrap().parse::().unwrap(); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/sensitive_headers.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/sensitive_headers.rs new file 
mode 100644 index 0000000000000000000000000000000000000000..3bd081db69115fed83669adb790627dc2bf4e2c6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/sensitive_headers.rs @@ -0,0 +1,448 @@ +//! Middlewares that mark headers as [sensitive]. +//! +//! [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +//! +//! # Example +//! +//! ``` +//! use tower_http::sensitive_headers::SetSensitiveHeadersLayer; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use http::{Request, Response, header::AUTHORIZATION}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use std::{iter::once, convert::Infallible}; +//! +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let mut service = ServiceBuilder::new() +//! // Mark the `Authorization` header as sensitive so it doesn't show in logs +//! // +//! // `SetSensitiveHeadersLayer` will mark the header as sensitive on both the +//! // request and response. +//! // +//! // The middleware is constructed from an iterator of headers to easily mark +//! // multiple headers at once. +//! .layer(SetSensitiveHeadersLayer::new(once(AUTHORIZATION))) +//! .service(service_fn(handle)); +//! +//! // Call the service. +//! let response = service +//! .ready() +//! .await? +//! .call(Request::new(Full::default())) +//! .await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! Its important to think about the order in which requests and responses arrive at your +//! middleware. For example to hide headers both on requests and responses when using +//! [`TraceLayer`] you have to apply [`SetSensitiveRequestHeadersLayer`] before [`TraceLayer`] +//! and [`SetSensitiveResponseHeadersLayer`] afterwards. +//! +//! ``` +//! use tower_http::{ +//! trace::TraceLayer, +//! sensitive_headers::{ +//! 
SetSensitiveRequestHeadersLayer, +//! SetSensitiveResponseHeadersLayer, +//! }, +//! }; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn}; +//! use http::header; +//! use std::sync::Arc; +//! # use http::{Request, Response}; +//! # use bytes::Bytes; +//! # use http_body_util::Full; +//! # use std::convert::Infallible; +//! # async fn handle(req: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::default())) +//! # } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let headers: Arc<[_]> = Arc::new([ +//! header::AUTHORIZATION, +//! header::PROXY_AUTHORIZATION, +//! header::COOKIE, +//! header::SET_COOKIE, +//! ]); +//! +//! let service = ServiceBuilder::new() +//! .layer(SetSensitiveRequestHeadersLayer::from_shared(Arc::clone(&headers))) +//! .layer(TraceLayer::new_for_http()) +//! .layer(SetSensitiveResponseHeadersLayer::from_shared(headers)) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` +//! +//! [`TraceLayer`]: crate::trace::TraceLayer + +use http::{header::HeaderName, Request, Response}; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + sync::Arc, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Mark headers as [sensitive] on both requests and responses. +/// +/// Produces [`SetSensitiveHeaders`] services. +/// +/// See the [module docs](crate::sensitive_headers) for more details. +/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +#[derive(Clone, Debug)] +pub struct SetSensitiveHeadersLayer { + headers: Arc<[HeaderName]>, +} + +impl SetSensitiveHeadersLayer { + /// Create a new [`SetSensitiveHeadersLayer`]. + pub fn new(headers: I) -> Self + where + I: IntoIterator, + { + let headers = headers.into_iter().collect::>(); + Self::from_shared(headers.into()) + } + + /// Create a new [`SetSensitiveHeadersLayer`] from a shared slice of headers. 
+ pub fn from_shared(headers: Arc<[HeaderName]>) -> Self { + Self { headers } + } +} + +impl Layer for SetSensitiveHeadersLayer { + type Service = SetSensitiveHeaders; + + fn layer(&self, inner: S) -> Self::Service { + SetSensitiveRequestHeaders::from_shared( + SetSensitiveResponseHeaders::from_shared(inner, self.headers.clone()), + self.headers.clone(), + ) + } +} + +/// Mark headers as [sensitive] on both requests and responses. +/// +/// See the [module docs](crate::sensitive_headers) for more details. +/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +pub type SetSensitiveHeaders = SetSensitiveRequestHeaders>; + +/// Mark request headers as [sensitive]. +/// +/// Produces [`SetSensitiveRequestHeaders`] services. +/// +/// See the [module docs](crate::sensitive_headers) for more details. +/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +#[derive(Clone, Debug)] +pub struct SetSensitiveRequestHeadersLayer { + headers: Arc<[HeaderName]>, +} + +impl SetSensitiveRequestHeadersLayer { + /// Create a new [`SetSensitiveRequestHeadersLayer`]. + pub fn new(headers: I) -> Self + where + I: IntoIterator, + { + let headers = headers.into_iter().collect::>(); + Self::from_shared(headers.into()) + } + + /// Create a new [`SetSensitiveRequestHeadersLayer`] from a shared slice of headers. + pub fn from_shared(headers: Arc<[HeaderName]>) -> Self { + Self { headers } + } +} + +impl Layer for SetSensitiveRequestHeadersLayer { + type Service = SetSensitiveRequestHeaders; + + fn layer(&self, inner: S) -> Self::Service { + SetSensitiveRequestHeaders { + inner, + headers: self.headers.clone(), + } + } +} + +/// Mark request headers as [sensitive]. +/// +/// See the [module docs](crate::sensitive_headers) for more details. 
+/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +#[derive(Clone, Debug)] +pub struct SetSensitiveRequestHeaders { + inner: S, + headers: Arc<[HeaderName]>, +} + +impl SetSensitiveRequestHeaders { + /// Create a new [`SetSensitiveRequestHeaders`]. + pub fn new(inner: S, headers: I) -> Self + where + I: IntoIterator, + { + let headers = headers.into_iter().collect::>(); + Self::from_shared(inner, headers.into()) + } + + /// Create a new [`SetSensitiveRequestHeaders`] from a shared slice of headers. + pub fn from_shared(inner: S, headers: Arc<[HeaderName]>) -> Self { + Self { inner, headers } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `SetSensitiveRequestHeaders` middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(headers: I) -> SetSensitiveRequestHeadersLayer + where + I: IntoIterator, + { + SetSensitiveRequestHeadersLayer::new(headers) + } +} + +impl Service> for SetSensitiveRequestHeaders +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + let headers = req.headers_mut(); + for header in &*self.headers { + if let http::header::Entry::Occupied(mut entry) = headers.entry(header) { + for value in entry.iter_mut() { + value.set_sensitive(true); + } + } + } + + self.inner.call(req) + } +} + +/// Mark response headers as [sensitive]. +/// +/// Produces [`SetSensitiveResponseHeaders`] services. +/// +/// See the [module docs](crate::sensitive_headers) for more details. 
+/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +#[derive(Clone, Debug)] +pub struct SetSensitiveResponseHeadersLayer { + headers: Arc<[HeaderName]>, +} + +impl SetSensitiveResponseHeadersLayer { + /// Create a new [`SetSensitiveResponseHeadersLayer`]. + pub fn new(headers: I) -> Self + where + I: IntoIterator, + { + let headers = headers.into_iter().collect::>(); + Self::from_shared(headers.into()) + } + + /// Create a new [`SetSensitiveResponseHeadersLayer`] from a shared slice of headers. + pub fn from_shared(headers: Arc<[HeaderName]>) -> Self { + Self { headers } + } +} + +impl Layer for SetSensitiveResponseHeadersLayer { + type Service = SetSensitiveResponseHeaders; + + fn layer(&self, inner: S) -> Self::Service { + SetSensitiveResponseHeaders { + inner, + headers: self.headers.clone(), + } + } +} + +/// Mark response headers as [sensitive]. +/// +/// See the [module docs](crate::sensitive_headers) for more details. +/// +/// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive +#[derive(Clone, Debug)] +pub struct SetSensitiveResponseHeaders { + inner: S, + headers: Arc<[HeaderName]>, +} + +impl SetSensitiveResponseHeaders { + /// Create a new [`SetSensitiveResponseHeaders`]. + pub fn new(inner: S, headers: I) -> Self + where + I: IntoIterator, + { + let headers = headers.into_iter().collect::>(); + Self::from_shared(inner, headers.into()) + } + + /// Create a new [`SetSensitiveResponseHeaders`] from a shared slice of headers. + pub fn from_shared(inner: S, headers: Arc<[HeaderName]>) -> Self { + Self { inner, headers } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `SetSensitiveResponseHeaders` middleware. 
+ /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(headers: I) -> SetSensitiveResponseHeadersLayer + where + I: IntoIterator, + { + SetSensitiveResponseHeadersLayer::new(headers) + } +} + +impl Service> for SetSensitiveResponseHeaders +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = SetSensitiveResponseHeadersResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + SetSensitiveResponseHeadersResponseFuture { + future: self.inner.call(req), + headers: self.headers.clone(), + } + } +} + +pin_project! { + /// Response future for [`SetSensitiveResponseHeaders`]. + #[derive(Debug)] + pub struct SetSensitiveResponseHeadersResponseFuture { + #[pin] + future: F, + headers: Arc<[HeaderName]>, + } +} + +impl Future for SetSensitiveResponseHeadersResponseFuture +where + F: Future, E>>, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let mut res = ready!(this.future.poll(cx)?); + + let headers = res.headers_mut(); + for header in &**this.headers { + if let http::header::Entry::Occupied(mut entry) = headers.entry(header) { + for value in entry.iter_mut() { + value.set_sensitive(true); + } + } + } + + Poll::Ready(Ok(res)) + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use http::header; + use tower::{ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn multiple_value_header() { + async fn response_set_cookie(req: http::Request<()>) -> Result, ()> { + let mut iter = req.headers().get_all(header::COOKIE).iter().peekable(); + + assert!(iter.peek().is_some()); + + for value in iter { + assert!(value.is_sensitive()) + } + + let mut resp = http::Response::new(()); + resp.headers_mut().append( + header::CONTENT_TYPE, + http::HeaderValue::from_static("text/html"), + ); + 
resp.headers_mut().append( + header::SET_COOKIE, + http::HeaderValue::from_static("cookie-1"), + ); + resp.headers_mut().append( + header::SET_COOKIE, + http::HeaderValue::from_static("cookie-2"), + ); + resp.headers_mut().append( + header::SET_COOKIE, + http::HeaderValue::from_static("cookie-3"), + ); + Ok(resp) + } + + let mut service = ServiceBuilder::new() + .layer(SetSensitiveRequestHeadersLayer::new(vec![header::COOKIE])) + .layer(SetSensitiveResponseHeadersLayer::new(vec![ + header::SET_COOKIE, + ])) + .service_fn(response_set_cookie); + + let mut req = http::Request::new(()); + req.headers_mut() + .append(header::COOKIE, http::HeaderValue::from_static("cookie+1")); + req.headers_mut() + .append(header::COOKIE, http::HeaderValue::from_static("cookie+2")); + + let resp = service.ready().await.unwrap().call(req).await.unwrap(); + + assert!(!resp + .headers() + .get(header::CONTENT_TYPE) + .unwrap() + .is_sensitive()); + + let mut iter = resp.headers().get_all(header::SET_COOKIE).iter().peekable(); + + assert!(iter.peek().is_some()); + + for value in iter { + assert!(value.is_sensitive()) + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/service_ext.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/service_ext.rs new file mode 100644 index 0000000000000000000000000000000000000000..8973d8a475595352756842c2dac5090423d3b1d8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/service_ext.rs @@ -0,0 +1,442 @@ +#[allow(unused_imports)] +use http::header::HeaderName; + +/// Extension trait that adds methods to any [`Service`] for adding middleware from +/// tower-http. +/// +/// [`Service`]: tower::Service +#[cfg(feature = "util")] +// ^ work around rustdoc not inferring doc(cfg)s for cfg's from surrounding scopes +pub trait ServiceExt { + /// Propagate a header from the request to the response. 
+ /// + /// See [`tower_http::propagate_header`] for more details. + /// + /// [`tower_http::propagate_header`]: crate::propagate_header + #[cfg(feature = "propagate-header")] + fn propagate_header(self, header: HeaderName) -> crate::propagate_header::PropagateHeader + where + Self: Sized, + { + crate::propagate_header::PropagateHeader::new(self, header) + } + + /// Add some shareable value to [request extensions]. + /// + /// See [`tower_http::add_extension`] for more details. + /// + /// [`tower_http::add_extension`]: crate::add_extension + /// [request extensions]: https://docs.rs/http/latest/http/struct.Extensions.html + #[cfg(feature = "add-extension")] + fn add_extension(self, value: T) -> crate::add_extension::AddExtension + where + Self: Sized, + { + crate::add_extension::AddExtension::new(self, value) + } + + /// Apply a transformation to the request body. + /// + /// See [`tower_http::map_request_body`] for more details. + /// + /// [`tower_http::map_request_body`]: crate::map_request_body + #[cfg(feature = "map-request-body")] + fn map_request_body(self, f: F) -> crate::map_request_body::MapRequestBody + where + Self: Sized, + { + crate::map_request_body::MapRequestBody::new(self, f) + } + + /// Apply a transformation to the response body. + /// + /// See [`tower_http::map_response_body`] for more details. + /// + /// [`tower_http::map_response_body`]: crate::map_response_body + #[cfg(feature = "map-response-body")] + fn map_response_body(self, f: F) -> crate::map_response_body::MapResponseBody + where + Self: Sized, + { + crate::map_response_body::MapResponseBody::new(self, f) + } + + /// Compresses response bodies. + /// + /// See [`tower_http::compression`] for more details. 
+ /// + /// [`tower_http::compression`]: crate::compression + #[cfg(any( + feature = "compression-br", + feature = "compression-deflate", + feature = "compression-gzip", + feature = "compression-zstd", + ))] + fn compression(self) -> crate::compression::Compression + where + Self: Sized, + { + crate::compression::Compression::new(self) + } + + /// Decompress response bodies. + /// + /// See [`tower_http::decompression`] for more details. + /// + /// [`tower_http::decompression`]: crate::decompression + #[cfg(any( + feature = "decompression-br", + feature = "decompression-deflate", + feature = "decompression-gzip", + feature = "decompression-zstd", + ))] + fn decompression(self) -> crate::decompression::Decompression + where + Self: Sized, + { + crate::decompression::Decompression::new(self) + } + + /// High level tracing that classifies responses using HTTP status codes. + /// + /// This method does not support customizing the output, to do that use [`TraceLayer`] + /// instead. + /// + /// See [`tower_http::trace`] for more details. + /// + /// [`tower_http::trace`]: crate::trace + /// [`TraceLayer`]: crate::trace::TraceLayer + #[cfg(feature = "trace")] + fn trace_for_http(self) -> crate::trace::Trace + where + Self: Sized, + { + crate::trace::Trace::new_for_http(self) + } + + /// High level tracing that classifies responses using gRPC headers. + /// + /// This method does not support customizing the output, to do that use [`TraceLayer`] + /// instead. + /// + /// See [`tower_http::trace`] for more details. + /// + /// [`tower_http::trace`]: crate::trace + /// [`TraceLayer`]: crate::trace::TraceLayer + #[cfg(feature = "trace")] + fn trace_for_grpc(self) -> crate::trace::Trace + where + Self: Sized, + { + crate::trace::Trace::new_for_grpc(self) + } + + /// Follow redirect resposes using the [`Standard`] policy. + /// + /// See [`tower_http::follow_redirect`] for more details. 
+ /// + /// [`tower_http::follow_redirect`]: crate::follow_redirect + /// [`Standard`]: crate::follow_redirect::policy::Standard + #[cfg(feature = "follow-redirect")] + fn follow_redirects( + self, + ) -> crate::follow_redirect::FollowRedirect + where + Self: Sized, + { + crate::follow_redirect::FollowRedirect::new(self) + } + + /// Mark headers as [sensitive] on both requests and responses. + /// + /// See [`tower_http::sensitive_headers`] for more details. + /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + /// [`tower_http::sensitive_headers`]: crate::sensitive_headers + #[cfg(feature = "sensitive-headers")] + fn sensitive_headers( + self, + headers: impl IntoIterator, + ) -> crate::sensitive_headers::SetSensitiveHeaders + where + Self: Sized, + { + use tower_layer::Layer as _; + crate::sensitive_headers::SetSensitiveHeadersLayer::new(headers).layer(self) + } + + /// Mark headers as [sensitive] on requests. + /// + /// See [`tower_http::sensitive_headers`] for more details. + /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + /// [`tower_http::sensitive_headers`]: crate::sensitive_headers + #[cfg(feature = "sensitive-headers")] + fn sensitive_request_headers( + self, + headers: impl IntoIterator, + ) -> crate::sensitive_headers::SetSensitiveRequestHeaders + where + Self: Sized, + { + crate::sensitive_headers::SetSensitiveRequestHeaders::new(self, headers) + } + + /// Mark headers as [sensitive] on responses. + /// + /// See [`tower_http::sensitive_headers`] for more details. 
+ /// + /// [sensitive]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html#method.set_sensitive + /// [`tower_http::sensitive_headers`]: crate::sensitive_headers + #[cfg(feature = "sensitive-headers")] + fn sensitive_response_headers( + self, + headers: impl IntoIterator, + ) -> crate::sensitive_headers::SetSensitiveResponseHeaders + where + Self: Sized, + { + crate::sensitive_headers::SetSensitiveResponseHeaders::new(self, headers) + } + + /// Insert a header into the request. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn override_request_header( + self, + header_name: HeaderName, + make: M, + ) -> crate::set_header::SetRequestHeader + where + Self: Sized, + { + crate::set_header::SetRequestHeader::overriding(self, header_name, make) + } + + /// Append a header into the request. + /// + /// If previous values exist, the header will have multiple values. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn append_request_header( + self, + header_name: HeaderName, + make: M, + ) -> crate::set_header::SetRequestHeader + where + Self: Sized, + { + crate::set_header::SetRequestHeader::appending(self, header_name, make) + } + + /// Insert a header into the request, if the header is not already present. + /// + /// See [`tower_http::set_header`] for more details. 
+ /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn insert_request_header_if_not_present( + self, + header_name: HeaderName, + make: M, + ) -> crate::set_header::SetRequestHeader + where + Self: Sized, + { + crate::set_header::SetRequestHeader::if_not_present(self, header_name, make) + } + + /// Insert a header into the response. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn override_response_header( + self, + header_name: HeaderName, + make: M, + ) -> crate::set_header::SetResponseHeader + where + Self: Sized, + { + crate::set_header::SetResponseHeader::overriding(self, header_name, make) + } + + /// Append a header into the response. + /// + /// If previous values exist, the header will have multiple values. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn append_response_header( + self, + header_name: HeaderName, + make: M, + ) -> crate::set_header::SetResponseHeader + where + Self: Sized, + { + crate::set_header::SetResponseHeader::appending(self, header_name, make) + } + + /// Insert a header into the response, if the header is not already present. + /// + /// See [`tower_http::set_header`] for more details. + /// + /// [`tower_http::set_header`]: crate::set_header + #[cfg(feature = "set-header")] + fn insert_response_header_if_not_present( + self, + header_name: HeaderName, + make: M, + ) -> crate::set_header::SetResponseHeader + where + Self: Sized, + { + crate::set_header::SetResponseHeader::if_not_present(self, header_name, make) + } + + /// Add request id header and extension. + /// + /// See [`tower_http::request_id`] for more details. 
+ /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn set_request_id( + self, + header_name: HeaderName, + make_request_id: M, + ) -> crate::request_id::SetRequestId + where + Self: Sized, + M: crate::request_id::MakeRequestId, + { + crate::request_id::SetRequestId::new(self, header_name, make_request_id) + } + + /// Add request id header and extension, using `x-request-id` as the header name. + /// + /// See [`tower_http::request_id`] for more details. + /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn set_x_request_id(self, make_request_id: M) -> crate::request_id::SetRequestId + where + Self: Sized, + M: crate::request_id::MakeRequestId, + { + self.set_request_id(crate::request_id::X_REQUEST_ID, make_request_id) + } + + /// Propgate request ids from requests to responses. + /// + /// See [`tower_http::request_id`] for more details. + /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn propagate_request_id( + self, + header_name: HeaderName, + ) -> crate::request_id::PropagateRequestId + where + Self: Sized, + { + crate::request_id::PropagateRequestId::new(self, header_name) + } + + /// Propgate request ids from requests to responses, using `x-request-id` as the header name. + /// + /// See [`tower_http::request_id`] for more details. + /// + /// [`tower_http::request_id`]: crate::request_id + #[cfg(feature = "request-id")] + fn propagate_x_request_id(self) -> crate::request_id::PropagateRequestId + where + Self: Sized, + { + self.propagate_request_id(crate::request_id::X_REQUEST_ID) + } + + /// Catch panics and convert them into `500 Internal Server` responses. + /// + /// See [`tower_http::catch_panic`] for more details. 
+ /// + /// [`tower_http::catch_panic`]: crate::catch_panic + #[cfg(feature = "catch-panic")] + fn catch_panic( + self, + ) -> crate::catch_panic::CatchPanic + where + Self: Sized, + { + crate::catch_panic::CatchPanic::new(self) + } + + /// Intercept requests with over-sized payloads and convert them into + /// `413 Payload Too Large` responses. + /// + /// See [`tower_http::limit`] for more details. + /// + /// [`tower_http::limit`]: crate::limit + #[cfg(feature = "limit")] + fn request_body_limit(self, limit: usize) -> crate::limit::RequestBodyLimit + where + Self: Sized, + { + crate::limit::RequestBodyLimit::new(self, limit) + } + + /// Remove trailing slashes from paths. + /// + /// See [`tower_http::normalize_path`] for more details. + /// + /// [`tower_http::normalize_path`]: crate::normalize_path + #[cfg(feature = "normalize-path")] + fn trim_trailing_slash(self) -> crate::normalize_path::NormalizePath + where + Self: Sized, + { + crate::normalize_path::NormalizePath::trim_trailing_slash(self) + } + + /// Append trailing slash to paths. + /// + /// See [`tower_http::normalize_path`] for more details. 
+ /// + /// [`tower_http::normalize_path`]: crate::normalize_path + #[cfg(feature = "normalize-path")] + fn append_trailing_slash(self) -> crate::normalize_path::NormalizePath + where + Self: Sized, + { + crate::normalize_path::NormalizePath::append_trailing_slash(self) + } +} + +impl ServiceExt for T {} + +#[cfg(all(test, feature = "fs", feature = "add-extension"))] +mod tests { + use super::ServiceExt; + use crate::services; + + #[allow(dead_code)] + fn test_type_inference() { + let _svc = services::fs::ServeDir::new(".").add_extension("&'static str"); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..c23f9619e9dc82ac4c9e239990f8a24fe5ebbbe7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/mod.rs @@ -0,0 +1,79 @@ +//! File system related services. + +use bytes::Bytes; +use futures_core::Stream; +use http_body::{Body, Frame}; +use pin_project_lite::pin_project; +use std::{ + io, + pin::Pin, + task::{Context, Poll}, +}; +use tokio::io::{AsyncRead, AsyncReadExt, Take}; +use tokio_util::io::ReaderStream; + +mod serve_dir; +mod serve_file; + +pub use self::{ + serve_dir::{ + future::ResponseFuture as ServeFileSystemResponseFuture, + DefaultServeDirFallback, + // The response body and future are used for both ServeDir and ServeFile + ResponseBody as ServeFileSystemResponseBody, + ServeDir, + }, + serve_file::ServeFile, +}; + +pin_project! { + // NOTE: This could potentially be upstreamed to `http-body`. + /// Adapter that turns an [`impl AsyncRead`][tokio::io::AsyncRead] to an [`impl Body`][http_body::Body]. 
+ #[derive(Debug)] + pub struct AsyncReadBody { + #[pin] + reader: ReaderStream, + } +} + +impl AsyncReadBody +where + T: AsyncRead, +{ + /// Create a new [`AsyncReadBody`] wrapping the given reader, + /// with a specific read buffer capacity + fn with_capacity(read: T, capacity: usize) -> Self { + Self { + reader: ReaderStream::with_capacity(read, capacity), + } + } + + fn with_capacity_limited( + read: T, + capacity: usize, + max_read_bytes: u64, + ) -> AsyncReadBody> { + AsyncReadBody { + reader: ReaderStream::with_capacity(read.take(max_read_bytes), capacity), + } + } +} + +impl Body for AsyncReadBody +where + T: AsyncRead, +{ + type Data = Bytes; + type Error = io::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + match std::task::ready!(self.project().reader.poll_next(cx)) { + Some(Ok(chunk)) => Poll::Ready(Some(Ok(Frame::data(chunk)))), + Some(Err(err)) => Poll::Ready(Some(Err(err))), + None => Poll::Ready(None), + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/future.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/future.rs new file mode 100644 index 0000000000000000000000000000000000000000..7df32b0d6b66ee955f7e3ecc1f5a7fd318c45b95 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/future.rs @@ -0,0 +1,332 @@ +use super::{ + open_file::{FileOpened, FileRequestExtent, OpenFileOutput}, + DefaultServeDirFallback, ResponseBody, +}; +use crate::{ + body::UnsyncBoxBody, content_encoding::Encoding, services::fs::AsyncReadBody, BoxError, +}; +use bytes::Bytes; +use futures_core::future::BoxFuture; +use futures_util::future::{FutureExt, TryFutureExt}; +use http::{ + header::{self, ALLOW}, + HeaderValue, Request, Response, StatusCode, +}; +use http_body_util::{BodyExt, Empty, Full}; +use pin_project_lite::pin_project; +use 
std::{ + convert::Infallible, + future::Future, + io, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_service::Service; + +pin_project! { + /// Response future of [`ServeDir::try_call()`][`super::ServeDir::try_call()`]. + pub struct ResponseFuture { + #[pin] + pub(super) inner: ResponseFutureInner, + } +} + +impl ResponseFuture { + pub(super) fn open_file_future( + future: BoxFuture<'static, io::Result>, + fallback_and_request: Option<(F, Request)>, + ) -> Self { + Self { + inner: ResponseFutureInner::OpenFileFuture { + future, + fallback_and_request, + }, + } + } + + pub(super) fn invalid_path(fallback_and_request: Option<(F, Request)>) -> Self { + Self { + inner: ResponseFutureInner::InvalidPath { + fallback_and_request, + }, + } + } + + pub(super) fn method_not_allowed() -> Self { + Self { + inner: ResponseFutureInner::MethodNotAllowed, + } + } +} + +pin_project! { + #[project = ResponseFutureInnerProj] + pub(super) enum ResponseFutureInner { + OpenFileFuture { + #[pin] + future: BoxFuture<'static, io::Result>, + fallback_and_request: Option<(F, Request)>, + }, + FallbackFuture { + future: BoxFuture<'static, Result, Infallible>>, + }, + InvalidPath { + fallback_and_request: Option<(F, Request)>, + }, + MethodNotAllowed, + } +} + +impl Future for ResponseFuture +where + F: Service, Response = Response, Error = Infallible> + Clone, + F::Future: Send + 'static, + ResBody: http_body::Body + Send + 'static, + ResBody::Error: Into>, +{ + type Output = io::Result>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + loop { + let mut this = self.as_mut().project(); + + let new_state = match this.inner.as_mut().project() { + ResponseFutureInnerProj::OpenFileFuture { + future: open_file_future, + fallback_and_request, + } => match ready!(open_file_future.poll(cx)) { + Ok(OpenFileOutput::FileOpened(file_output)) => { + break Poll::Ready(Ok(build_response(*file_output))); + } + + Ok(OpenFileOutput::Redirect { location }) => { + let mut res = 
response_with_status(StatusCode::TEMPORARY_REDIRECT); + res.headers_mut().insert(http::header::LOCATION, location); + break Poll::Ready(Ok(res)); + } + + Ok(OpenFileOutput::FileNotFound | OpenFileOutput::InvalidFilename) => { + if let Some((mut fallback, request)) = fallback_and_request.take() { + call_fallback(&mut fallback, request) + } else { + break Poll::Ready(Ok(not_found())); + } + } + + Ok(OpenFileOutput::PreconditionFailed) => { + break Poll::Ready(Ok(response_with_status( + StatusCode::PRECONDITION_FAILED, + ))); + } + + Ok(OpenFileOutput::NotModified) => { + break Poll::Ready(Ok(response_with_status(StatusCode::NOT_MODIFIED))); + } + + Ok(OpenFileOutput::InvalidRedirectUri) => { + break Poll::Ready(Ok(response_with_status( + StatusCode::INTERNAL_SERVER_ERROR, + ))); + } + + Err(err) => { + #[cfg(unix)] + // 20 = libc::ENOTDIR => "not a directory + // when `io_error_more` landed, this can be changed + // to checking for `io::ErrorKind::NotADirectory`. + // https://github.com/rust-lang/rust/issues/86442 + let error_is_not_a_directory = err.raw_os_error() == Some(20); + #[cfg(not(unix))] + let error_is_not_a_directory = false; + + if matches!( + err.kind(), + io::ErrorKind::NotFound | io::ErrorKind::PermissionDenied + ) || error_is_not_a_directory + { + if let Some((mut fallback, request)) = fallback_and_request.take() { + call_fallback(&mut fallback, request) + } else { + break Poll::Ready(Ok(not_found())); + } + } else { + break Poll::Ready(Err(err)); + } + } + }, + + ResponseFutureInnerProj::FallbackFuture { future } => { + break Pin::new(future).poll(cx).map_err(|err| match err {}) + } + + ResponseFutureInnerProj::InvalidPath { + fallback_and_request, + } => { + if let Some((mut fallback, request)) = fallback_and_request.take() { + call_fallback(&mut fallback, request) + } else { + break Poll::Ready(Ok(not_found())); + } + } + + ResponseFutureInnerProj::MethodNotAllowed => { + let mut res = response_with_status(StatusCode::METHOD_NOT_ALLOWED); + 
res.headers_mut() + .insert(ALLOW, HeaderValue::from_static("GET,HEAD")); + break Poll::Ready(Ok(res)); + } + }; + + this.inner.set(new_state); + } + } +} + +fn response_with_status(status: StatusCode) -> Response { + Response::builder() + .status(status) + .body(empty_body()) + .unwrap() +} + +fn not_found() -> Response { + response_with_status(StatusCode::NOT_FOUND) +} + +pub(super) fn call_fallback( + fallback: &mut F, + req: Request, +) -> ResponseFutureInner +where + F: Service, Response = Response, Error = Infallible> + Clone, + F::Future: Send + 'static, + FResBody: http_body::Body + Send + 'static, + FResBody::Error: Into, +{ + let future = fallback + .call(req) + .map_ok(|response| { + response + .map(|body| { + UnsyncBoxBody::new( + body.map_err(|err| match err.into().downcast::() { + Ok(err) => *err, + Err(err) => io::Error::new(io::ErrorKind::Other, err), + }) + .boxed_unsync(), + ) + }) + .map(ResponseBody::new) + }) + .boxed(); + + ResponseFutureInner::FallbackFuture { future } +} + +fn build_response(output: FileOpened) -> Response { + let (maybe_file, size) = match output.extent { + FileRequestExtent::Full(file, meta) => (Some(file), meta.len()), + FileRequestExtent::Head(meta) => (None, meta.len()), + }; + + let mut builder = Response::builder() + .header(header::CONTENT_TYPE, output.mime_header_value) + .header(header::ACCEPT_RANGES, "bytes"); + + if let Some(encoding) = output + .maybe_encoding + .filter(|encoding| *encoding != Encoding::Identity) + { + builder = builder.header(header::CONTENT_ENCODING, encoding.into_header_value()); + } + + if let Some(last_modified) = output.last_modified { + builder = builder.header(header::LAST_MODIFIED, last_modified.0.to_string()); + } + + match output.maybe_range { + Some(Ok(ranges)) => { + if let Some(range) = ranges.first() { + if ranges.len() > 1 { + builder + .header(header::CONTENT_RANGE, format!("bytes */{}", size)) + .status(StatusCode::RANGE_NOT_SATISFIABLE) + .body(body_from_bytes(Bytes::from( + 
"Cannot serve multipart range requests", + ))) + .unwrap() + } else { + let body = if let Some(file) = maybe_file { + let range_size = range.end() - range.start() + 1; + ResponseBody::new(UnsyncBoxBody::new( + AsyncReadBody::with_capacity_limited( + file, + output.chunk_size, + range_size, + ) + .boxed_unsync(), + )) + } else { + empty_body() + }; + + let content_length = if size == 0 { + 0 + } else { + range.end() - range.start() + 1 + }; + + builder + .header( + header::CONTENT_RANGE, + format!("bytes {}-{}/{}", range.start(), range.end(), size), + ) + .header(header::CONTENT_LENGTH, content_length) + .status(StatusCode::PARTIAL_CONTENT) + .body(body) + .unwrap() + } + } else { + builder + .header(header::CONTENT_RANGE, format!("bytes */{}", size)) + .status(StatusCode::RANGE_NOT_SATISFIABLE) + .body(body_from_bytes(Bytes::from( + "No range found after parsing range header, please file an issue", + ))) + .unwrap() + } + } + + Some(Err(_)) => builder + .header(header::CONTENT_RANGE, format!("bytes */{}", size)) + .status(StatusCode::RANGE_NOT_SATISFIABLE) + .body(empty_body()) + .unwrap(), + + // Not a range request + None => { + let body = if let Some(file) = maybe_file { + ResponseBody::new(UnsyncBoxBody::new( + AsyncReadBody::with_capacity(file, output.chunk_size).boxed_unsync(), + )) + } else { + empty_body() + }; + + builder + .header(header::CONTENT_LENGTH, size) + .body(body) + .unwrap() + } + } +} + +fn body_from_bytes(bytes: Bytes) -> ResponseBody { + let body = Full::from(bytes).map_err(|err| match err {}).boxed_unsync(); + ResponseBody::new(UnsyncBoxBody::new(body)) +} + +fn empty_body() -> ResponseBody { + let body = Empty::new().map_err(|err| match err {}).boxed_unsync(); + ResponseBody::new(UnsyncBoxBody::new(body)) +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/headers.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/headers.rs new 
file mode 100644 index 0000000000000000000000000000000000000000..e9e809073fb0949dad2353da3bf2e87424533a96 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/headers.rs @@ -0,0 +1,45 @@ +use http::header::HeaderValue; +use httpdate::HttpDate; +use std::time::SystemTime; + +pub(super) struct LastModified(pub(super) HttpDate); + +impl From for LastModified { + fn from(time: SystemTime) -> Self { + LastModified(time.into()) + } +} + +pub(super) struct IfModifiedSince(HttpDate); + +impl IfModifiedSince { + /// Check if the supplied time means the resource has been modified. + pub(super) fn is_modified(&self, last_modified: &LastModified) -> bool { + self.0 < last_modified.0 + } + + /// convert a header value into a IfModifiedSince, invalid values are silentely ignored + pub(super) fn from_header_value(value: &HeaderValue) -> Option { + std::str::from_utf8(value.as_bytes()) + .ok() + .and_then(|value| httpdate::parse_http_date(value).ok()) + .map(|time| IfModifiedSince(time.into())) + } +} + +pub(super) struct IfUnmodifiedSince(HttpDate); + +impl IfUnmodifiedSince { + /// Check if the supplied time passes the precondtion. 
+ pub(super) fn precondition_passes(&self, last_modified: &LastModified) -> bool { + self.0 >= last_modified.0 + } + + /// Convert a header value into a IfModifiedSince, invalid values are silentely ignored + pub(super) fn from_header_value(value: &HeaderValue) -> Option { + std::str::from_utf8(value.as_bytes()) + .ok() + .and_then(|value| httpdate::parse_http_date(value).ok()) + .map(|time| IfUnmodifiedSince(time.into())) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..61b956d13c3f19c66a9e2fd2e16713d555ef1774 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/mod.rs @@ -0,0 +1,541 @@ +use self::future::ResponseFuture; +use crate::{ + body::UnsyncBoxBody, + content_encoding::{encodings, SupportedEncodings}, + set_status::SetStatus, +}; +use bytes::Bytes; +use futures_util::FutureExt; +use http::{header, HeaderValue, Method, Request, Response, StatusCode}; +use http_body_util::{BodyExt, Empty}; +use percent_encoding::percent_decode; +use std::{ + convert::Infallible, + io, + path::{Component, Path, PathBuf}, + task::{Context, Poll}, +}; +use tower_service::Service; + +pub(crate) mod future; +mod headers; +mod open_file; + +#[cfg(test)] +mod tests; + +// default capacity 64KiB +const DEFAULT_CAPACITY: usize = 65536; + +/// Service that serves files from a given directory and all its sub directories. +/// +/// The `Content-Type` will be guessed from the file extension. 
+/// +/// An empty response with status `404 Not Found` will be returned if: +/// +/// - The file doesn't exist +/// - Any segment of the path contains `..` +/// - Any segment of the path contains a backslash +/// - On unix, any segment of the path referenced as directory is actually an +/// existing file (`/file.html/something`) +/// - We don't have necessary permissions to read the file +/// +/// # Example +/// +/// ``` +/// use tower_http::services::ServeDir; +/// +/// // This will serve files in the "assets" directory and +/// // its subdirectories +/// let service = ServeDir::new("assets"); +/// ``` +#[derive(Clone, Debug)] +pub struct ServeDir { + base: PathBuf, + buf_chunk_size: usize, + precompressed_variants: Option, + // This is used to specialise implementation for + // single files + variant: ServeVariant, + fallback: Option, + call_fallback_on_method_not_allowed: bool, +} + +impl ServeDir { + /// Create a new [`ServeDir`]. + pub fn new

(path: P) -> Self + where + P: AsRef, + { + let mut base = PathBuf::from("."); + base.push(path.as_ref()); + + Self { + base, + buf_chunk_size: DEFAULT_CAPACITY, + precompressed_variants: None, + variant: ServeVariant::Directory { + append_index_html_on_directories: true, + }, + fallback: None, + call_fallback_on_method_not_allowed: false, + } + } + + pub(crate) fn new_single_file

(path: P, mime: HeaderValue) -> Self + where + P: AsRef, + { + Self { + base: path.as_ref().to_owned(), + buf_chunk_size: DEFAULT_CAPACITY, + precompressed_variants: None, + variant: ServeVariant::SingleFile { mime }, + fallback: None, + call_fallback_on_method_not_allowed: false, + } + } +} + +impl ServeDir { + /// If the requested path is a directory append `index.html`. + /// + /// This is useful for static sites. + /// + /// Defaults to `true`. + pub fn append_index_html_on_directories(mut self, append: bool) -> Self { + match &mut self.variant { + ServeVariant::Directory { + append_index_html_on_directories, + } => { + *append_index_html_on_directories = append; + self + } + ServeVariant::SingleFile { mime: _ } => self, + } + } + + /// Set a specific read buffer chunk size. + /// + /// The default capacity is 64kb. + pub fn with_buf_chunk_size(mut self, chunk_size: usize) -> Self { + self.buf_chunk_size = chunk_size; + self + } + + /// Informs the service that it should also look for a precompressed gzip + /// version of _any_ file in the directory. + /// + /// Assuming the `dir` directory is being served and `dir/foo.txt` is requested, + /// a client with an `Accept-Encoding` header that allows the gzip encoding + /// will receive the file `dir/foo.txt.gz` instead of `dir/foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the directory. Different precompressed variants can be combined. + pub fn precompressed_gzip(mut self) -> Self { + self.precompressed_variants + .get_or_insert(Default::default()) + .gzip = true; + self + } + + /// Informs the service that it should also look for a precompressed brotli + /// version of _any_ file in the directory. 
+ /// + /// Assuming the `dir` directory is being served and `dir/foo.txt` is requested, + /// a client with an `Accept-Encoding` header that allows the brotli encoding + /// will receive the file `dir/foo.txt.br` instead of `dir/foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the directory. Different precompressed variants can be combined. + pub fn precompressed_br(mut self) -> Self { + self.precompressed_variants + .get_or_insert(Default::default()) + .br = true; + self + } + + /// Informs the service that it should also look for a precompressed deflate + /// version of _any_ file in the directory. + /// + /// Assuming the `dir` directory is being served and `dir/foo.txt` is requested, + /// a client with an `Accept-Encoding` header that allows the deflate encoding + /// will receive the file `dir/foo.txt.zz` instead of `dir/foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the directory. Different precompressed variants can be combined. + pub fn precompressed_deflate(mut self) -> Self { + self.precompressed_variants + .get_or_insert(Default::default()) + .deflate = true; + self + } + + /// Informs the service that it should also look for a precompressed zstd + /// version of _any_ file in the directory. + /// + /// Assuming the `dir` directory is being served and `dir/foo.txt` is requested, + /// a client with an `Accept-Encoding` header that allows the zstd encoding + /// will receive the file `dir/foo.txt.zst` instead of `dir/foo.txt`. 
+ /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the directory. Different precompressed variants can be combined. + pub fn precompressed_zstd(mut self) -> Self { + self.precompressed_variants + .get_or_insert(Default::default()) + .zstd = true; + self + } + + /// Set the fallback service. + /// + /// This service will be called if there is no file at the path of the request. + /// + /// The status code returned by the fallback will not be altered. Use + /// [`ServeDir::not_found_service`] to set a fallback and always respond with `404 Not Found`. + /// + /// # Example + /// + /// This can be used to respond with a different file: + /// + /// ```rust + /// use tower_http::services::{ServeDir, ServeFile}; + /// + /// let service = ServeDir::new("assets") + /// // respond with `not_found.html` for missing files + /// .fallback(ServeFile::new("assets/not_found.html")); + /// ``` + pub fn fallback(self, new_fallback: F2) -> ServeDir { + ServeDir { + base: self.base, + buf_chunk_size: self.buf_chunk_size, + precompressed_variants: self.precompressed_variants, + variant: self.variant, + fallback: Some(new_fallback), + call_fallback_on_method_not_allowed: self.call_fallback_on_method_not_allowed, + } + } + + /// Set the fallback service and override the fallback's status code to `404 Not Found`. + /// + /// This service will be called if there is no file at the path of the request. 
+ /// + /// # Example + /// + /// This can be used to respond with a different file: + /// + /// ```rust + /// use tower_http::services::{ServeDir, ServeFile}; + /// + /// let service = ServeDir::new("assets") + /// // respond with `404 Not Found` and the contents of `not_found.html` for missing files + /// .not_found_service(ServeFile::new("assets/not_found.html")); + /// ``` + /// + /// Setups like this are often found in single page applications. + pub fn not_found_service(self, new_fallback: F2) -> ServeDir> { + self.fallback(SetStatus::new(new_fallback, StatusCode::NOT_FOUND)) + } + + /// Customize whether or not to call the fallback for requests that aren't `GET` or `HEAD`. + /// + /// Defaults to not calling the fallback and instead returning `405 Method Not Allowed`. + pub fn call_fallback_on_method_not_allowed(mut self, call_fallback: bool) -> Self { + self.call_fallback_on_method_not_allowed = call_fallback; + self + } + + /// Call the service and get a future that contains any `std::io::Error` that might have + /// happened. + /// + /// By default `>::call` will handle IO errors and convert them into + /// responses. It does that by converting [`std::io::ErrorKind::NotFound`] and + /// [`std::io::ErrorKind::PermissionDenied`] to `404 Not Found` and any other error to `500 + /// Internal Server Error`. The error will also be logged with `tracing`. + /// + /// If you want to manually control how the error response is generated you can make a new + /// service that wraps a `ServeDir` and calls `try_call` instead of `call`. 
+ /// + /// # Example + /// + /// ``` + /// use tower_http::services::ServeDir; + /// use std::{io, convert::Infallible}; + /// use http::{Request, Response, StatusCode}; + /// use http_body::Body as _; + /// use http_body_util::{Full, BodyExt, combinators::UnsyncBoxBody}; + /// use bytes::Bytes; + /// use tower::{service_fn, ServiceExt, BoxError}; + /// + /// async fn serve_dir( + /// request: Request> + /// ) -> Result>, Infallible> { + /// let mut service = ServeDir::new("assets"); + /// + /// // You only need to worry about backpressure, and thus call `ServiceExt::ready`, if + /// // your adding a fallback to `ServeDir` that cares about backpressure. + /// // + /// // Its shown here for demonstration but you can do `service.try_call(request)` + /// // otherwise + /// let ready_service = match ServiceExt::>>::ready(&mut service).await { + /// Ok(ready_service) => ready_service, + /// Err(infallible) => match infallible {}, + /// }; + /// + /// match ready_service.try_call(request).await { + /// Ok(response) => { + /// Ok(response.map(|body| body.map_err(Into::into).boxed_unsync())) + /// } + /// Err(err) => { + /// let body = Full::from("Something went wrong...") + /// .map_err(Into::into) + /// .boxed_unsync(); + /// let response = Response::builder() + /// .status(StatusCode::INTERNAL_SERVER_ERROR) + /// .body(body) + /// .unwrap(); + /// Ok(response) + /// } + /// } + /// } + /// ``` + pub fn try_call( + &mut self, + req: Request, + ) -> ResponseFuture + where + F: Service, Response = Response, Error = Infallible> + Clone, + F::Future: Send + 'static, + FResBody: http_body::Body + Send + 'static, + FResBody::Error: Into>, + { + if req.method() != Method::GET && req.method() != Method::HEAD { + if self.call_fallback_on_method_not_allowed { + if let Some(fallback) = &mut self.fallback { + return ResponseFuture { + inner: future::call_fallback(fallback, req), + }; + } + } else { + return ResponseFuture::method_not_allowed(); + } + } + + // `ServeDir` doesn't 
care about the request body but the fallback might. So move out the + // body and pass it to the fallback, leaving an empty body in its place + // + // this is necessary because we cannot clone bodies + let (mut parts, body) = req.into_parts(); + // same goes for extensions + let extensions = std::mem::take(&mut parts.extensions); + let req = Request::from_parts(parts, Empty::::new()); + + let fallback_and_request = self.fallback.as_mut().map(|fallback| { + let mut fallback_req = Request::new(body); + *fallback_req.method_mut() = req.method().clone(); + *fallback_req.uri_mut() = req.uri().clone(); + *fallback_req.headers_mut() = req.headers().clone(); + *fallback_req.extensions_mut() = extensions; + + // get the ready fallback and leave a non-ready clone in its place + let clone = fallback.clone(); + let fallback = std::mem::replace(fallback, clone); + + (fallback, fallback_req) + }); + + let path_to_file = match self + .variant + .build_and_validate_path(&self.base, req.uri().path()) + { + Some(path_to_file) => path_to_file, + None => { + return ResponseFuture::invalid_path(fallback_and_request); + } + }; + + let buf_chunk_size = self.buf_chunk_size; + let range_header = req + .headers() + .get(header::RANGE) + .and_then(|value| value.to_str().ok()) + .map(|s| s.to_owned()); + + let negotiated_encodings: Vec<_> = encodings( + req.headers(), + self.precompressed_variants.unwrap_or_default(), + ) + .collect(); + + let variant = self.variant.clone(); + + let open_file_future = Box::pin(open_file::open_file( + variant, + path_to_file, + req, + negotiated_encodings, + range_header, + buf_chunk_size, + )); + + ResponseFuture::open_file_future(open_file_future, fallback_and_request) + } +} + +impl Service> for ServeDir +where + F: Service, Response = Response, Error = Infallible> + Clone, + F::Future: Send + 'static, + FResBody: http_body::Body + Send + 'static, + FResBody::Error: Into>, +{ + type Response = Response; + type Error = Infallible; + type Future = 
InfallibleResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + if let Some(fallback) = &mut self.fallback { + fallback.poll_ready(cx) + } else { + Poll::Ready(Ok(())) + } + } + + fn call(&mut self, req: Request) -> Self::Future { + let future = self + .try_call(req) + .map(|result: Result<_, _>| -> Result<_, Infallible> { + let response = result.unwrap_or_else(|err| { + tracing::error!(error = %err, "Failed to read file"); + + let body = ResponseBody::new(UnsyncBoxBody::new( + Empty::new().map_err(|err| match err {}).boxed_unsync(), + )); + Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body(body) + .unwrap() + }); + Ok(response) + } as _); + + InfallibleResponseFuture::new(future) + } +} + +opaque_future! { + /// Response future of [`ServeDir`]. + pub type InfallibleResponseFuture = + futures_util::future::Map< + ResponseFuture, + fn(Result, io::Error>) -> Result, Infallible>, + >; +} + +// Allow the ServeDir service to be used in the ServeFile service +// with almost no overhead +#[derive(Clone, Debug)] +enum ServeVariant { + Directory { + append_index_html_on_directories: bool, + }, + SingleFile { + mime: HeaderValue, + }, +} + +impl ServeVariant { + fn build_and_validate_path(&self, base_path: &Path, requested_path: &str) -> Option { + match self { + ServeVariant::Directory { + append_index_html_on_directories: _, + } => { + let path = requested_path.trim_start_matches('/'); + + let path_decoded = percent_decode(path.as_ref()).decode_utf8().ok()?; + let path_decoded = Path::new(&*path_decoded); + + let mut path_to_file = base_path.to_path_buf(); + for component in path_decoded.components() { + match component { + Component::Normal(comp) => { + // protect against paths like `/foo/c:/bar/baz` (#204) + if Path::new(&comp) + .components() + .all(|c| matches!(c, Component::Normal(_))) + { + path_to_file.push(comp) + } else { + return None; + } + } + Component::CurDir => {} + Component::Prefix(_) | 
Component::RootDir | Component::ParentDir => { + return None; + } + } + } + Some(path_to_file) + } + ServeVariant::SingleFile { mime: _ } => Some(base_path.to_path_buf()), + } + } +} + +opaque_body! { + /// Response body for [`ServeDir`] and [`ServeFile`][super::ServeFile]. + #[derive(Default)] + pub type ResponseBody = UnsyncBoxBody; +} + +/// The default fallback service used with [`ServeDir`]. +#[derive(Debug, Clone, Copy)] +pub struct DefaultServeDirFallback(Infallible); + +impl Service> for DefaultServeDirFallback +where + ReqBody: Send + 'static, +{ + type Response = Response; + type Error = Infallible; + type Future = InfallibleResponseFuture; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + match self.0 {} + } + + fn call(&mut self, _req: Request) -> Self::Future { + match self.0 {} + } +} + +#[derive(Clone, Copy, Debug, Default)] +struct PrecompressedVariants { + gzip: bool, + deflate: bool, + br: bool, + zstd: bool, +} + +impl SupportedEncodings for PrecompressedVariants { + fn gzip(&self) -> bool { + self.gzip + } + + fn deflate(&self) -> bool { + self.deflate + } + + fn br(&self) -> bool { + self.br + } + + fn zstd(&self) -> bool { + self.zstd + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/open_file.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/open_file.rs new file mode 100644 index 0000000000000000000000000000000000000000..852b2ee37e081492c40d569aa36891e8b8d51b4b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/open_file.rs @@ -0,0 +1,369 @@ +use super::{ + headers::{IfModifiedSince, IfUnmodifiedSince, LastModified}, + ServeVariant, +}; +use crate::content_encoding::{Encoding, QValue}; +use bytes::Bytes; +use http::{header, HeaderValue, Method, Request, Uri}; +use http_body_util::Empty; +use http_range_header::RangeUnsatisfiableError; +use std::{ + 
ffi::OsStr, + fs::Metadata, + io::{self, ErrorKind, SeekFrom}, + ops::RangeInclusive, + path::{Path, PathBuf}, +}; +use tokio::{fs::File, io::AsyncSeekExt}; + +pub(super) enum OpenFileOutput { + FileOpened(Box), + Redirect { location: HeaderValue }, + FileNotFound, + PreconditionFailed, + NotModified, + InvalidRedirectUri, + InvalidFilename, +} + +pub(super) struct FileOpened { + pub(super) extent: FileRequestExtent, + pub(super) chunk_size: usize, + pub(super) mime_header_value: HeaderValue, + pub(super) maybe_encoding: Option, + pub(super) maybe_range: Option>, RangeUnsatisfiableError>>, + pub(super) last_modified: Option, +} + +pub(super) enum FileRequestExtent { + Full(File, Metadata), + Head(Metadata), +} + +pub(super) async fn open_file( + variant: ServeVariant, + mut path_to_file: PathBuf, + req: Request>, + negotiated_encodings: Vec<(Encoding, QValue)>, + range_header: Option, + buf_chunk_size: usize, +) -> io::Result { + let if_unmodified_since = req + .headers() + .get(header::IF_UNMODIFIED_SINCE) + .and_then(IfUnmodifiedSince::from_header_value); + + let if_modified_since = req + .headers() + .get(header::IF_MODIFIED_SINCE) + .and_then(IfModifiedSince::from_header_value); + + let mime = match variant { + ServeVariant::Directory { + append_index_html_on_directories, + } => { + // Might already at this point know a redirect or not found result should be + // returned which corresponds to a Some(output). Otherwise the path might be + // modified and proceed to the open file/metadata future. 
+ if let Some(output) = maybe_redirect_or_append_path( + &mut path_to_file, + req.uri(), + append_index_html_on_directories, + ) + .await + { + return Ok(output); + } + + mime_guess::from_path(&path_to_file) + .first_raw() + .map(HeaderValue::from_static) + .unwrap_or_else(|| { + HeaderValue::from_str(mime::APPLICATION_OCTET_STREAM.as_ref()).unwrap() + }) + } + + ServeVariant::SingleFile { mime } => mime, + }; + + if req.method() == Method::HEAD { + let (meta, maybe_encoding) = + file_metadata_with_fallback(path_to_file, negotiated_encodings).await?; + + let last_modified = meta.modified().ok().map(LastModified::from); + if let Some(output) = check_modified_headers( + last_modified.as_ref(), + if_unmodified_since, + if_modified_since, + ) { + return Ok(output); + } + + let maybe_range = try_parse_range(range_header.as_deref(), meta.len()); + + Ok(OpenFileOutput::FileOpened(Box::new(FileOpened { + extent: FileRequestExtent::Head(meta), + chunk_size: buf_chunk_size, + mime_header_value: mime, + maybe_encoding, + maybe_range, + last_modified, + }))) + } else { + let (mut file, maybe_encoding) = + match open_file_with_fallback(path_to_file, negotiated_encodings).await { + Ok(result) => result, + + Err(err) if is_invalid_filename_error(&err) => { + return Ok(OpenFileOutput::InvalidFilename) + } + Err(err) => return Err(err), + }; + + let meta = file.metadata().await?; + let last_modified = meta.modified().ok().map(LastModified::from); + if let Some(output) = check_modified_headers( + last_modified.as_ref(), + if_unmodified_since, + if_modified_since, + ) { + return Ok(output); + } + + let maybe_range = try_parse_range(range_header.as_deref(), meta.len()); + if let Some(Ok(ranges)) = maybe_range.as_ref() { + // if there is any other amount of ranges than 1 we'll return an + // unsatisfiable later as there isn't yet support for multipart ranges + if ranges.len() == 1 { + file.seek(SeekFrom::Start(*ranges[0].start())).await?; + } + } + + 
Ok(OpenFileOutput::FileOpened(Box::new(FileOpened { + extent: FileRequestExtent::Full(file, meta), + chunk_size: buf_chunk_size, + mime_header_value: mime, + maybe_encoding, + maybe_range, + last_modified, + }))) + } +} + +fn is_invalid_filename_error(err: &io::Error) -> bool { + // Only applies to NULL bytes + if err.kind() == ErrorKind::InvalidInput { + return true; + } + + // FIXME: Remove when MSRV >= 1.87. + // `io::ErrorKind::InvalidFilename` is stabilized in v1.87 + #[cfg(windows)] + if let Some(raw_err) = err.raw_os_error() { + // https://github.com/rust-lang/rust/blob/70e2b4a4d197f154bed0eb3dcb5cac6a948ff3a3/library/std/src/sys/pal/windows/mod.rs + // Lines 81 and 115 + if (raw_err == 123) || (raw_err == 161) || (raw_err == 206) { + return true; + } + } + + false +} + +fn check_modified_headers( + modified: Option<&LastModified>, + if_unmodified_since: Option, + if_modified_since: Option, +) -> Option { + if let Some(since) = if_unmodified_since { + let precondition = modified + .as_ref() + .map(|time| since.precondition_passes(time)) + .unwrap_or(false); + + if !precondition { + return Some(OpenFileOutput::PreconditionFailed); + } + } + + if let Some(since) = if_modified_since { + let unmodified = modified + .as_ref() + .map(|time| !since.is_modified(time)) + // no last_modified means its always modified + .unwrap_or(false); + if unmodified { + return Some(OpenFileOutput::NotModified); + } + } + + None +} + +// Returns the preferred_encoding encoding and modifies the path extension +// to the corresponding file extension for the encoding. 
+fn preferred_encoding( + path: &mut PathBuf, + negotiated_encoding: &[(Encoding, QValue)], +) -> Option { + let preferred_encoding = Encoding::preferred_encoding(negotiated_encoding.iter().copied()); + + if let Some(file_extension) = + preferred_encoding.and_then(|encoding| encoding.to_file_extension()) + { + let new_file_name = path + .file_name() + .map(|file_name| { + let mut os_string = file_name.to_os_string(); + os_string.push(file_extension); + os_string + }) + .unwrap_or_else(|| file_extension.to_os_string()); + + path.set_file_name(new_file_name); + } + + preferred_encoding +} + +// Attempts to open the file with any of the possible negotiated_encodings in the +// preferred order. If none of the negotiated_encodings have a corresponding precompressed +// file the uncompressed file is used as a fallback. +async fn open_file_with_fallback( + mut path: PathBuf, + mut negotiated_encoding: Vec<(Encoding, QValue)>, +) -> io::Result<(File, Option)> { + let (file, encoding) = loop { + // Get the preferred encoding among the negotiated ones. + let encoding = preferred_encoding(&mut path, &negotiated_encoding); + match (File::open(&path).await, encoding) { + (Ok(file), maybe_encoding) => break (file, maybe_encoding), + (Err(err), Some(encoding)) if err.kind() == io::ErrorKind::NotFound => { + // Remove the extension corresponding to a precompressed file (.gz, .br, .zz) + // to reset the path before the next iteration. + path.set_extension(OsStr::new("")); + // Remove the encoding from the negotiated_encodings since the file doesn't exist + negotiated_encoding + .retain(|(negotiated_encoding, _)| *negotiated_encoding != encoding); + } + (Err(err), _) => return Err(err), + } + }; + Ok((file, encoding)) +} + +// Attempts to get the file metadata with any of the possible negotiated_encodings in the +// preferred order. If none of the negotiated_encodings have a corresponding precompressed +// file the uncompressed file is used as a fallback. 
+async fn file_metadata_with_fallback( + mut path: PathBuf, + mut negotiated_encoding: Vec<(Encoding, QValue)>, +) -> io::Result<(Metadata, Option)> { + let (file, encoding) = loop { + // Get the preferred encoding among the negotiated ones. + let encoding = preferred_encoding(&mut path, &negotiated_encoding); + match (tokio::fs::metadata(&path).await, encoding) { + (Ok(file), maybe_encoding) => break (file, maybe_encoding), + (Err(err), Some(encoding)) if err.kind() == io::ErrorKind::NotFound => { + // Remove the extension corresponding to a precompressed file (.gz, .br, .zz) + // to reset the path before the next iteration. + path.set_extension(OsStr::new("")); + // Remove the encoding from the negotiated_encodings since the file doesn't exist + negotiated_encoding + .retain(|(negotiated_encoding, _)| *negotiated_encoding != encoding); + } + (Err(err), _) => return Err(err), + } + }; + Ok((file, encoding)) +} + +async fn maybe_redirect_or_append_path( + path_to_file: &mut PathBuf, + uri: &Uri, + append_index_html_on_directories: bool, +) -> Option { + if !is_dir(path_to_file).await { + return None; + } + + if !append_index_html_on_directories { + return Some(OpenFileOutput::FileNotFound); + } + + if uri.path().ends_with('/') { + path_to_file.push("index.html"); + None + } else { + let uri = match append_slash_on_path(uri.clone()) { + Ok(uri) => uri, + Err(err) => return Some(err), + }; + let location = HeaderValue::from_str(&uri.to_string()).unwrap(); + Some(OpenFileOutput::Redirect { location }) + } +} + +fn try_parse_range( + maybe_range_ref: Option<&str>, + file_size: u64, +) -> Option>, RangeUnsatisfiableError>> { + maybe_range_ref.map(|header_value| { + http_range_header::parse_range_header(header_value) + .and_then(|first_pass| first_pass.validate(file_size)) + }) +} + +async fn is_dir(path_to_file: &Path) -> bool { + tokio::fs::metadata(path_to_file) + .await + .map_or(false, |meta_data| meta_data.is_dir()) +} + +fn append_slash_on_path(uri: Uri) -> Result 
{ + let http::uri::Parts { + scheme, + authority, + path_and_query, + .. + } = uri.into_parts(); + + let mut uri_builder = Uri::builder(); + + if let Some(scheme) = scheme { + uri_builder = uri_builder.scheme(scheme); + } + + if let Some(authority) = authority { + uri_builder = uri_builder.authority(authority); + } + + let uri_builder = if let Some(path_and_query) = path_and_query { + if let Some(query) = path_and_query.query() { + uri_builder.path_and_query(format!("{}/?{}", path_and_query.path(), query)) + } else { + uri_builder.path_and_query(format!("{}/", path_and_query.path())) + } + } else { + uri_builder.path_and_query("/") + }; + + uri_builder.build().map_err(|err| { + tracing::error!(?err, "redirect uri failed to build"); + OpenFileOutput::InvalidRedirectUri + }) +} + +#[test] +fn preferred_encoding_with_extension() { + let mut path = PathBuf::from("hello.txt"); + preferred_encoding(&mut path, &[(Encoding::Gzip, QValue::one())]); + assert_eq!(path, PathBuf::from("hello.txt.gz")); +} + +#[test] +fn preferred_encoding_without_extension() { + let mut path = PathBuf::from("hello"); + preferred_encoding(&mut path, &[(Encoding::Gzip, QValue::one())]); + assert_eq!(path, PathBuf::from("hello.gz")); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/tests.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..e220197afad84b05aecec73193de1885756aadf7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_dir/tests.rs @@ -0,0 +1,879 @@ +use crate::services::{ServeDir, ServeFile}; +use crate::test_helpers::{to_bytes, Body}; +use brotli::BrotliDecompress; +use bytes::Bytes; +use flate2::bufread::{DeflateDecoder, GzDecoder}; +use http::header::ALLOW; +use http::{header, Method, Response}; +use http::{Request, StatusCode}; +use 
http_body::Body as HttpBody; +use http_body_util::BodyExt; +use std::convert::Infallible; +use std::fs; +use std::io::Read; +use tower::{service_fn, ServiceExt}; + +#[tokio::test] +async fn basic() { + let svc = ServeDir::new(".."); + + let req = Request::builder() + .uri("/README.md") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = body_into_text(res.into_body()).await; + + let contents = std::fs::read_to_string("../README.md").unwrap(); + assert_eq!(body, contents); +} + +#[tokio::test] +async fn basic_with_index() { + let svc = ServeDir::new("../test-files"); + + let req = Request::new(Body::empty()); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()[header::CONTENT_TYPE], "text/html"); + + let body = body_into_text(res.into_body()).await; + assert_eq!(body, "HTML!\n"); +} + +#[tokio::test] +async fn head_request() { + let svc = ServeDir::new("../test-files"); + + let req = Request::builder() + .uri("/precompressed.txt") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-length"], "23"); + + assert!(res.into_body().frame().await.is_none()); +} + +#[tokio::test] +async fn precompresed_head_request() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let req = Request::builder() + .uri("/precompressed.txt") + .header("Accept-Encoding", "gzip") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + assert_eq!(res.headers()["content-length"], "59"); + + assert!(res.into_body().frame().await.is_none()); +} + 
+#[tokio::test] +async fn with_custom_chunk_size() { + let svc = ServeDir::new("..").with_buf_chunk_size(1024 * 32); + + let req = Request::builder() + .uri("/README.md") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = body_into_text(res.into_body()).await; + + let contents = std::fs::read_to_string("../README.md").unwrap(); + assert_eq!(body, contents); +} + +#[tokio::test] +async fn precompressed_gzip() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let req = Request::builder() + .uri("/precompressed.txt") + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = GzDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); +} + +#[tokio::test] +async fn precompressed_br() { + let svc = ServeDir::new("../test-files").precompressed_br(); + + let req = Request::builder() + .uri("/precompressed.txt") + .header("Accept-Encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decompressed = Vec::new(); + BrotliDecompress(&mut &body[..], &mut decompressed).unwrap(); + let decompressed = String::from_utf8(decompressed.to_vec()).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); +} + +#[tokio::test] +async fn precompressed_deflate() { + let svc = 
ServeDir::new("../test-files").precompressed_deflate(); + let request = Request::builder() + .uri("/precompressed.txt") + .header("Accept-Encoding", "deflate,br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "deflate"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = DeflateDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); +} + +#[tokio::test] +async fn unsupported_precompression_alogrithm_fallbacks_to_uncompressed() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let request = Request::builder() + .uri("/precompressed.txt") + .header("Accept-Encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert!(res.headers().get("content-encoding").is_none()); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + assert!(body.starts_with("\"This is a test file!\"")); +} + +#[tokio::test] +async fn only_precompressed_variant_existing() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let request = Request::builder() + .uri("/only_gzipped.txt") + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + // Should reply with gzipped file if client supports it + let request = Request::builder() + .uri("/only_gzipped.txt") + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], 
"gzip"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = GzDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file\"")); +} + +#[tokio::test] +async fn missing_precompressed_variant_fallbacks_to_uncompressed() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let request = Request::builder() + .uri("/missing_precompressed.txt") + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + // Uncompressed file is served because compressed version is missing + assert!(res.headers().get("content-encoding").is_none()); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + assert!(body.starts_with("Test file!")); +} + +#[tokio::test] +async fn missing_precompressed_variant_fallbacks_to_uncompressed_for_head_request() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let request = Request::builder() + .uri("/missing_precompressed.txt") + .header("Accept-Encoding", "gzip") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-length"], "11"); + // Uncompressed file is served because compressed version is missing + assert!(res.headers().get("content-encoding").is_none()); + + assert!(res.into_body().frame().await.is_none()); +} + +#[tokio::test] +async fn precompressed_without_extension() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let request = Request::builder() + .uri("/extensionless_precompressed") + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = 
svc.oneshot(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + + assert_eq!(res.headers()["content-type"], "application/octet-stream"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = GzDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + + let correct = fs::read_to_string("../test-files/extensionless_precompressed").unwrap(); + assert_eq!(decompressed, correct); +} + +#[tokio::test] +async fn missing_precompressed_without_extension_fallbacks_to_uncompressed() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let request = Request::builder() + .uri("/extensionless_precompressed_missing") + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + + assert_eq!(res.headers()["content-type"], "application/octet-stream"); + assert!(res.headers().get("content-encoding").is_none()); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + + let correct = fs::read_to_string("../test-files/extensionless_precompressed_missing").unwrap(); + assert_eq!(body, correct); +} + +#[tokio::test] +async fn access_to_sub_dirs() { + let svc = ServeDir::new(".."); + + let req = Request::builder() + .uri("/tower-http/Cargo.toml") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/x-toml"); + + let body = body_into_text(res.into_body()).await; + + let contents = std::fs::read_to_string("Cargo.toml").unwrap(); + assert_eq!(body, contents); +} + +#[tokio::test] +async fn not_found() { + let svc = ServeDir::new(".."); + + let req = Request::builder() + .uri("/not-found") + 
.body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + + let body = body_into_text(res.into_body()).await; + assert!(body.is_empty()); +} + +#[cfg(unix)] +#[tokio::test] +async fn not_found_when_not_a_directory() { + let svc = ServeDir::new("../test-files"); + + // `index.html` is a file, and we are trying to request + // it as a directory. + let req = Request::builder() + .uri("/index.html/some_file") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + // This should lead to a 404 + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + + let body = body_into_text(res.into_body()).await; + assert!(body.is_empty()); +} + +#[tokio::test] +async fn not_found_precompressed() { + let svc = ServeDir::new("../test-files").precompressed_gzip(); + + let req = Request::builder() + .uri("/not-found") + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + + let body = body_into_text(res.into_body()).await; + assert!(body.is_empty()); +} + +#[tokio::test] +async fn fallbacks_to_different_precompressed_variant_if_not_found_for_head_request() { + let svc = ServeDir::new("../test-files") + .precompressed_gzip() + .precompressed_br(); + + let req = Request::builder() + .uri("/precompressed_br.txt") + .header("Accept-Encoding", "gzip,br,deflate") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); + assert_eq!(res.headers()["content-length"], "15"); + + assert!(res.into_body().frame().await.is_none()); +} + 
+#[tokio::test] +async fn fallbacks_to_different_precompressed_variant_if_not_found() { + let svc = ServeDir::new("../test-files") + .precompressed_gzip() + .precompressed_br(); + + let req = Request::builder() + .uri("/precompressed_br.txt") + .header("Accept-Encoding", "gzip,br,deflate") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decompressed = Vec::new(); + BrotliDecompress(&mut &body[..], &mut decompressed).unwrap(); + let decompressed = String::from_utf8(decompressed.to_vec()).unwrap(); + assert!(decompressed.starts_with("Test file")); +} + +#[tokio::test] +async fn redirect_to_trailing_slash_on_dir() { + let svc = ServeDir::new("."); + + let req = Request::builder().uri("/src").body(Body::empty()).unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::TEMPORARY_REDIRECT); + + let location = &res.headers()[http::header::LOCATION]; + assert_eq!(location, "/src/"); +} + +#[tokio::test] +async fn empty_directory_without_index() { + let svc = ServeDir::new(".").append_index_html_on_directories(false); + + let req = Request::new(Body::empty()); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + + let body = body_into_text(res.into_body()).await; + assert!(body.is_empty()); +} + +#[tokio::test] +async fn empty_directory_without_index_no_information_leak() { + let svc = ServeDir::new("..").append_index_html_on_directories(false); + + let req = Request::builder() + .uri("/test-files") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + + let body = 
body_into_text(res.into_body()).await; + assert!(body.is_empty()); +} + +async fn body_into_text(body: B) -> String +where + B: HttpBody + Unpin, + B::Error: std::fmt::Debug, +{ + let bytes = to_bytes(body).await.unwrap(); + String::from_utf8(bytes.to_vec()).unwrap() +} + +#[tokio::test] +async fn access_cjk_percent_encoded_uri_path() { + // percent encoding present of 你好世界.txt + let cjk_filename_encoded = "%E4%BD%A0%E5%A5%BD%E4%B8%96%E7%95%8C.txt"; + + let svc = ServeDir::new("../test-files"); + + let req = Request::builder() + .uri(format!("/{}", cjk_filename_encoded)) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/plain"); +} + +#[tokio::test] +async fn access_space_percent_encoded_uri_path() { + let encoded_filename = "filename%20with%20space.txt"; + + let svc = ServeDir::new("../test-files"); + + let req = Request::builder() + .uri(format!("/{}", encoded_filename)) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/plain"); +} + +#[tokio::test] +async fn read_partial_empty() { + let svc = ServeDir::new("../test-files"); + + let req = Request::builder() + .uri("/empty.txt") + .header("Range", "bytes=0-") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT); + assert_eq!(res.headers()["content-length"], "0"); + assert_eq!(res.headers()["content-range"], "bytes 0-0/0"); + + let body = to_bytes(res.into_body()).await.ok().unwrap(); + assert!(body.is_empty()); +} + +#[tokio::test] +async fn read_partial_in_bounds() { + let svc = ServeDir::new(".."); + let bytes_start_incl = 9; + let bytes_end_incl = 1023; + + let req = Request::builder() + .uri("/README.md") + .header( + "Range", + format!("bytes={}-{}", bytes_start_incl, 
bytes_end_incl), + ) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + let file_contents = std::fs::read("../README.md").unwrap(); + assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT); + assert_eq!( + res.headers()["content-length"], + (bytes_end_incl - bytes_start_incl + 1).to_string() + ); + assert!(res.headers()["content-range"] + .to_str() + .unwrap() + .starts_with(&format!( + "bytes {}-{}/{}", + bytes_start_incl, + bytes_end_incl, + file_contents.len() + ))); + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = to_bytes(res.into_body()).await.ok().unwrap(); + let source = Bytes::from(file_contents[bytes_start_incl..=bytes_end_incl].to_vec()); + assert_eq!(body, source); +} + +#[tokio::test] +async fn read_partial_accepts_out_of_bounds_range() { + let svc = ServeDir::new(".."); + let bytes_start_incl = 0; + let bytes_end_excl = 9999999; + let requested_len = bytes_end_excl - bytes_start_incl; + + let req = Request::builder() + .uri("/README.md") + .header( + "Range", + format!("bytes={}-{}", bytes_start_incl, requested_len - 1), + ) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT); + let file_contents = std::fs::read("../README.md").unwrap(); + // Out of bounds range gives all bytes + assert_eq!( + res.headers()["content-range"], + &format!( + "bytes 0-{}/{}", + file_contents.len() - 1, + file_contents.len() + ) + ) +} + +#[tokio::test] +async fn read_partial_errs_on_garbage_header() { + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header("Range", "bad_format") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::RANGE_NOT_SATISFIABLE); + let file_contents = std::fs::read("../README.md").unwrap(); + assert_eq!( + res.headers()["content-range"], + &format!("bytes */{}", file_contents.len()) + ) +} + 
+#[tokio::test] +async fn read_partial_errs_on_bad_range() { + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header("Range", "bytes=-1-15") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::RANGE_NOT_SATISFIABLE); + let file_contents = std::fs::read("../README.md").unwrap(); + assert_eq!( + res.headers()["content-range"], + &format!("bytes */{}", file_contents.len()) + ) +} + +#[tokio::test] +async fn accept_encoding_identity() { + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header("Accept-Encoding", "identity") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + // Identity encoding should not be included in the response headers + assert!(res.headers().get("content-encoding").is_none()); +} + +#[tokio::test] +async fn last_modified() { + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + + let last_modified = res + .headers() + .get(header::LAST_MODIFIED) + .expect("Missing last modified header!"); + + // -- If-Modified-Since + + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header(header::IF_MODIFIED_SINCE, last_modified) + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::NOT_MODIFIED); + assert!(res.into_body().frame().await.is_none()); + + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header(header::IF_MODIFIED_SINCE, "Fri, 09 Aug 1996 14:21:40 GMT") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + let readme_bytes = 
include_bytes!("../../../../../README.md"); + let body = res.into_body().collect().await.unwrap().to_bytes(); + assert_eq!(body.as_ref(), readme_bytes); + + // -- If-Unmodified-Since + + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header(header::IF_UNMODIFIED_SINCE, last_modified) + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + let body = res.into_body().collect().await.unwrap().to_bytes(); + assert_eq!(body.as_ref(), readme_bytes); + + let svc = ServeDir::new(".."); + let req = Request::builder() + .uri("/README.md") + .header(header::IF_UNMODIFIED_SINCE, "Fri, 09 Aug 1996 14:21:40 GMT") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::PRECONDITION_FAILED); + assert!(res.into_body().frame().await.is_none()); +} + +#[tokio::test] +async fn with_fallback_svc() { + async fn fallback(req: Request) -> Result, Infallible> { + Ok(Response::new(Body::from(format!( + "from fallback {}", + req.uri().path() + )))) + } + + let svc = ServeDir::new("..").fallback(tower::service_fn(fallback)); + + let req = Request::builder() + .uri("/doesnt-exist") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + + let body = body_into_text(res.into_body()).await; + assert_eq!(body, "from fallback /doesnt-exist"); +} + +#[tokio::test] +async fn with_fallback_serve_file() { + let svc = ServeDir::new("..").fallback(ServeFile::new("../README.md")); + + let req = Request::builder() + .uri("/doesnt-exist") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = body_into_text(res.into_body()).await; + + let contents = std::fs::read_to_string("../README.md").unwrap(); + 
assert_eq!(body, contents); +} + +#[tokio::test] +async fn method_not_allowed() { + let svc = ServeDir::new(".."); + + let req = Request::builder() + .method(Method::POST) + .uri("/README.md") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(res.headers()[ALLOW], "GET,HEAD"); +} + +#[tokio::test] +async fn calling_fallback_on_not_allowed() { + async fn fallback(req: Request) -> Result, Infallible> { + Ok(Response::new(Body::from(format!( + "from fallback {}", + req.uri().path() + )))) + } + + let svc = ServeDir::new("..") + .call_fallback_on_method_not_allowed(true) + .fallback(tower::service_fn(fallback)); + + let req = Request::builder() + .method(Method::POST) + .uri("/doesnt-exist") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + + let body = body_into_text(res.into_body()).await; + assert_eq!(body, "from fallback /doesnt-exist"); +} + +#[tokio::test] +async fn with_fallback_svc_and_not_append_index_html_on_directories() { + async fn fallback(req: Request) -> Result, Infallible> { + Ok(Response::new(Body::from(format!( + "from fallback {}", + req.uri().path() + )))) + } + + let svc = ServeDir::new("..") + .append_index_html_on_directories(false) + .fallback(tower::service_fn(fallback)); + + let req = Request::builder().uri("/").body(Body::empty()).unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + + let body = body_into_text(res.into_body()).await; + assert_eq!(body, "from fallback /"); +} + +// https://github.com/tower-rs/tower-http/issues/308 +#[tokio::test] +async fn calls_fallback_on_invalid_paths() { + async fn fallback(_: T) -> Result, Infallible> { + let mut res = Response::new(Body::empty()); + res.headers_mut() + .insert("from-fallback", "1".parse().unwrap()); + Ok(res) + } + + let svc = 
ServeDir::new("..").fallback(service_fn(fallback)); + + let req = Request::builder() + .uri("/weird_%c3%28_path") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["from-fallback"], "1"); +} + +// https://github.com/tower-rs/tower-http/issues/573 +#[tokio::test] +async fn calls_fallback_on_invalid_filenames() { + async fn fallback(_: T) -> Result, Infallible> { + let mut res = Response::new(Body::empty()); + res.headers_mut() + .insert("from-fallback", "1".parse().unwrap()); + Ok(res) + } + + let svc = ServeDir::new("..").fallback(service_fn(fallback)); + + let req = Request::builder() + .uri("/invalid|path") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["from-fallback"], "1"); +} + +#[tokio::test] +async fn calls_fallback_on_null() { + async fn fallback(_: T) -> Result, Infallible> { + let mut res = Response::new(Body::empty()); + res.headers_mut() + .insert("from-fallback", "1".parse().unwrap()); + Ok(res) + } + + let svc = ServeDir::new("..").fallback(service_fn(fallback)); + + let req = Request::builder() + .uri("/invalid-path%00") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.headers()["from-fallback"], "1"); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_file.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_file.rs new file mode 100644 index 0000000000000000000000000000000000000000..ade3cd151b92917104fa122c2d0209d8a59360b0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/fs/serve_file.rs @@ -0,0 +1,560 @@ +//! Service that serves a file. 
+ +use super::ServeDir; +use http::{HeaderValue, Request}; +use mime::Mime; +use std::{ + path::Path, + task::{Context, Poll}, +}; +use tower_service::Service; + +/// Service that serves a file. +#[derive(Clone, Debug)] +pub struct ServeFile(ServeDir); + +// Note that this is just a special case of ServeDir +impl ServeFile { + /// Create a new [`ServeFile`]. + /// + /// The `Content-Type` will be guessed from the file extension. + pub fn new>(path: P) -> Self { + let guess = mime_guess::from_path(path.as_ref()); + let mime = guess + .first_raw() + .map(HeaderValue::from_static) + .unwrap_or_else(|| { + HeaderValue::from_str(mime::APPLICATION_OCTET_STREAM.as_ref()).unwrap() + }); + + Self(ServeDir::new_single_file(path, mime)) + } + + /// Create a new [`ServeFile`] with a specific mime type. + /// + /// # Panics + /// + /// Will panic if the mime type isn't a valid [header value]. + /// + /// [header value]: https://docs.rs/http/latest/http/header/struct.HeaderValue.html + pub fn new_with_mime>(path: P, mime: &Mime) -> Self { + let mime = HeaderValue::from_str(mime.as_ref()).expect("mime isn't a valid header value"); + Self(ServeDir::new_single_file(path, mime)) + } + + /// Informs the service that it should also look for a precompressed gzip + /// version of the file. + /// + /// If the client has an `Accept-Encoding` header that allows the gzip encoding, + /// the file `foo.txt.gz` will be served instead of `foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the same directory. Different precompressed + /// variants can be combined. + pub fn precompressed_gzip(self) -> Self { + Self(self.0.precompressed_gzip()) + } + + /// Informs the service that it should also look for a precompressed brotli + /// version of the file. 
+ /// + /// If the client has an `Accept-Encoding` header that allows the brotli encoding, + /// the file `foo.txt.br` will be served instead of `foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the same directory. Different precompressed + /// variants can be combined. + pub fn precompressed_br(self) -> Self { + Self(self.0.precompressed_br()) + } + + /// Informs the service that it should also look for a precompressed deflate + /// version of the file. + /// + /// If the client has an `Accept-Encoding` header that allows the deflate encoding, + /// the file `foo.txt.zz` will be served instead of `foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the same directory. Different precompressed + /// variants can be combined. + pub fn precompressed_deflate(self) -> Self { + Self(self.0.precompressed_deflate()) + } + + /// Informs the service that it should also look for a precompressed zstd + /// version of the file. + /// + /// If the client has an `Accept-Encoding` header that allows the zstd encoding, + /// the file `foo.txt.zst` will be served instead of `foo.txt`. + /// If the precompressed file is not available, or the client doesn't support it, + /// the uncompressed version will be served instead. + /// Both the precompressed version and the uncompressed version are expected + /// to be present in the same directory. Different precompressed + /// variants can be combined. + pub fn precompressed_zstd(self) -> Self { + Self(self.0.precompressed_zstd()) + } + + /// Set a specific read buffer chunk size. + /// + /// The default capacity is 64kb. 
+ pub fn with_buf_chunk_size(self, chunk_size: usize) -> Self { + Self(self.0.with_buf_chunk_size(chunk_size)) + } + + /// Call the service and get a future that contains any `std::io::Error` that might have + /// happened. + /// + /// See [`ServeDir::try_call`] for more details. + pub fn try_call( + &mut self, + req: Request, + ) -> super::serve_dir::future::ResponseFuture + where + ReqBody: Send + 'static, + { + self.0.try_call(req) + } +} + +impl Service> for ServeFile +where + ReqBody: Send + 'static, +{ + type Error = >>::Error; + type Response = >>::Response; + type Future = >>::Future; + + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + #[inline] + fn call(&mut self, req: Request) -> Self::Future { + self.0.call(req) + } +} + +#[cfg(test)] +mod tests { + use crate::services::ServeFile; + use crate::test_helpers::Body; + use async_compression::tokio::bufread::ZstdDecoder; + use brotli::BrotliDecompress; + use flate2::bufread::DeflateDecoder; + use flate2::bufread::GzDecoder; + use http::header; + use http::Method; + use http::{Request, StatusCode}; + use http_body_util::BodyExt; + use mime::Mime; + use std::io::Read; + use std::str::FromStr; + use tokio::io::AsyncReadExt; + use tower::ServiceExt; + + #[tokio::test] + async fn basic() { + let svc = ServeFile::new("../README.md"); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + + assert!(body.starts_with("# Tower HTTP")); + } + + #[tokio::test] + async fn basic_with_mime() { + let svc = ServeFile::new_with_mime("../README.md", &Mime::from_str("image/jpg").unwrap()); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "image/jpg"); + + let body = 
res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + + assert!(body.starts_with("# Tower HTTP")); + } + + #[tokio::test] + async fn head_request() { + let svc = ServeFile::new("../test-files/precompressed.txt"); + + let mut request = Request::new(Body::empty()); + *request.method_mut() = Method::HEAD; + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-length"], "23"); + + assert!(res.into_body().frame().await.is_none()); + } + + #[tokio::test] + async fn precompresed_head_request() { + let svc = ServeFile::new("../test-files/precompressed.txt").precompressed_gzip(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + assert_eq!(res.headers()["content-length"], "59"); + + assert!(res.into_body().frame().await.is_none()); + } + + #[tokio::test] + async fn precompressed_gzip() { + let svc = ServeFile::new("../test-files/precompressed.txt").precompressed_gzip(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = GzDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn unsupported_precompression_alogrithm_fallbacks_to_uncompressed() { + let svc = 
ServeFile::new("../test-files/precompressed.txt").precompressed_gzip(); + + let request = Request::builder() + .header("Accept-Encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert!(res.headers().get("content-encoding").is_none()); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + assert!(body.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn missing_precompressed_variant_fallbacks_to_uncompressed() { + let svc = ServeFile::new("../test-files/missing_precompressed.txt").precompressed_gzip(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + // Uncompressed file is served because compressed version is missing + assert!(res.headers().get("content-encoding").is_none()); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + assert!(body.starts_with("Test file!")); + } + + #[tokio::test] + async fn missing_precompressed_variant_fallbacks_to_uncompressed_head_request() { + let svc = ServeFile::new("../test-files/missing_precompressed.txt").precompressed_gzip(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-length"], "11"); + // Uncompressed file is served because compressed version is missing + assert!(res.headers().get("content-encoding").is_none()); + + assert!(res.into_body().frame().await.is_none()); + } + + #[tokio::test] + async fn only_precompressed_variant_existing() { + let svc 
= ServeFile::new("../test-files/only_gzipped.txt").precompressed_gzip(); + + let request = Request::builder().body(Body::empty()).unwrap(); + let res = svc.clone().oneshot(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + + // Should reply with gzipped file if client supports it + let request = Request::builder() + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = GzDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file\"")); + } + + #[tokio::test] + async fn precompressed_br() { + let svc = ServeFile::new("../test-files/precompressed.txt").precompressed_br(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip,br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decompressed = Vec::new(); + BrotliDecompress(&mut &body[..], &mut decompressed).unwrap(); + let decompressed = String::from_utf8(decompressed.to_vec()).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn precompressed_deflate() { + let svc = ServeFile::new("../test-files/precompressed.txt").precompressed_deflate(); + let request = Request::builder() + .header("Accept-Encoding", "deflate,br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], 
"deflate"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = DeflateDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn precompressed_zstd() { + let svc = ServeFile::new("../test-files/precompressed.txt").precompressed_zstd(); + let request = Request::builder() + .header("Accept-Encoding", "zstd,br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "zstd"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = ZstdDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).await.unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn multi_precompressed() { + let svc = ServeFile::new("../test-files/precompressed.txt") + .precompressed_gzip() + .precompressed_br(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip") + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "gzip"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decoder = GzDecoder::new(&body[..]); + let mut decompressed = String::new(); + decoder.read_to_string(&mut decompressed).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + + let request = Request::builder() + .header("Accept-Encoding", "br") + .body(Body::empty()) + .unwrap(); + let res = svc.clone().oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); 
+ + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decompressed = Vec::new(); + BrotliDecompress(&mut &body[..], &mut decompressed).unwrap(); + let decompressed = String::from_utf8(decompressed.to_vec()).unwrap(); + assert!(decompressed.starts_with("\"This is a test file!\"")); + } + + #[tokio::test] + async fn with_custom_chunk_size() { + let svc = ServeFile::new("../README.md").with_buf_chunk_size(1024 * 32); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/markdown"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let body = String::from_utf8(body.to_vec()).unwrap(); + + assert!(body.starts_with("# Tower HTTP")); + } + + #[tokio::test] + async fn fallbacks_to_different_precompressed_variant_if_not_found() { + let svc = ServeFile::new("../test-files/precompressed_br.txt") + .precompressed_gzip() + .precompressed_deflate() + .precompressed_br(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip,deflate,br") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-encoding"], "br"); + + let body = res.into_body().collect().await.unwrap().to_bytes(); + let mut decompressed = Vec::new(); + BrotliDecompress(&mut &body[..], &mut decompressed).unwrap(); + let decompressed = String::from_utf8(decompressed.to_vec()).unwrap(); + assert!(decompressed.starts_with("Test file")); + } + + #[tokio::test] + async fn fallbacks_to_different_precompressed_variant_if_not_found_head_request() { + let svc = ServeFile::new("../test-files/precompressed_br.txt") + .precompressed_gzip() + .precompressed_deflate() + .precompressed_br(); + + let request = Request::builder() + .header("Accept-Encoding", "gzip,deflate,br") + .method(Method::HEAD) + .body(Body::empty()) + .unwrap(); + let res = 
svc.oneshot(request).await.unwrap(); + + assert_eq!(res.headers()["content-type"], "text/plain"); + assert_eq!(res.headers()["content-length"], "15"); + assert_eq!(res.headers()["content-encoding"], "br"); + + assert!(res.into_body().frame().await.is_none()); + } + + #[tokio::test] + async fn returns_404_if_file_doesnt_exist() { + let svc = ServeFile::new("../this-doesnt-exist.md"); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + } + + #[tokio::test] + async fn returns_404_if_file_doesnt_exist_when_precompression_is_used() { + let svc = ServeFile::new("../this-doesnt-exist.md").precompressed_deflate(); + + let request = Request::builder() + .header("Accept-Encoding", "deflate") + .body(Body::empty()) + .unwrap(); + let res = svc.oneshot(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_FOUND); + assert!(res.headers().get(header::CONTENT_TYPE).is_none()); + } + + #[tokio::test] + async fn last_modified() { + let svc = ServeFile::new("../README.md"); + + let req = Request::builder().body(Body::empty()).unwrap(); + let res = svc.oneshot(req).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + + let last_modified = res + .headers() + .get(header::LAST_MODIFIED) + .expect("Missing last modified header!"); + + // -- If-Modified-Since + + let svc = ServeFile::new("../README.md"); + let req = Request::builder() + .header(header::IF_MODIFIED_SINCE, last_modified) + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::NOT_MODIFIED); + assert!(res.into_body().frame().await.is_none()); + + let svc = ServeFile::new("../README.md"); + let req = Request::builder() + .header(header::IF_MODIFIED_SINCE, "Fri, 09 Aug 1996 14:21:40 GMT") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), 
StatusCode::OK); + let readme_bytes = include_bytes!("../../../../README.md"); + let body = res.into_body().collect().await.unwrap().to_bytes(); + assert_eq!(body.as_ref(), readme_bytes); + + // -- If-Unmodified-Since + + let svc = ServeFile::new("../README.md"); + let req = Request::builder() + .header(header::IF_UNMODIFIED_SINCE, last_modified) + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); + let body = res.into_body().collect().await.unwrap().to_bytes(); + assert_eq!(body.as_ref(), readme_bytes); + + let svc = ServeFile::new("../README.md"); + let req = Request::builder() + .header(header::IF_UNMODIFIED_SINCE, "Fri, 09 Aug 1996 14:21:40 GMT") + .body(Body::empty()) + .unwrap(); + + let res = svc.oneshot(req).await.unwrap(); + assert_eq!(res.status(), StatusCode::PRECONDITION_FAILED); + assert!(res.into_body().frame().await.is_none()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..737d2fa195282e5bcb195a22848351ca4e4a7477 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/mod.rs @@ -0,0 +1,21 @@ +//! [`Service`]s that return responses without wrapping other [`Service`]s. +//! +//! These kinds of services are also referred to as "leaf services" since they sit at the leaves of +//! a [tree] of services. +//! +//! [`Service`]: https://docs.rs/tower/latest/tower/trait.Service.html +//! 
[tree]: https://en.wikipedia.org/wiki/Tree_(data_structure) + +#[cfg(feature = "redirect")] +pub mod redirect; + +#[cfg(feature = "redirect")] +#[doc(inline)] +pub use self::redirect::Redirect; + +#[cfg(feature = "fs")] +pub mod fs; + +#[cfg(feature = "fs")] +#[doc(inline)] +pub use self::fs::{ServeDir, ServeFile}; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/redirect.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/redirect.rs new file mode 100644 index 0000000000000000000000000000000000000000..020927c9211cae2d94c742171882720029d9d271 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/services/redirect.rs @@ -0,0 +1,159 @@ +//! Service that redirects all requests. +//! +//! # Example +//! +//! Imagine that we run `example.com` and want to redirect all requests using `HTTP` to `HTTPS`. +//! That can be done like so: +//! +//! ```rust +//! use http::{Request, Uri, StatusCode}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::{Service, ServiceExt}; +//! use tower_http::services::Redirect; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let uri: Uri = "https://example.com/".parse().unwrap(); +//! let mut service: Redirect> = Redirect::permanent(uri); +//! +//! let request = Request::builder() +//! .uri("http://example.com") +//! .body(Full::::default()) +//! .unwrap(); +//! +//! let response = service.oneshot(request).await?; +//! +//! assert_eq!(response.status(), StatusCode::PERMANENT_REDIRECT); +//! assert_eq!(response.headers()["location"], "https://example.com/"); +//! # +//! # Ok(()) +//! # } +//! ``` + +use http::{header, HeaderValue, Response, StatusCode, Uri}; +use std::{ + convert::{Infallible, TryFrom}, + fmt, + future::Future, + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, +}; +use tower_service::Service; + +/// Service that redirects all requests. 
+/// +/// See the [module docs](crate::services::redirect) for more details. +pub struct Redirect { + status_code: StatusCode, + location: HeaderValue, + // Covariant over ResBody, no dropping of ResBody + _marker: PhantomData ResBody>, +} + +impl Redirect { + /// Create a new [`Redirect`] that uses a [`307 Temporary Redirect`][mdn] status code. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/307 + pub fn temporary(uri: Uri) -> Self { + Self::with_status_code(StatusCode::TEMPORARY_REDIRECT, uri) + } + + /// Create a new [`Redirect`] that uses a [`308 Permanent Redirect`][mdn] status code. + /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308 + pub fn permanent(uri: Uri) -> Self { + Self::with_status_code(StatusCode::PERMANENT_REDIRECT, uri) + } + + /// Create a new [`Redirect`] that uses the given status code. + /// + /// # Panics + /// + /// - If `status_code` isn't a [redirection status code][mdn] (3xx). + /// - If `uri` isn't a valid [`HeaderValue`]. 
+ /// + /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#redirection_messages + pub fn with_status_code(status_code: StatusCode, uri: Uri) -> Self { + assert!( + status_code.is_redirection(), + "not a redirection status code" + ); + + Self { + status_code, + location: HeaderValue::try_from(uri.to_string()) + .expect("URI isn't a valid header value"), + _marker: PhantomData, + } + } +} + +impl Service for Redirect +where + ResBody: Default, +{ + type Response = Response; + type Error = Infallible; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: R) -> Self::Future { + ResponseFuture { + status_code: self.status_code, + location: Some(self.location.clone()), + _marker: PhantomData, + } + } +} + +impl fmt::Debug for Redirect { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Redirect") + .field("status_code", &self.status_code) + .field("location", &self.location) + .finish() + } +} + +impl Clone for Redirect { + fn clone(&self) -> Self { + Self { + status_code: self.status_code, + location: self.location.clone(), + _marker: PhantomData, + } + } +} + +/// Response future of [`Redirect`]. 
+#[derive(Debug)] +pub struct ResponseFuture { + location: Option, + status_code: StatusCode, + // Covariant over ResBody, no dropping of ResBody + _marker: PhantomData ResBody>, +} + +impl Future for ResponseFuture +where + ResBody: Default, +{ + type Output = Result, Infallible>; + + fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll { + let mut res = Response::default(); + + *res.status_mut() = self.status_code; + + res.headers_mut() + .insert(header::LOCATION, self.location.take().unwrap()); + + Poll::Ready(Ok(res)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_header/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_header/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..396527ef449015aff4a83f574805ec7996c8b7ab --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_header/mod.rs @@ -0,0 +1,110 @@ +//! Middleware for setting headers on requests and responses. +//! +//! See [request] and [response] for more details. + +use http::{header::HeaderName, HeaderMap, HeaderValue, Request, Response}; + +pub mod request; +pub mod response; + +#[doc(inline)] +pub use self::{ + request::{SetRequestHeader, SetRequestHeaderLayer}, + response::{SetResponseHeader, SetResponseHeaderLayer}, +}; + +/// Trait for producing header values. +/// +/// Used by [`SetRequestHeader`] and [`SetResponseHeader`]. +/// +/// This trait is implemented for closures with the correct type signature. Typically users will +/// not have to implement this trait for their own types. +/// +/// It is also implemented directly for [`HeaderValue`]. When a fixed header value should be added +/// to all responses, it can be supplied directly to the middleware. +pub trait MakeHeaderValue { + /// Try to create a header value from the request or response. 
+ fn make_header_value(&mut self, message: &T) -> Option; +} + +impl MakeHeaderValue for F +where + F: FnMut(&T) -> Option, +{ + fn make_header_value(&mut self, message: &T) -> Option { + self(message) + } +} + +impl MakeHeaderValue for HeaderValue { + fn make_header_value(&mut self, _message: &T) -> Option { + Some(self.clone()) + } +} + +impl MakeHeaderValue for Option { + fn make_header_value(&mut self, _message: &T) -> Option { + self.clone() + } +} + +#[derive(Debug, Clone, Copy)] +enum InsertHeaderMode { + Override, + Append, + IfNotPresent, +} + +impl InsertHeaderMode { + fn apply(self, header_name: &HeaderName, target: &mut T, make: &mut M) + where + T: Headers, + M: MakeHeaderValue, + { + match self { + InsertHeaderMode::Override => { + if let Some(value) = make.make_header_value(target) { + target.headers_mut().insert(header_name.clone(), value); + } + } + InsertHeaderMode::IfNotPresent => { + if !target.headers().contains_key(header_name) { + if let Some(value) = make.make_header_value(target) { + target.headers_mut().insert(header_name.clone(), value); + } + } + } + InsertHeaderMode::Append => { + if let Some(value) = make.make_header_value(target) { + target.headers_mut().append(header_name.clone(), value); + } + } + } + } +} + +trait Headers { + fn headers(&self) -> &HeaderMap; + + fn headers_mut(&mut self) -> &mut HeaderMap; +} + +impl Headers for Request { + fn headers(&self) -> &HeaderMap { + Request::headers(self) + } + + fn headers_mut(&mut self) -> &mut HeaderMap { + Request::headers_mut(self) + } +} + +impl Headers for Response { + fn headers(&self) -> &HeaderMap { + Response::headers(self) + } + + fn headers_mut(&mut self) -> &mut HeaderMap { + Response::headers_mut(self) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_header/request.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_header/request.rs new file mode 100644 index 
0000000000000000000000000000000000000000..4032e23a786296caa127f9dba2c0135759d23950 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_header/request.rs @@ -0,0 +1,254 @@ +//! Set a header on the request. +//! +//! The header value to be set may be provided as a fixed value when the +//! middleware is constructed, or determined dynamically based on the request +//! by a closure. See the [`MakeHeaderValue`] trait for details. +//! +//! # Example +//! +//! Setting a header from a fixed value provided when the middleware is constructed: +//! +//! ``` +//! use http::{Request, Response, header::{self, HeaderValue}}; +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::set_header::SetRequestHeaderLayer; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # let http_client = tower::service_fn(|_: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(Full::::default())) +//! # }); +//! # +//! let mut svc = ServiceBuilder::new() +//! .layer( +//! // Layer that sets `User-Agent: my very cool app` on requests. +//! // +//! // `if_not_present` will only insert the header if it does not already +//! // have a value. +//! SetRequestHeaderLayer::if_not_present( +//! header::USER_AGENT, +//! HeaderValue::from_static("my very cool app"), +//! ) +//! ) +//! .service(http_client); +//! +//! let request = Request::new(Full::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! Setting a header based on a value determined dynamically from the request: +//! +//! ``` +//! use http::{Request, Response, header::{self, HeaderValue}}; +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::set_header::SetRequestHeaderLayer; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! +//! # #[tokio::main] +//! 
# async fn main() -> Result<(), Box> { +//! # let http_client = tower::service_fn(|_: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(Full::::default())) +//! # }); +//! fn date_header_value() -> HeaderValue { +//! // ... +//! # HeaderValue::from_static("now") +//! } +//! +//! let mut svc = ServiceBuilder::new() +//! .layer( +//! // Layer that sets `Date` to the current date and time. +//! // +//! // `overriding` will insert the header and override any previous values it +//! // may have. +//! SetRequestHeaderLayer::overriding( +//! header::DATE, +//! |request: &Request>| { +//! Some(date_header_value()) +//! } +//! ) +//! ) +//! .service(http_client); +//! +//! let request = Request::new(Full::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! # +//! # Ok(()) +//! # } +//! ``` + +use super::{InsertHeaderMode, MakeHeaderValue}; +use http::{header::HeaderName, Request, Response}; +use std::{ + fmt, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`SetRequestHeader`] which adds a request header. +/// +/// See [`SetRequestHeader`] for more details. +pub struct SetRequestHeaderLayer { + header_name: HeaderName, + make: M, + mode: InsertHeaderMode, +} + +impl fmt::Debug for SetRequestHeaderLayer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SetRequestHeaderLayer") + .field("header_name", &self.header_name) + .field("mode", &self.mode) + .field("make", &std::any::type_name::()) + .finish() + } +} + +impl SetRequestHeaderLayer { + /// Create a new [`SetRequestHeaderLayer`]. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + pub fn overriding(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::Override) + } + + /// Create a new [`SetRequestHeaderLayer`]. 
+ /// + /// The new header is always added, preserving any existing values. If previous values exist, + /// the header will have multiple values. + pub fn appending(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::Append) + } + + /// Create a new [`SetRequestHeaderLayer`]. + /// + /// If a previous value exists for the header, the new value is not inserted. + pub fn if_not_present(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::IfNotPresent) + } + + fn new(header_name: HeaderName, make: M, mode: InsertHeaderMode) -> Self { + Self { + make, + header_name, + mode, + } + } +} + +impl Layer for SetRequestHeaderLayer +where + M: Clone, +{ + type Service = SetRequestHeader; + + fn layer(&self, inner: S) -> Self::Service { + SetRequestHeader { + inner, + header_name: self.header_name.clone(), + make: self.make.clone(), + mode: self.mode, + } + } +} + +impl Clone for SetRequestHeaderLayer +where + M: Clone, +{ + fn clone(&self) -> Self { + Self { + make: self.make.clone(), + header_name: self.header_name.clone(), + mode: self.mode, + } + } +} + +/// Middleware that sets a header on the request. +#[derive(Clone)] +pub struct SetRequestHeader { + inner: S, + header_name: HeaderName, + make: M, + mode: InsertHeaderMode, +} + +impl SetRequestHeader { + /// Create a new [`SetRequestHeader`]. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + pub fn overriding(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::Override) + } + + /// Create a new [`SetRequestHeader`]. + /// + /// The new header is always added, preserving any existing values. If previous values exist, + /// the header will have multiple values. 
+ pub fn appending(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::Append) + } + + /// Create a new [`SetRequestHeader`]. + /// + /// If a previous value exists for the header, the new value is not inserted. + pub fn if_not_present(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::IfNotPresent) + } + + fn new(inner: S, header_name: HeaderName, make: M, mode: InsertHeaderMode) -> Self { + Self { + inner, + header_name, + make, + mode, + } + } + + define_inner_service_accessors!(); +} + +impl fmt::Debug for SetRequestHeader +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SetRequestHeader") + .field("inner", &self.inner) + .field("header_name", &self.header_name) + .field("mode", &self.mode) + .field("make", &std::any::type_name::()) + .finish() + } +} + +impl Service> for SetRequestHeader +where + S: Service, Response = Response>, + M: MakeHeaderValue>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + self.mode.apply(&self.header_name, &mut req, &mut self.make); + self.inner.call(req) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_header/response.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_header/response.rs new file mode 100644 index 0000000000000000000000000000000000000000..c7b8ea84407003945b974c5fc67691dd1f112674 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_header/response.rs @@ -0,0 +1,391 @@ +//! Set a header on the response. +//! +//! The header value to be set may be provided as a fixed value when the +//! 
middleware is constructed, or determined dynamically based on the response +//! by a closure. See the [`MakeHeaderValue`] trait for details. +//! +//! # Example +//! +//! Setting a header from a fixed value provided when the middleware is constructed: +//! +//! ``` +//! use http::{Request, Response, header::{self, HeaderValue}}; +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::set_header::SetResponseHeaderLayer; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # let render_html = tower::service_fn(|request: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(request.into_body())) +//! # }); +//! # +//! let mut svc = ServiceBuilder::new() +//! .layer( +//! // Layer that sets `Content-Type: text/html` on responses. +//! // +//! // `if_not_present` will only insert the header if it does not already +//! // have a value. +//! SetResponseHeaderLayer::if_not_present( +//! header::CONTENT_TYPE, +//! HeaderValue::from_static("text/html"), +//! ) +//! ) +//! .service(render_html); +//! +//! let request = Request::new(Full::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.headers()["content-type"], "text/html"); +//! # +//! # Ok(()) +//! # } +//! ``` +//! +//! Setting a header based on a value determined dynamically from the response: +//! +//! ``` +//! use http::{Request, Response, header::{self, HeaderValue}}; +//! use tower::{Service, ServiceExt, ServiceBuilder}; +//! use tower_http::set_header::SetResponseHeaderLayer; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! use http_body::Body as _; // for `Body::size_hint` +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # let render_html = tower::service_fn(|request: Request>| async move { +//! # Ok::<_, std::convert::Infallible>(Response::new(Full::from("1234567890"))) +//! # }); +//! # +//! 
let mut svc = ServiceBuilder::new() +//! .layer( +//! // Layer that sets `Content-Length` if the body has a known size. +//! // Bodies with streaming responses wont have a known size. +//! // +//! // `overriding` will insert the header and override any previous values it +//! // may have. +//! SetResponseHeaderLayer::overriding( +//! header::CONTENT_LENGTH, +//! |response: &Response>| { +//! if let Some(size) = response.body().size_hint().exact() { +//! // If the response body has a known size, returning `Some` will +//! // set the `Content-Length` header to that value. +//! Some(HeaderValue::from_str(&size.to_string()).unwrap()) +//! } else { +//! // If the response body doesn't have a known size, return `None` +//! // to skip setting the header on this response. +//! None +//! } +//! } +//! ) +//! ) +//! .service(render_html); +//! +//! let request = Request::new(Full::default()); +//! +//! let response = svc.ready().await?.call(request).await?; +//! +//! assert_eq!(response.headers()["content-length"], "10"); +//! # +//! # Ok(()) +//! # } +//! ``` + +use super::{InsertHeaderMode, MakeHeaderValue}; +use http::{header::HeaderName, Request, Response}; +use pin_project_lite::pin_project; +use std::{ + fmt, + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`SetResponseHeader`] which adds a response header. +/// +/// See [`SetResponseHeader`] for more details. +pub struct SetResponseHeaderLayer { + header_name: HeaderName, + make: M, + mode: InsertHeaderMode, +} + +impl fmt::Debug for SetResponseHeaderLayer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SetResponseHeaderLayer") + .field("header_name", &self.header_name) + .field("mode", &self.mode) + .field("make", &std::any::type_name::()) + .finish() + } +} + +impl SetResponseHeaderLayer { + /// Create a new [`SetResponseHeaderLayer`]. 
+ /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. + pub fn overriding(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::Override) + } + + /// Create a new [`SetResponseHeaderLayer`]. + /// + /// The new header is always added, preserving any existing values. If previous values exist, + /// the header will have multiple values. + pub fn appending(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::Append) + } + + /// Create a new [`SetResponseHeaderLayer`]. + /// + /// If a previous value exists for the header, the new value is not inserted. + pub fn if_not_present(header_name: HeaderName, make: M) -> Self { + Self::new(header_name, make, InsertHeaderMode::IfNotPresent) + } + + fn new(header_name: HeaderName, make: M, mode: InsertHeaderMode) -> Self { + Self { + make, + header_name, + mode, + } + } +} + +impl Layer for SetResponseHeaderLayer +where + M: Clone, +{ + type Service = SetResponseHeader; + + fn layer(&self, inner: S) -> Self::Service { + SetResponseHeader { + inner, + header_name: self.header_name.clone(), + make: self.make.clone(), + mode: self.mode, + } + } +} + +impl Clone for SetResponseHeaderLayer +where + M: Clone, +{ + fn clone(&self) -> Self { + Self { + make: self.make.clone(), + header_name: self.header_name.clone(), + mode: self.mode, + } + } +} + +/// Middleware that sets a header on the response. +#[derive(Clone)] +pub struct SetResponseHeader { + inner: S, + header_name: HeaderName, + make: M, + mode: InsertHeaderMode, +} + +impl SetResponseHeader { + /// Create a new [`SetResponseHeader`]. + /// + /// If a previous value exists for the same header, it is removed and replaced with the new + /// header value. 
+ pub fn overriding(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::Override) + } + + /// Create a new [`SetResponseHeader`]. + /// + /// The new header is always added, preserving any existing values. If previous values exist, + /// the header will have multiple values. + pub fn appending(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::Append) + } + + /// Create a new [`SetResponseHeader`]. + /// + /// If a previous value exists for the header, the new value is not inserted. + pub fn if_not_present(inner: S, header_name: HeaderName, make: M) -> Self { + Self::new(inner, header_name, make, InsertHeaderMode::IfNotPresent) + } + + fn new(inner: S, header_name: HeaderName, make: M, mode: InsertHeaderMode) -> Self { + Self { + inner, + header_name, + make, + mode, + } + } + + define_inner_service_accessors!(); +} + +impl fmt::Debug for SetResponseHeader +where + S: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SetResponseHeader") + .field("inner", &self.inner) + .field("header_name", &self.header_name) + .field("mode", &self.mode) + .field("make", &std::any::type_name::()) + .finish() + } +} + +impl Service> for SetResponseHeader +where + S: Service, Response = Response>, + M: MakeHeaderValue> + Clone, +{ + type Response = S::Response; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + ResponseFuture { + future: self.inner.call(req), + header_name: self.header_name.clone(), + make: self.make.clone(), + mode: self.mode, + } + } +} + +pin_project! { + /// Response future for [`SetResponseHeader`]. 
+ #[derive(Debug)] + pub struct ResponseFuture { + #[pin] + future: F, + header_name: HeaderName, + make: M, + mode: InsertHeaderMode, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + M: MakeHeaderValue>, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let mut res = ready!(this.future.poll(cx)?); + + this.mode.apply(this.header_name, &mut res, &mut *this.make); + + Poll::Ready(Ok(res)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_helpers::Body; + use http::{header, HeaderValue}; + use std::convert::Infallible; + use tower::{service_fn, ServiceExt}; + + #[tokio::test] + async fn test_override_mode() { + let svc = SetResponseHeader::overriding( + service_fn(|_req: Request| async { + let res = Response::builder() + .header(header::CONTENT_TYPE, "good-content") + .body(Body::empty()) + .unwrap(); + Ok::<_, Infallible>(res) + }), + header::CONTENT_TYPE, + HeaderValue::from_static("text/html"), + ); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + let mut values = res.headers().get_all(header::CONTENT_TYPE).iter(); + assert_eq!(values.next().unwrap(), "text/html"); + assert_eq!(values.next(), None); + } + + #[tokio::test] + async fn test_append_mode() { + let svc = SetResponseHeader::appending( + service_fn(|_req: Request| async { + let res = Response::builder() + .header(header::CONTENT_TYPE, "good-content") + .body(Body::empty()) + .unwrap(); + Ok::<_, Infallible>(res) + }), + header::CONTENT_TYPE, + HeaderValue::from_static("text/html"), + ); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + let mut values = res.headers().get_all(header::CONTENT_TYPE).iter(); + assert_eq!(values.next().unwrap(), "good-content"); + assert_eq!(values.next().unwrap(), "text/html"); + assert_eq!(values.next(), None); + } + + #[tokio::test] + async fn test_skip_if_present_mode() { + let svc = 
SetResponseHeader::if_not_present( + service_fn(|_req: Request| async { + let res = Response::builder() + .header(header::CONTENT_TYPE, "good-content") + .body(Body::empty()) + .unwrap(); + Ok::<_, Infallible>(res) + }), + header::CONTENT_TYPE, + HeaderValue::from_static("text/html"), + ); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + let mut values = res.headers().get_all(header::CONTENT_TYPE).iter(); + assert_eq!(values.next().unwrap(), "good-content"); + assert_eq!(values.next(), None); + } + + #[tokio::test] + async fn test_skip_if_present_mode_when_not_present() { + let svc = SetResponseHeader::if_not_present( + service_fn(|_req: Request| async { + let res = Response::builder().body(Body::empty()).unwrap(); + Ok::<_, Infallible>(res) + }), + header::CONTENT_TYPE, + HeaderValue::from_static("text/html"), + ); + + let res = svc.oneshot(Request::new(Body::empty())).await.unwrap(); + + let mut values = res.headers().get_all(header::CONTENT_TYPE).iter(); + assert_eq!(values.next().unwrap(), "text/html"); + assert_eq!(values.next(), None); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_status.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_status.rs new file mode 100644 index 0000000000000000000000000000000000000000..65f5405e47355737e3be72fac40439c00af18114 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/set_status.rs @@ -0,0 +1,137 @@ +//! Middleware to override status codes. +//! +//! # Example +//! +//! ``` +//! use tower_http::set_status::SetStatusLayer; +//! use http::{Request, Response, StatusCode}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! use std::{iter::once, convert::Infallible}; +//! use tower::{ServiceBuilder, Service, ServiceExt}; +//! +//! async fn handle(req: Request>) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! 
# #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let mut service = ServiceBuilder::new() +//! // change the status to `404 Not Found` regardless what the inner service returns +//! .layer(SetStatusLayer::new(StatusCode::NOT_FOUND)) +//! .service_fn(handle); +//! +//! // Call the service. +//! let request = Request::builder().body(Full::default())?; +//! +//! let response = service.ready().await?.call(request).await?; +//! +//! assert_eq!(response.status(), StatusCode::NOT_FOUND); +//! # +//! # Ok(()) +//! # } +//! ``` + +use http::{Request, Response, StatusCode}; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`SetStatus`] which overrides the status codes. +#[derive(Debug, Clone, Copy)] +pub struct SetStatusLayer { + status: StatusCode, +} + +impl SetStatusLayer { + /// Create a new [`SetStatusLayer`]. + /// + /// The response status code will be `status` regardless of what the inner service returns. + pub fn new(status: StatusCode) -> Self { + SetStatusLayer { status } + } +} + +impl Layer for SetStatusLayer { + type Service = SetStatus; + + fn layer(&self, inner: S) -> Self::Service { + SetStatus::new(inner, self.status) + } +} + +/// Middleware to override status codes. +/// +/// See the [module docs](self) for more details. +#[derive(Debug, Clone, Copy)] +pub struct SetStatus { + inner: S, + status: StatusCode, +} + +impl SetStatus { + /// Create a new [`SetStatus`]. + /// + /// The response status code will be `status` regardless of what the inner service returns. + pub fn new(inner: S, status: StatusCode) -> Self { + Self { status, inner } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `SetStatus` middleware. 
+ /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(status: StatusCode) -> SetStatusLayer { + SetStatusLayer::new(status) + } +} + +impl Service> for SetStatus +where + S: Service, Response = Response>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + ResponseFuture { + inner: self.inner.call(req), + status: Some(self.status), + } + } +} + +pin_project! { + /// Response future for [`SetStatus`]. + pub struct ResponseFuture { + #[pin] + inner: F, + status: Option, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let mut response = ready!(this.inner.poll(cx)?); + *response.status_mut() = this.status.take().expect("future polled after completion"); + Poll::Ready(Ok(response)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/test_helpers.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/test_helpers.rs new file mode 100644 index 0000000000000000000000000000000000000000..af28463c3628c6b7a1b46aaeadedb306354bc35f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/test_helpers.rs @@ -0,0 +1,165 @@ +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +use bytes::Bytes; +use futures_util::TryStream; +use http::HeaderMap; +use http_body::Frame; +use http_body_util::BodyExt; +use pin_project_lite::pin_project; +use sync_wrapper::SyncWrapper; +use tower::BoxError; + +type BoxBody = http_body_util::combinators::UnsyncBoxBody; + +#[derive(Debug)] +pub(crate) struct Body(BoxBody); + +impl Body { + pub(crate) fn new(body: B) -> Self + where + B: http_body::Body + Send + 'static, + B::Error: Into, + { + 
Self(body.map_err(Into::into).boxed_unsync()) + } + + pub(crate) fn empty() -> Self { + Self::new(http_body_util::Empty::new()) + } + + pub(crate) fn from_stream(stream: S) -> Self + where + S: TryStream + Send + 'static, + S::Ok: Into, + S::Error: Into, + { + Self::new(StreamBody { + stream: SyncWrapper::new(stream), + }) + } + + pub(crate) fn with_trailers(self, trailers: HeaderMap) -> WithTrailers { + WithTrailers { + inner: self, + trailers: Some(trailers), + } + } +} + +impl Default for Body { + fn default() -> Self { + Self::empty() + } +} + +macro_rules! body_from_impl { + ($ty:ty) => { + impl From<$ty> for Body { + fn from(buf: $ty) -> Self { + Self::new(http_body_util::Full::from(buf)) + } + } + }; +} + +body_from_impl!(&'static [u8]); +body_from_impl!(std::borrow::Cow<'static, [u8]>); +body_from_impl!(Vec); + +body_from_impl!(&'static str); +body_from_impl!(std::borrow::Cow<'static, str>); +body_from_impl!(String); + +body_from_impl!(Bytes); + +impl http_body::Body for Body { + type Data = Bytes; + type Error = BoxError; + + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + Pin::new(&mut self.0).poll_frame(cx) + } + + fn size_hint(&self) -> http_body::SizeHint { + self.0.size_hint() + } + + fn is_end_stream(&self) -> bool { + self.0.is_end_stream() + } +} + +pin_project! 
{ + struct StreamBody { + #[pin] + stream: SyncWrapper, + } +} + +impl http_body::Body for StreamBody +where + S: TryStream, + S::Ok: Into, + S::Error: Into, +{ + type Data = Bytes; + type Error = BoxError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let stream = self.project().stream.get_pin_mut(); + match std::task::ready!(stream.try_poll_next(cx)) { + Some(Ok(chunk)) => Poll::Ready(Some(Ok(Frame::data(chunk.into())))), + Some(Err(err)) => Poll::Ready(Some(Err(err.into()))), + None => Poll::Ready(None), + } + } +} + +pub(crate) async fn to_bytes(body: T) -> Result +where + T: http_body::Body, +{ + Ok(body.collect().await?.to_bytes()) +} + +pin_project! { + pub(crate) struct WithTrailers { + #[pin] + inner: B, + trailers: Option, + } +} + +impl http_body::Body for WithTrailers +where + B: http_body::Body, +{ + type Data = B::Data; + type Error = B::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project(); + match std::task::ready!(this.inner.poll_frame(cx)) { + Some(frame) => Poll::Ready(Some(frame)), + None => { + if let Some(trailers) = this.trailers.take() { + Poll::Ready(Some(Ok(Frame::trailers(trailers)))) + } else { + Poll::Ready(None) + } + } + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/timeout/body.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/timeout/body.rs new file mode 100644 index 0000000000000000000000000000000000000000..d44f35b8cfd3db45c944f09d98901dcc86c3d505 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/timeout/body.rs @@ -0,0 +1,193 @@ +use crate::BoxError; +use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, + time::Duration, +}; +use tokio::time::{sleep, Sleep}; + +pin_project! 
{ + /// Middleware that applies a timeout to request and response bodies. + /// + /// Wrapper around a [`Body`][`http_body::Body`] to time out if data is not ready within the specified duration. + /// The timeout is enforced between consecutive [`Frame`][`http_body::Frame`] polls, and it + /// resets after each poll. + /// The total time to produce a [`Body`][`http_body::Body`] could exceed the timeout duration without + /// timing out, as long as no single interval between polls exceeds the timeout. + /// + /// If the [`Body`][`http_body::Body`] does not produce a requested data frame within the timeout period, it will return a [`TimeoutError`]. + /// + /// # Differences from [`Timeout`][crate::timeout::Timeout] + /// + /// [`Timeout`][crate::timeout::Timeout] applies a timeout to the request future, not body. + /// That timeout is not reset when bytes are handled, whether the request is active or not. + /// Bodies are handled asynchronously outside of the tower stack's future and thus needs an additional timeout. + /// + /// # Example + /// + /// ``` + /// use http::{Request, Response}; + /// use bytes::Bytes; + /// use http_body_util::Full; + /// use std::time::Duration; + /// use tower::ServiceBuilder; + /// use tower_http::timeout::RequestBodyTimeoutLayer; + /// + /// async fn handle(_: Request>) -> Result>, std::convert::Infallible> { + /// // ... + /// # todo!() + /// } + /// + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// let svc = ServiceBuilder::new() + /// // Timeout bodies after 30 seconds of inactivity + /// .layer(RequestBodyTimeoutLayer::new(Duration::from_secs(30))) + /// .service_fn(handle); + /// # Ok(()) + /// # } + /// ``` + pub struct TimeoutBody { + timeout: Duration, + #[pin] + sleep: Option, + #[pin] + body: B, + } +} + +impl TimeoutBody { + /// Creates a new [`TimeoutBody`]. 
+ pub fn new(timeout: Duration, body: B) -> Self { + TimeoutBody { + timeout, + sleep: None, + body, + } + } +} + +impl Body for TimeoutBody +where + B: Body, + B::Error: Into, +{ + type Data = B::Data; + type Error = Box; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let mut this = self.project(); + + // Start the `Sleep` if not active. + let sleep_pinned = if let Some(some) = this.sleep.as_mut().as_pin_mut() { + some + } else { + this.sleep.set(Some(sleep(*this.timeout))); + this.sleep.as_mut().as_pin_mut().unwrap() + }; + + // Error if the timeout has expired. + if let Poll::Ready(()) = sleep_pinned.poll(cx) { + return Poll::Ready(Some(Err(Box::new(TimeoutError(()))))); + } + + // Check for body data. + let frame = ready!(this.body.poll_frame(cx)); + // A frame is ready. Reset the `Sleep`... + this.sleep.set(None); + + Poll::Ready(frame.transpose().map_err(Into::into).transpose()) + } +} + +/// Error for [`TimeoutBody`]. +#[derive(Debug)] +pub struct TimeoutError(()); + +impl std::error::Error for TimeoutError {} + +impl std::fmt::Display for TimeoutError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "data was not received within the designated timeout") + } +} +#[cfg(test)] +mod tests { + use super::*; + + use bytes::Bytes; + use http_body::Frame; + use http_body_util::BodyExt; + use pin_project_lite::pin_project; + use std::{error::Error, fmt::Display}; + + #[derive(Debug)] + struct MockError; + + impl Error for MockError {} + + impl Display for MockError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "mock error") + } + } + + pin_project! 
{ + struct MockBody { + #[pin] + sleep: Sleep + } + } + + impl Body for MockBody { + type Data = Bytes; + type Error = MockError; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project(); + this.sleep + .poll(cx) + .map(|_| Some(Ok(Frame::data(vec![].into())))) + } + } + + #[tokio::test] + async fn test_body_available_within_timeout() { + let mock_sleep = Duration::from_secs(1); + let timeout_sleep = Duration::from_secs(2); + + let mock_body = MockBody { + sleep: sleep(mock_sleep), + }; + let timeout_body = TimeoutBody::new(timeout_sleep, mock_body); + + assert!(timeout_body + .boxed() + .frame() + .await + .expect("no frame") + .is_ok()); + } + + #[tokio::test] + async fn test_body_unavailable_within_timeout_error() { + let mock_sleep = Duration::from_secs(2); + let timeout_sleep = Duration::from_secs(1); + + let mock_body = MockBody { + sleep: sleep(mock_sleep), + }; + let timeout_body = TimeoutBody::new(timeout_sleep, mock_body); + + assert!(timeout_body.boxed().frame().await.unwrap().is_err()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/timeout/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/timeout/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..e159b23cc4e09b91a5072e9863e42a1422e0d267 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/timeout/mod.rs @@ -0,0 +1,50 @@ +//! Middleware that applies a timeout to requests. +//! +//! If the request does not complete within the specified timeout, it will be aborted and a +//! response with an empty body and a custom status code will be returned. +//! +//! # Differences from `tower::timeout` +//! +//! tower's [`Timeout`](tower::timeout::Timeout) middleware uses an error to signal timeout, i.e. +//! it changes the error type to [`BoxError`](tower::BoxError). For HTTP services that is rarely +//! 
what you want as returning errors will terminate the connection without sending a response. +//! +//! This middleware won't change the error type and instead returns a response with an empty body +//! and the specified status code. That means if your service's error type is [`Infallible`], it will +//! still be [`Infallible`] after applying this middleware. +//! +//! # Example +//! +//! ``` +//! use http::{Request, Response, StatusCode}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use std::{convert::Infallible, time::Duration}; +//! use tower::ServiceBuilder; +//! use tower_http::timeout::TimeoutLayer; +//! +//! async fn handle(_: Request>) -> Result>, Infallible> { +//! // ... +//! # Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! let svc = ServiceBuilder::new() +//! // Timeout requests after 30 seconds with the specified status code +//! .layer(TimeoutLayer::with_status_code(StatusCode::REQUEST_TIMEOUT, Duration::from_secs(30))) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` +//! +//! 
[`Infallible`]: std::convert::Infallible + +mod body; +mod service; + +pub use body::{TimeoutBody, TimeoutError}; +pub use service::{ + RequestBodyTimeout, RequestBodyTimeoutLayer, ResponseBodyTimeout, ResponseBodyTimeoutLayer, + Timeout, TimeoutLayer, +}; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/timeout/service.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/timeout/service.rs new file mode 100644 index 0000000000000000000000000000000000000000..68ea56ef37989be3fc92ff8dbcb48026b55fa42e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/timeout/service.rs @@ -0,0 +1,396 @@ +use crate::timeout::body::TimeoutBody; +use http::{Request, Response, StatusCode}; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, + time::Duration, +}; +use tokio::time::Sleep; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies the [`Timeout`] middleware which apply a timeout to requests. +/// +/// See the [module docs](super) for an example. +#[derive(Debug, Clone, Copy)] +pub struct TimeoutLayer { + timeout: Duration, + status_code: StatusCode, +} + +impl TimeoutLayer { + /// Creates a new [`TimeoutLayer`]. + /// + /// By default, it will return a `408 Request Timeout` response if the request does not complete within the specified timeout. + /// To customize the response status code, use the `with_status_code` method. + #[deprecated(since = "0.6.7", note = "Use `TimeoutLayer::with_status_code` instead")] + pub fn new(timeout: Duration) -> Self { + Self::with_status_code(StatusCode::REQUEST_TIMEOUT, timeout) + } + + /// Creates a new [`TimeoutLayer`] with the specified status code for the timeout response. 
+ pub fn with_status_code(status_code: StatusCode, timeout: Duration) -> Self { + Self { + timeout, + status_code, + } + } +} + +impl Layer for TimeoutLayer { + type Service = Timeout; + + fn layer(&self, inner: S) -> Self::Service { + Timeout::with_status_code(inner, self.status_code, self.timeout) + } +} + +/// Middleware which apply a timeout to requests. +/// +/// See the [module docs](super) for an example. +#[derive(Debug, Clone, Copy)] +pub struct Timeout { + inner: S, + timeout: Duration, + status_code: StatusCode, +} + +impl Timeout { + /// Creates a new [`Timeout`]. + /// + /// By default, it will return a `408 Request Timeout` response if the request does not complete within the specified timeout. + /// To customize the response status code, use the `with_status_code` method. + #[deprecated(since = "0.6.7", note = "Use `Timeout::with_status_code` instead")] + pub fn new(inner: S, timeout: Duration) -> Self { + Self::with_status_code(inner, StatusCode::REQUEST_TIMEOUT, timeout) + } + + /// Creates a new [`Timeout`] with the specified status code for the timeout response. + pub fn with_status_code(inner: S, status_code: StatusCode, timeout: Duration) -> Self { + Self { + inner, + timeout, + status_code, + } + } + + define_inner_service_accessors!(); + + /// Returns a new [`Layer`] that wraps services with a `Timeout` middleware. + /// + /// [`Layer`]: tower_layer::Layer + #[deprecated( + since = "0.6.7", + note = "Use `Timeout::layer_with_status_code` instead" + )] + pub fn layer(timeout: Duration) -> TimeoutLayer { + TimeoutLayer::with_status_code(StatusCode::REQUEST_TIMEOUT, timeout) + } + + /// Returns a new [`Layer`] that wraps services with a `Timeout` middleware with the specified status code. 
+ pub fn layer_with_status_code(status_code: StatusCode, timeout: Duration) -> TimeoutLayer { + TimeoutLayer::with_status_code(status_code, timeout) + } +} + +impl Service> for Timeout +where + S: Service, Response = Response>, + ResBody: Default, +{ + type Response = S::Response; + type Error = S::Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let sleep = tokio::time::sleep(self.timeout); + ResponseFuture { + inner: self.inner.call(req), + sleep, + status_code: self.status_code, + } + } +} + +pin_project! { + /// Response future for [`Timeout`]. + pub struct ResponseFuture { + #[pin] + inner: F, + #[pin] + sleep: Sleep, + status_code: StatusCode, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, + B: Default, +{ + type Output = Result, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + + if this.sleep.poll(cx).is_ready() { + let mut res = Response::new(B::default()); + *res.status_mut() = *this.status_code; + return Poll::Ready(Ok(res)); + } + + this.inner.poll(cx) + } +} + +/// Applies a [`TimeoutBody`] to the request body. +#[derive(Clone, Debug)] +pub struct RequestBodyTimeoutLayer { + timeout: Duration, +} + +impl RequestBodyTimeoutLayer { + /// Creates a new [`RequestBodyTimeoutLayer`]. + pub fn new(timeout: Duration) -> Self { + Self { timeout } + } +} + +impl Layer for RequestBodyTimeoutLayer { + type Service = RequestBodyTimeout; + + fn layer(&self, inner: S) -> Self::Service { + RequestBodyTimeout::new(inner, self.timeout) + } +} + +/// Applies a [`TimeoutBody`] to the request body. +#[derive(Clone, Debug)] +pub struct RequestBodyTimeout { + inner: S, + timeout: Duration, +} + +impl RequestBodyTimeout { + /// Creates a new [`RequestBodyTimeout`]. 
+ pub fn new(service: S, timeout: Duration) -> Self { + Self { + inner: service, + timeout, + } + } + + /// Returns a new [`Layer`] that wraps services with a [`RequestBodyTimeoutLayer`] middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(timeout: Duration) -> RequestBodyTimeoutLayer { + RequestBodyTimeoutLayer::new(timeout) + } + + define_inner_service_accessors!(); +} + +impl Service> for RequestBodyTimeout +where + S: Service>>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let req = req.map(|body| TimeoutBody::new(self.timeout, body)); + self.inner.call(req) + } +} + +/// Applies a [`TimeoutBody`] to the response body. +#[derive(Clone)] +pub struct ResponseBodyTimeoutLayer { + timeout: Duration, +} + +impl ResponseBodyTimeoutLayer { + /// Creates a new [`ResponseBodyTimeoutLayer`]. + pub fn new(timeout: Duration) -> Self { + Self { timeout } + } +} + +impl Layer for ResponseBodyTimeoutLayer { + type Service = ResponseBodyTimeout; + + fn layer(&self, inner: S) -> Self::Service { + ResponseBodyTimeout::new(inner, self.timeout) + } +} + +/// Applies a [`TimeoutBody`] to the response body. +#[derive(Clone)] +pub struct ResponseBodyTimeout { + inner: S, + timeout: Duration, +} + +impl Service> for ResponseBodyTimeout +where + S: Service, Response = Response>, +{ + type Response = Response>; + type Error = S::Error; + type Future = ResponseBodyTimeoutFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + ResponseBodyTimeoutFuture { + inner: self.inner.call(req), + timeout: self.timeout, + } + } +} + +impl ResponseBodyTimeout { + /// Creates a new [`ResponseBodyTimeout`]. 
+ pub fn new(service: S, timeout: Duration) -> Self { + Self { + inner: service, + timeout, + } + } + + /// Returns a new [`Layer`] that wraps services with a [`ResponseBodyTimeoutLayer`] middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(timeout: Duration) -> ResponseBodyTimeoutLayer { + ResponseBodyTimeoutLayer::new(timeout) + } + + define_inner_service_accessors!(); +} + +pin_project! { + /// Response future for [`ResponseBodyTimeout`]. + pub struct ResponseBodyTimeoutFuture { + #[pin] + inner: Fut, + timeout: Duration, + } +} + +impl Future for ResponseBodyTimeoutFuture +where + Fut: Future, E>>, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let timeout = self.timeout; + let this = self.project(); + let res = ready!(this.inner.poll(cx))?; + Poll::Ready(Ok(res.map(|body| TimeoutBody::new(timeout, body)))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_helpers::Body; + use http::{Request, Response, StatusCode}; + use std::time::Duration; + use tower::{BoxError, ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn request_completes_within_timeout() { + let mut service = ServiceBuilder::new() + .layer(TimeoutLayer::with_status_code( + StatusCode::GATEWAY_TIMEOUT, + Duration::from_secs(1), + )) + .service_fn(fast_handler); + + let request = Request::get("/").body(Body::empty()).unwrap(); + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn timeout_middleware_with_custom_status_code() { + let timeout_service = Timeout::with_status_code( + tower::service_fn(slow_handler), + StatusCode::REQUEST_TIMEOUT, + Duration::from_millis(10), + ); + + let mut service = ServiceBuilder::new().service(timeout_service); + + let request = Request::get("/").body(Body::empty()).unwrap(); + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), 
StatusCode::REQUEST_TIMEOUT); + } + + #[tokio::test] + async fn timeout_response_has_empty_body() { + let mut service = ServiceBuilder::new() + .layer(TimeoutLayer::with_status_code( + StatusCode::GATEWAY_TIMEOUT, + Duration::from_millis(10), + )) + .service_fn(slow_handler); + + let request = Request::get("/").body(Body::empty()).unwrap(); + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::GATEWAY_TIMEOUT); + + // Verify the body is empty (default) + use http_body_util::BodyExt; + let body = res.into_body(); + let bytes = body.collect().await.unwrap().to_bytes(); + assert!(bytes.is_empty()); + } + + #[tokio::test] + async fn deprecated_new_method_compatibility() { + #[allow(deprecated)] + let layer = TimeoutLayer::new(Duration::from_millis(10)); + + let mut service = ServiceBuilder::new().layer(layer).service_fn(slow_handler); + + let request = Request::get("/").body(Body::empty()).unwrap(); + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + // Should use default 408 status code + assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT); + } + + async fn slow_handler(_req: Request) -> Result, BoxError> { + tokio::time::sleep(Duration::from_secs(10)).await; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::empty()) + .unwrap()) + } + + async fn fast_handler(_req: Request) -> Result, BoxError> { + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::empty()) + .unwrap()) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/body.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/body.rs new file mode 100644 index 0000000000000000000000000000000000000000..d713f2432c4045c597933a5f1d26fce2245fe2b2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/body.rs @@ -0,0 +1,102 @@ +use super::{DefaultOnBodyChunk, DefaultOnEos, 
DefaultOnFailure, OnBodyChunk, OnEos, OnFailure}; +use crate::classify::ClassifyEos; +use http_body::{Body, Frame}; +use pin_project_lite::pin_project; +use std::{ + fmt, + pin::Pin, + task::{ready, Context, Poll}, + time::Instant, +}; +use tracing::Span; + +pin_project! { + /// Response body for [`Trace`]. + /// + /// [`Trace`]: super::Trace + pub struct ResponseBody { + #[pin] + pub(crate) inner: B, + pub(crate) classify_eos: Option, + pub(crate) on_eos: Option<(OnEos, Instant)>, + pub(crate) on_body_chunk: OnBodyChunk, + pub(crate) on_failure: Option, + pub(crate) start: Instant, + pub(crate) span: Span, + } +} + +impl Body + for ResponseBody +where + B: Body, + B::Error: fmt::Display + 'static, + C: ClassifyEos, + OnEosT: OnEos, + OnBodyChunkT: OnBodyChunk, + OnFailureT: OnFailure, +{ + type Data = B::Data; + type Error = B::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project(); + let _guard = this.span.enter(); + let result = ready!(this.inner.poll_frame(cx)); + + let latency = this.start.elapsed(); + *this.start = Instant::now(); + + match result { + Some(Ok(frame)) => { + let frame = match frame.into_data() { + Ok(chunk) => { + this.on_body_chunk.on_body_chunk(&chunk, latency, this.span); + Frame::data(chunk) + } + Err(frame) => frame, + }; + + let frame = match frame.into_trailers() { + Ok(trailers) => { + if let Some((on_eos, stream_start)) = this.on_eos.take() { + on_eos.on_eos(Some(&trailers), stream_start.elapsed(), this.span); + } + Frame::trailers(trailers) + } + Err(frame) => frame, + }; + + Poll::Ready(Some(Ok(frame))) + } + Some(Err(err)) => { + if let Some((classify_eos, mut on_failure)) = + this.classify_eos.take().zip(this.on_failure.take()) + { + let failure_class = classify_eos.classify_error(&err); + on_failure.on_failure(failure_class, latency, this.span); + } + + Poll::Ready(Some(Err(err))) + } + None => { + if let Some((on_eos, stream_start)) = this.on_eos.take() 
{ + on_eos.on_eos(None, stream_start.elapsed(), this.span); + } + + Poll::Ready(None) + } + } + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + fn size_hint(&self) -> http_body::SizeHint { + self.inner.size_hint() + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/future.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/future.rs new file mode 100644 index 0000000000000000000000000000000000000000..6a73b887aeed81097a5f9031e2abc15445cc903c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/future.rs @@ -0,0 +1,116 @@ +use super::{ + DefaultOnBodyChunk, DefaultOnEos, DefaultOnFailure, DefaultOnResponse, OnBodyChunk, OnEos, + OnFailure, OnResponse, ResponseBody, +}; +use crate::classify::{ClassifiedResponse, ClassifyResponse}; +use http::Response; +use http_body::Body; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, + time::Instant, +}; +use tracing::Span; + +pin_project! { + /// Response future for [`Trace`]. 
+ /// + /// [`Trace`]: super::Trace + pub struct ResponseFuture { + #[pin] + pub(crate) inner: F, + pub(crate) span: Span, + pub(crate) classifier: Option, + pub(crate) on_response: Option, + pub(crate) on_body_chunk: Option, + pub(crate) on_eos: Option, + pub(crate) on_failure: Option, + pub(crate) start: Instant, + } +} + +impl Future + for ResponseFuture +where + Fut: Future, E>>, + ResBody: Body, + ResBody::Error: std::fmt::Display + 'static, + E: std::fmt::Display + 'static, + C: ClassifyResponse, + OnResponseT: OnResponse, + OnFailureT: OnFailure, + OnBodyChunkT: OnBodyChunk, + OnEosT: OnEos, +{ + type Output = Result< + Response>, + E, + >; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let _guard = this.span.enter(); + let result = ready!(this.inner.poll(cx)); + let latency = this.start.elapsed(); + + let classifier = this.classifier.take().unwrap(); + let on_eos = this.on_eos.take(); + let on_body_chunk = this.on_body_chunk.take().unwrap(); + let mut on_failure = this.on_failure.take().unwrap(); + + match result { + Ok(res) => { + let classification = classifier.classify_response(&res); + let start = *this.start; + + this.on_response + .take() + .unwrap() + .on_response(&res, latency, this.span); + + match classification { + ClassifiedResponse::Ready(classification) => { + if let Err(failure_class) = classification { + on_failure.on_failure(failure_class, latency, this.span); + } + + let span = this.span.clone(); + let res = res.map(|body| ResponseBody { + inner: body, + classify_eos: None, + on_eos: on_eos.zip(Some(Instant::now())), + on_body_chunk, + on_failure: Some(on_failure), + start, + span, + }); + + Poll::Ready(Ok(res)) + } + ClassifiedResponse::RequiresEos(classify_eos) => { + let span = this.span.clone(); + let res = res.map(|body| ResponseBody { + inner: body, + classify_eos: Some(classify_eos), + on_eos: on_eos.zip(Some(Instant::now())), + on_body_chunk, + on_failure: Some(on_failure), + start, 
+ span, + }); + + Poll::Ready(Ok(res)) + } + } + } + Err(err) => { + let failure_class = classifier.classify_error(&err); + on_failure.on_failure(failure_class, latency, this.span); + + Poll::Ready(Err(err)) + } + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/layer.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/layer.rs new file mode 100644 index 0000000000000000000000000000000000000000..21ff321c1d785beda9faf4dbfda4c67c8decddbf --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/layer.rs @@ -0,0 +1,236 @@ +use super::{ + DefaultMakeSpan, DefaultOnBodyChunk, DefaultOnEos, DefaultOnFailure, DefaultOnRequest, + DefaultOnResponse, GrpcMakeClassifier, HttpMakeClassifier, Trace, +}; +use crate::classify::{ + GrpcErrorsAsFailures, MakeClassifier, ServerErrorsAsFailures, SharedClassifier, +}; +use tower_layer::Layer; + +/// [`Layer`] that adds high level [tracing] to a [`Service`]. +/// +/// See the [module docs](crate::trace) for more details. +/// +/// [`Layer`]: tower_layer::Layer +/// [tracing]: https://crates.io/crates/tracing +/// [`Service`]: tower_service::Service +#[derive(Debug, Copy, Clone)] +pub struct TraceLayer< + M, + MakeSpan = DefaultMakeSpan, + OnRequest = DefaultOnRequest, + OnResponse = DefaultOnResponse, + OnBodyChunk = DefaultOnBodyChunk, + OnEos = DefaultOnEos, + OnFailure = DefaultOnFailure, +> { + pub(crate) make_classifier: M, + pub(crate) make_span: MakeSpan, + pub(crate) on_request: OnRequest, + pub(crate) on_response: OnResponse, + pub(crate) on_body_chunk: OnBodyChunk, + pub(crate) on_eos: OnEos, + pub(crate) on_failure: OnFailure, +} + +impl TraceLayer { + /// Create a new [`TraceLayer`] using the given [`MakeClassifier`]. 
+ pub fn new(make_classifier: M) -> Self + where + M: MakeClassifier, + { + Self { + make_classifier, + make_span: DefaultMakeSpan::new(), + on_failure: DefaultOnFailure::default(), + on_request: DefaultOnRequest::default(), + on_eos: DefaultOnEos::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_response: DefaultOnResponse::default(), + } + } +} + +impl + TraceLayer +{ + /// Customize what to do when a request is received. + /// + /// `NewOnRequest` is expected to implement [`OnRequest`]. + /// + /// [`OnRequest`]: super::OnRequest + pub fn on_request( + self, + new_on_request: NewOnRequest, + ) -> TraceLayer { + TraceLayer { + on_request: new_on_request, + on_failure: self.on_failure, + on_eos: self.on_eos, + on_body_chunk: self.on_body_chunk, + make_span: self.make_span, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a response has been produced. + /// + /// `NewOnResponse` is expected to implement [`OnResponse`]. + /// + /// [`OnResponse`]: super::OnResponse + pub fn on_response( + self, + new_on_response: NewOnResponse, + ) -> TraceLayer { + TraceLayer { + on_response: new_on_response, + on_request: self.on_request, + on_eos: self.on_eos, + on_body_chunk: self.on_body_chunk, + on_failure: self.on_failure, + make_span: self.make_span, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a body chunk has been sent. + /// + /// `NewOnBodyChunk` is expected to implement [`OnBodyChunk`]. + /// + /// [`OnBodyChunk`]: super::OnBodyChunk + pub fn on_body_chunk( + self, + new_on_body_chunk: NewOnBodyChunk, + ) -> TraceLayer { + TraceLayer { + on_body_chunk: new_on_body_chunk, + on_eos: self.on_eos, + on_failure: self.on_failure, + on_request: self.on_request, + make_span: self.make_span, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a streaming response has closed. 
+ /// + /// `NewOnEos` is expected to implement [`OnEos`]. + /// + /// [`OnEos`]: super::OnEos + pub fn on_eos( + self, + new_on_eos: NewOnEos, + ) -> TraceLayer { + TraceLayer { + on_eos: new_on_eos, + on_body_chunk: self.on_body_chunk, + on_failure: self.on_failure, + on_request: self.on_request, + make_span: self.make_span, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a response has been classified as a failure. + /// + /// `NewOnFailure` is expected to implement [`OnFailure`]. + /// + /// [`OnFailure`]: super::OnFailure + pub fn on_failure( + self, + new_on_failure: NewOnFailure, + ) -> TraceLayer { + TraceLayer { + on_failure: new_on_failure, + on_request: self.on_request, + on_eos: self.on_eos, + on_body_chunk: self.on_body_chunk, + make_span: self.make_span, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize how to make [`Span`]s that all request handling will be wrapped in. + /// + /// `NewMakeSpan` is expected to implement [`MakeSpan`]. + /// + /// [`MakeSpan`]: super::MakeSpan + /// [`Span`]: tracing::Span + pub fn make_span_with( + self, + new_make_span: NewMakeSpan, + ) -> TraceLayer { + TraceLayer { + make_span: new_make_span, + on_request: self.on_request, + on_failure: self.on_failure, + on_body_chunk: self.on_body_chunk, + on_eos: self.on_eos, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } +} + +impl TraceLayer { + /// Create a new [`TraceLayer`] using [`ServerErrorsAsFailures`] which supports classifying + /// regular HTTP responses based on the status code. 
+ pub fn new_for_http() -> Self { + Self { + make_classifier: SharedClassifier::new(ServerErrorsAsFailures::default()), + make_span: DefaultMakeSpan::new(), + on_response: DefaultOnResponse::default(), + on_request: DefaultOnRequest::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_eos: DefaultOnEos::default(), + on_failure: DefaultOnFailure::default(), + } + } +} + +impl TraceLayer { + /// Create a new [`TraceLayer`] using [`GrpcErrorsAsFailures`] which supports classifying + /// gRPC responses and streams based on the `grpc-status` header. + pub fn new_for_grpc() -> Self { + Self { + make_classifier: SharedClassifier::new(GrpcErrorsAsFailures::default()), + make_span: DefaultMakeSpan::new(), + on_response: DefaultOnResponse::default(), + on_request: DefaultOnRequest::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_eos: DefaultOnEos::default(), + on_failure: DefaultOnFailure::default(), + } + } +} + +impl Layer + for TraceLayer +where + M: Clone, + MakeSpan: Clone, + OnRequest: Clone, + OnResponse: Clone, + OnEos: Clone, + OnBodyChunk: Clone, + OnFailure: Clone, +{ + type Service = Trace; + + fn layer(&self, inner: S) -> Self::Service { + Trace { + inner, + make_classifier: self.make_classifier.clone(), + make_span: self.make_span.clone(), + on_request: self.on_request.clone(), + on_eos: self.on_eos.clone(), + on_body_chunk: self.on_body_chunk.clone(), + on_response: self.on_response.clone(), + on_failure: self.on_failure.clone(), + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/make_span.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/make_span.rs new file mode 100644 index 0000000000000000000000000000000000000000..bf558d3b36e0f7bda7a162beabfd1c661c014cd1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/make_span.rs @@ -0,0 +1,113 @@ +use http::Request; +use tracing::{Level, Span}; + +use 
super::DEFAULT_MESSAGE_LEVEL; + +/// Trait used to generate [`Span`]s from requests. [`Trace`] wraps all request handling in this +/// span. +/// +/// [`Span`]: tracing::Span +/// [`Trace`]: super::Trace +pub trait MakeSpan { + /// Make a span from a request. + fn make_span(&mut self, request: &Request) -> Span; +} + +impl MakeSpan for Span { + fn make_span(&mut self, _request: &Request) -> Span { + self.clone() + } +} + +impl MakeSpan for F +where + F: FnMut(&Request) -> Span, +{ + fn make_span(&mut self, request: &Request) -> Span { + self(request) + } +} + +/// The default way [`Span`]s will be created for [`Trace`]. +/// +/// [`Span`]: tracing::Span +/// [`Trace`]: super::Trace +#[derive(Debug, Clone)] +pub struct DefaultMakeSpan { + level: Level, + include_headers: bool, +} + +impl DefaultMakeSpan { + /// Create a new `DefaultMakeSpan`. + pub fn new() -> Self { + Self { + level: DEFAULT_MESSAGE_LEVEL, + include_headers: false, + } + } + + /// Set the [`Level`] used for the [tracing span]. + /// + /// Defaults to [`Level::DEBUG`]. + /// + /// [tracing span]: https://docs.rs/tracing/latest/tracing/#spans + pub fn level(mut self, level: Level) -> Self { + self.level = level; + self + } + + /// Include request headers on the [`Span`]. + /// + /// By default headers are not included. + /// + /// [`Span`]: tracing::Span + pub fn include_headers(mut self, include_headers: bool) -> Self { + self.include_headers = include_headers; + self + } +} + +impl Default for DefaultMakeSpan { + fn default() -> Self { + Self::new() + } +} + +impl MakeSpan for DefaultMakeSpan { + fn make_span(&mut self, request: &Request) -> Span { + // This ugly macro is needed, unfortunately, because `tracing::span!` + // required the level argument to be static. Meaning we can't just pass + // `self.level`. + macro_rules! 
make_span { + ($level:expr) => { + if self.include_headers { + tracing::span!( + $level, + "request", + method = %request.method(), + uri = %request.uri(), + version = ?request.version(), + headers = ?request.headers(), + ) + } else { + tracing::span!( + $level, + "request", + method = %request.method(), + uri = %request.uri(), + version = ?request.version(), + ) + } + } + } + + match self.level { + Level::ERROR => make_span!(Level::ERROR), + Level::WARN => make_span!(Level::WARN), + Level::INFO => make_span!(Level::INFO), + Level::DEBUG => make_span!(Level::DEBUG), + Level::TRACE => make_span!(Level::TRACE), + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..950ee4a7e52f3e64333df2aa70f4c5a46fcd6e39 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/mod.rs @@ -0,0 +1,635 @@ +//! Middleware that adds high level [tracing] to a [`Service`]. +//! +//! # Example +//! +//! Adding tracing to your service can be as simple as: +//! +//! ```rust +//! use http::{Request, Response}; +//! use tower::{ServiceBuilder, ServiceExt, Service}; +//! use tower_http::trace::TraceLayer; +//! use std::convert::Infallible; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! +//! async fn handle(request: Request>) -> Result>, Infallible> { +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! // Setup tracing +//! tracing_subscriber::fmt::init(); +//! +//! let mut service = ServiceBuilder::new() +//! .layer(TraceLayer::new_for_http()) +//! .service_fn(handle); +//! +//! let request = Request::new(Full::from("foo")); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! 
If you run this application with `RUST_LOG=tower_http=trace cargo run` you should see logs like: +//! +//! ```text +//! Mar 05 20:50:28.523 DEBUG request{method=GET path="/foo"}: tower_http::trace::on_request: started processing request +//! Mar 05 20:50:28.524 DEBUG request{method=GET path="/foo"}: tower_http::trace::on_response: finished processing request latency=1 ms status=200 +//! ``` +//! +//! # Customization +//! +//! [`Trace`] comes with good defaults but also supports customizing many aspects of the output. +//! +//! The default behaviour supports some customization: +//! +//! ```rust +//! use http::{Request, Response, HeaderMap, StatusCode}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::ServiceBuilder; +//! use tracing::Level; +//! use tower_http::{ +//! LatencyUnit, +//! trace::{TraceLayer, DefaultMakeSpan, DefaultOnRequest, DefaultOnResponse}, +//! }; +//! use std::time::Duration; +//! # use tower::{ServiceExt, Service}; +//! # use std::convert::Infallible; +//! +//! # async fn handle(request: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::from("foo"))) +//! # } +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # tracing_subscriber::fmt::init(); +//! # +//! let service = ServiceBuilder::new() +//! .layer( +//! TraceLayer::new_for_http() +//! .make_span_with( +//! DefaultMakeSpan::new().include_headers(true) +//! ) +//! .on_request( +//! DefaultOnRequest::new().level(Level::INFO) +//! ) +//! .on_response( +//! DefaultOnResponse::new() +//! .level(Level::INFO) +//! .latency_unit(LatencyUnit::Micros) +//! ) +//! // on so on for `on_eos`, `on_body_chunk`, and `on_failure` +//! ) +//! .service_fn(handle); +//! # let mut service = service; +//! # let response = service +//! # .ready() +//! # .await? +//! # .call(Request::new(Full::from("foo"))) +//! # .await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! However for maximum control you can provide callbacks: +//! +//! ```rust +//! 
use http::{Request, Response, HeaderMap, StatusCode}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::ServiceBuilder; +//! use tower_http::{classify::ServerErrorsFailureClass, trace::TraceLayer}; +//! use std::time::Duration; +//! use tracing::Span; +//! # use tower::{ServiceExt, Service}; +//! # use std::convert::Infallible; +//! +//! # async fn handle(request: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::from("foo"))) +//! # } +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # tracing_subscriber::fmt::init(); +//! # +//! let service = ServiceBuilder::new() +//! .layer( +//! TraceLayer::new_for_http() +//! .make_span_with(|request: &Request>| { +//! tracing::debug_span!("http-request") +//! }) +//! .on_request(|request: &Request>, _span: &Span| { +//! tracing::debug!("started {} {}", request.method(), request.uri().path()) +//! }) +//! .on_response(|response: &Response>, latency: Duration, _span: &Span| { +//! tracing::debug!("response generated in {:?}", latency) +//! }) +//! .on_body_chunk(|chunk: &Bytes, latency: Duration, _span: &Span| { +//! tracing::debug!("sending {} bytes", chunk.len()) +//! }) +//! .on_eos(|trailers: Option<&HeaderMap>, stream_duration: Duration, _span: &Span| { +//! tracing::debug!("stream closed after {:?}", stream_duration) +//! }) +//! .on_failure(|error: ServerErrorsFailureClass, latency: Duration, _span: &Span| { +//! tracing::debug!("something went wrong") +//! }) +//! ) +//! .service_fn(handle); +//! # let mut service = service; +//! # let response = service +//! # .ready() +//! # .await? +//! # .call(Request::new(Full::from("foo"))) +//! # .await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Disabling something +//! +//! Setting the behaviour to `()` will be disable that particular step: +//! +//! ```rust +//! use http::StatusCode; +//! use tower::ServiceBuilder; +//! use tower_http::{classify::ServerErrorsFailureClass, trace::TraceLayer}; +//! 
use std::time::Duration; +//! use tracing::Span; +//! # use tower::{ServiceExt, Service}; +//! # use http_body_util::Full; +//! # use bytes::Bytes; +//! # use http::{Response, Request}; +//! # use std::convert::Infallible; +//! +//! # async fn handle(request: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::from("foo"))) +//! # } +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # tracing_subscriber::fmt::init(); +//! # +//! let service = ServiceBuilder::new() +//! .layer( +//! // This configuration will only emit events on failures +//! TraceLayer::new_for_http() +//! .on_request(()) +//! .on_response(()) +//! .on_body_chunk(()) +//! .on_eos(()) +//! .on_failure(|error: ServerErrorsFailureClass, latency: Duration, _span: &Span| { +//! tracing::debug!("something went wrong") +//! }) +//! ) +//! .service_fn(handle); +//! # let mut service = service; +//! # let response = service +//! # .ready() +//! # .await? +//! # .call(Request::new(Full::from("foo"))) +//! # .await?; +//! # Ok(()) +//! # } +//! ``` +//! +//! # When the callbacks are called +//! +//! ### `on_request` +//! +//! The `on_request` callback is called when the request arrives at the +//! middleware in [`Service::call`] just prior to passing the request to the +//! inner service. +//! +//! ### `on_response` +//! +//! The `on_response` callback is called when the inner service's response +//! future completes with `Ok(response)` regardless if the response is +//! classified as a success or a failure. +//! +//! For example if you're using [`ServerErrorsAsFailures`] as your classifier +//! and the inner service responds with `500 Internal Server Error` then the +//! `on_response` callback is still called. `on_failure` would _also_ be called +//! in this case since the response was classified as a failure. +//! +//! ### `on_body_chunk` +//! +//! The `on_body_chunk` callback is called when the response body produces a new +//! 
chunk, that is when [`Body::poll_frame`] returns a data frame. +//! +//! `on_body_chunk` is called even if the chunk is empty. +//! +//! ### `on_eos` +//! +//! The `on_eos` callback is called when a streaming response body ends, that is +//! when [`Body::poll_frame`] returns a trailers frame. +//! +//! `on_eos` is called even if the trailers produced are `None`. +//! +//! ### `on_failure` +//! +//! The `on_failure` callback is called when: +//! +//! - The inner [`Service`]'s response future resolves to an error. +//! - A response is classified as a failure. +//! - [`Body::poll_frame`] returns an error. +//! - An end-of-stream is classified as a failure. +//! +//! # Recording fields on the span +//! +//! All callbacks receive a reference to the [tracing] [`Span`], corresponding to this request, +//! produced by the closure passed to [`TraceLayer::make_span_with`]. It can be used to [record +//! field values][record] that weren't known when the span was created. +//! +//! ```rust +//! use http::{Request, Response, HeaderMap, StatusCode}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::ServiceBuilder; +//! use tower_http::trace::TraceLayer; +//! use tracing::Span; +//! use std::time::Duration; +//! # use std::convert::Infallible; +//! +//! # async fn handle(request: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::from("foo"))) +//! # } +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # tracing_subscriber::fmt::init(); +//! # +//! let service = ServiceBuilder::new() +//! .layer( +//! TraceLayer::new_for_http() +//! .make_span_with(|request: &Request>| { +//! tracing::debug_span!( +//! "http-request", +//! status_code = tracing::field::Empty, +//! ) +//! }) +//! .on_response(|response: &Response>, _latency: Duration, span: &Span| { +//! span.record("status_code", &tracing::field::display(response.status())); +//! +//! tracing::debug!("response generated") +//! }) +//! ) +//! .service_fn(handle); +//! 
# Ok(()) +//! # } +//! ``` +//! +//! # Providing classifiers +//! +//! Tracing requires determining if a response is a success or failure. [`MakeClassifier`] is used +//! to create a classifier for the incoming request. See the docs for [`MakeClassifier`] and +//! [`ClassifyResponse`] for more details on classification. +//! +//! A [`MakeClassifier`] can be provided when creating a [`TraceLayer`]: +//! +//! ```rust +//! use http::{Request, Response}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::ServiceBuilder; +//! use tower_http::{ +//! trace::TraceLayer, +//! classify::{ +//! MakeClassifier, ClassifyResponse, ClassifiedResponse, NeverClassifyEos, +//! SharedClassifier, +//! }, +//! }; +//! use std::convert::Infallible; +//! +//! # async fn handle(request: Request>) -> Result>, Infallible> { +//! # Ok(Response::new(Full::from("foo"))) +//! # } +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! # tracing_subscriber::fmt::init(); +//! # +//! // Our `MakeClassifier` that always crates `MyClassifier` classifiers. +//! #[derive(Copy, Clone)] +//! struct MyMakeClassify; +//! +//! impl MakeClassifier for MyMakeClassify { +//! type Classifier = MyClassifier; +//! type FailureClass = &'static str; +//! type ClassifyEos = NeverClassifyEos<&'static str>; +//! +//! fn make_classifier(&self, req: &Request) -> Self::Classifier { +//! MyClassifier +//! } +//! } +//! +//! // A classifier that classifies failures as `"something went wrong..."`. +//! #[derive(Copy, Clone)] +//! struct MyClassifier; +//! +//! impl ClassifyResponse for MyClassifier { +//! type FailureClass = &'static str; +//! type ClassifyEos = NeverClassifyEos<&'static str>; +//! +//! fn classify_response( +//! self, +//! res: &Response +//! ) -> ClassifiedResponse { +//! // Classify based on the status code. +//! if res.status().is_server_error() { +//! ClassifiedResponse::Ready(Err("something went wrong...")) +//! } else { +//! ClassifiedResponse::Ready(Ok(())) +//! 
} +//! } +//! +//! fn classify_error(self, error: &E) -> Self::FailureClass +//! where +//! E: std::fmt::Display + 'static, +//! { +//! "something went wrong..." +//! } +//! } +//! +//! let service = ServiceBuilder::new() +//! // Create a trace layer that uses our classifier. +//! .layer(TraceLayer::new(MyMakeClassify)) +//! .service_fn(handle); +//! +//! // Since `MyClassifier` is `Clone` we can also use `SharedClassifier` +//! // to avoid having to define a separate `MakeClassifier`. +//! let service = ServiceBuilder::new() +//! .layer(TraceLayer::new(SharedClassifier::new(MyClassifier))) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` +//! +//! [`TraceLayer`] comes with convenience methods for using common classifiers: +//! +//! - [`TraceLayer::new_for_http`] classifies based on the status code. It doesn't consider +//! streaming responses. +//! - [`TraceLayer::new_for_grpc`] classifies based on the gRPC protocol and supports streaming +//! responses. +//! +//! [tracing]: https://crates.io/crates/tracing +//! [`Service`]: tower_service::Service +//! [`Service::call`]: tower_service::Service::call +//! [`MakeClassifier`]: crate::classify::MakeClassifier +//! [`ClassifyResponse`]: crate::classify::ClassifyResponse +//! [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record +//! [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with +//! [`Span`]: tracing::Span +//! [`ServerErrorsAsFailures`]: crate::classify::ServerErrorsAsFailures +//! 
[`Body::poll_frame`]: http_body::Body::poll_frame + +use std::{fmt, time::Duration}; + +use tracing::Level; + +pub use self::{ + body::ResponseBody, + future::ResponseFuture, + layer::TraceLayer, + make_span::{DefaultMakeSpan, MakeSpan}, + on_body_chunk::{DefaultOnBodyChunk, OnBodyChunk}, + on_eos::{DefaultOnEos, OnEos}, + on_failure::{DefaultOnFailure, OnFailure}, + on_request::{DefaultOnRequest, OnRequest}, + on_response::{DefaultOnResponse, OnResponse}, + service::Trace, +}; +use crate::{ + classify::{GrpcErrorsAsFailures, ServerErrorsAsFailures, SharedClassifier}, + LatencyUnit, +}; + +/// MakeClassifier for HTTP requests. +pub type HttpMakeClassifier = SharedClassifier; + +/// MakeClassifier for gRPC requests. +pub type GrpcMakeClassifier = SharedClassifier; + +macro_rules! event_dynamic_lvl { + ( $(target: $target:expr,)? $(parent: $parent:expr,)? $lvl:expr, $($tt:tt)* ) => { + match $lvl { + tracing::Level::ERROR => { + tracing::event!( + $(target: $target,)? + $(parent: $parent,)? + tracing::Level::ERROR, + $($tt)* + ); + } + tracing::Level::WARN => { + tracing::event!( + $(target: $target,)? + $(parent: $parent,)? + tracing::Level::WARN, + $($tt)* + ); + } + tracing::Level::INFO => { + tracing::event!( + $(target: $target,)? + $(parent: $parent,)? + tracing::Level::INFO, + $($tt)* + ); + } + tracing::Level::DEBUG => { + tracing::event!( + $(target: $target,)? + $(parent: $parent,)? + tracing::Level::DEBUG, + $($tt)* + ); + } + tracing::Level::TRACE => { + tracing::event!( + $(target: $target,)? + $(parent: $parent,)? 
+ tracing::Level::TRACE, + $($tt)* + ); + } + } + }; +} + +mod body; +mod future; +mod layer; +mod make_span; +mod on_body_chunk; +mod on_eos; +mod on_failure; +mod on_request; +mod on_response; +mod service; + +const DEFAULT_MESSAGE_LEVEL: Level = Level::DEBUG; +const DEFAULT_ERROR_LEVEL: Level = Level::ERROR; + +struct Latency { + unit: LatencyUnit, + duration: Duration, +} + +impl fmt::Display for Latency { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.unit { + LatencyUnit::Seconds => write!(f, "{} s", self.duration.as_secs_f64()), + LatencyUnit::Millis => write!(f, "{} ms", self.duration.as_millis()), + LatencyUnit::Micros => write!(f, "{} μs", self.duration.as_micros()), + LatencyUnit::Nanos => write!(f, "{} ns", self.duration.as_nanos()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::classify::ServerErrorsFailureClass; + use crate::test_helpers::Body; + use bytes::Bytes; + use http::{HeaderMap, Request, Response}; + use once_cell::sync::Lazy; + use std::{ + sync::atomic::{AtomicU32, Ordering}, + time::Duration, + }; + use tower::{BoxError, Service, ServiceBuilder, ServiceExt}; + use tracing::Span; + + #[tokio::test] + async fn unary_request() { + static ON_REQUEST_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_RESPONSE_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_BODY_CHUNK_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_EOS: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_FAILURE: Lazy = Lazy::new(|| AtomicU32::new(0)); + + let trace_layer = TraceLayer::new_for_http() + .make_span_with(|_req: &Request| { + tracing::info_span!("test-span", foo = tracing::field::Empty) + }) + .on_request(|_req: &Request, span: &Span| { + span.record("foo", 42); + ON_REQUEST_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_response(|_res: &Response, _latency: Duration, _span: &Span| { + ON_RESPONSE_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_body_chunk(|_chunk: &Bytes, _latency: 
Duration, _span: &Span| { + ON_BODY_CHUNK_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_eos( + |_trailers: Option<&HeaderMap>, _latency: Duration, _span: &Span| { + ON_EOS.fetch_add(1, Ordering::SeqCst); + }, + ) + .on_failure( + |_class: ServerErrorsFailureClass, _latency: Duration, _span: &Span| { + ON_FAILURE.fetch_add(1, Ordering::SeqCst); + }, + ); + + let mut svc = ServiceBuilder::new().layer(trace_layer).service_fn(echo); + + let res = svc + .ready() + .await + .unwrap() + .call(Request::new(Body::from("foobar"))) + .await + .unwrap(); + + assert_eq!(1, ON_REQUEST_COUNT.load(Ordering::SeqCst), "request"); + assert_eq!(1, ON_RESPONSE_COUNT.load(Ordering::SeqCst), "request"); + assert_eq!(0, ON_BODY_CHUNK_COUNT.load(Ordering::SeqCst), "body chunk"); + assert_eq!(0, ON_EOS.load(Ordering::SeqCst), "eos"); + assert_eq!(0, ON_FAILURE.load(Ordering::SeqCst), "failure"); + + crate::test_helpers::to_bytes(res.into_body()) + .await + .unwrap(); + assert_eq!(1, ON_BODY_CHUNK_COUNT.load(Ordering::SeqCst), "body chunk"); + assert_eq!(1, ON_EOS.load(Ordering::SeqCst), "eos"); + assert_eq!(0, ON_FAILURE.load(Ordering::SeqCst), "failure"); + } + + #[tokio::test] + async fn streaming_response() { + static ON_REQUEST_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_RESPONSE_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_BODY_CHUNK_COUNT: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_EOS: Lazy = Lazy::new(|| AtomicU32::new(0)); + static ON_FAILURE: Lazy = Lazy::new(|| AtomicU32::new(0)); + + let trace_layer = TraceLayer::new_for_http() + .on_request(|_req: &Request, _span: &Span| { + ON_REQUEST_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_response(|_res: &Response, _latency: Duration, _span: &Span| { + ON_RESPONSE_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_body_chunk(|_chunk: &Bytes, _latency: Duration, _span: &Span| { + ON_BODY_CHUNK_COUNT.fetch_add(1, Ordering::SeqCst); + }) + .on_eos( + |_trailers: Option<&HeaderMap>, _latency: 
Duration, _span: &Span| { + ON_EOS.fetch_add(1, Ordering::SeqCst); + }, + ) + .on_failure( + |_class: ServerErrorsFailureClass, _latency: Duration, _span: &Span| { + ON_FAILURE.fetch_add(1, Ordering::SeqCst); + }, + ); + + let mut svc = ServiceBuilder::new() + .layer(trace_layer) + .service_fn(streaming_body); + + let res = svc + .ready() + .await + .unwrap() + .call(Request::new(Body::empty())) + .await + .unwrap(); + + assert_eq!(1, ON_REQUEST_COUNT.load(Ordering::SeqCst), "request"); + assert_eq!(1, ON_RESPONSE_COUNT.load(Ordering::SeqCst), "request"); + assert_eq!(0, ON_BODY_CHUNK_COUNT.load(Ordering::SeqCst), "body chunk"); + assert_eq!(0, ON_EOS.load(Ordering::SeqCst), "eos"); + assert_eq!(0, ON_FAILURE.load(Ordering::SeqCst), "failure"); + + crate::test_helpers::to_bytes(res.into_body()) + .await + .unwrap(); + assert_eq!(3, ON_BODY_CHUNK_COUNT.load(Ordering::SeqCst), "body chunk"); + assert_eq!(1, ON_EOS.load(Ordering::SeqCst), "eos"); + assert_eq!(0, ON_FAILURE.load(Ordering::SeqCst), "failure"); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } + + async fn streaming_body(_req: Request) -> Result, BoxError> { + use futures_util::stream::iter; + + let stream = iter(vec![ + Ok::<_, BoxError>(Bytes::from("one")), + Ok::<_, BoxError>(Bytes::from("two")), + Ok::<_, BoxError>(Bytes::from("three")), + ]); + + let body = Body::from_stream(stream); + + Ok(Response::new(body)) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_body_chunk.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_body_chunk.rs new file mode 100644 index 0000000000000000000000000000000000000000..543f2a63658f76806dc03d922facb4af1952ecb7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_body_chunk.rs @@ -0,0 +1,64 @@ +use std::time::Duration; +use tracing::Span; + +/// Trait used to tell [`Trace`] what to 
do when a body chunk has been sent. +/// +/// See the [module docs](../trace/index.html#on_body_chunk) for details on exactly when the +/// `on_body_chunk` callback is called. +/// +/// [`Trace`]: super::Trace +pub trait OnBodyChunk { + /// Do the thing. + /// + /// `latency` is the duration since the response was sent or since the last body chunk as sent. + /// + /// `span` is the `tracing` [`Span`], corresponding to this request, produced by the closure + /// passed to [`TraceLayer::make_span_with`]. It can be used to [record field values][record] + /// that weren't known when the span was created. + /// + /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html + /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record + /// + /// If you're using [hyper] as your server `B` will most likely be [`Bytes`]. + /// + /// [hyper]: https://hyper.rs + /// [`Bytes`]: https://docs.rs/bytes/latest/bytes/struct.Bytes.html + /// [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with + fn on_body_chunk(&mut self, chunk: &B, latency: Duration, span: &Span); +} + +impl OnBodyChunk for F +where + F: FnMut(&B, Duration, &Span), +{ + fn on_body_chunk(&mut self, chunk: &B, latency: Duration, span: &Span) { + self(chunk, latency, span) + } +} + +impl OnBodyChunk for () { + #[inline] + fn on_body_chunk(&mut self, _: &B, _: Duration, _: &Span) {} +} + +/// The default [`OnBodyChunk`] implementation used by [`Trace`]. +/// +/// Simply does nothing. +/// +/// [`Trace`]: super::Trace +#[derive(Debug, Default, Clone)] +pub struct DefaultOnBodyChunk { + _priv: (), +} + +impl DefaultOnBodyChunk { + /// Create a new `DefaultOnBodyChunk`. 
+ pub fn new() -> Self { + Self { _priv: () } + } +} + +impl OnBodyChunk for DefaultOnBodyChunk { + #[inline] + fn on_body_chunk(&mut self, _: &B, _: Duration, _: &Span) {} +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_eos.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_eos.rs new file mode 100644 index 0000000000000000000000000000000000000000..ab90fc9c0d4d728199ca7f4a6f14bcbee4cc95e6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_eos.rs @@ -0,0 +1,107 @@ +use super::{Latency, DEFAULT_MESSAGE_LEVEL}; +use crate::{classify::grpc_errors_as_failures::ParsedGrpcStatus, LatencyUnit}; +use http::header::HeaderMap; +use std::time::Duration; +use tracing::{Level, Span}; + +/// Trait used to tell [`Trace`] what to do when a stream closes. +/// +/// See the [module docs](../trace/index.html#on_eos) for details on exactly when the `on_eos` +/// callback is called. +/// +/// [`Trace`]: super::Trace +pub trait OnEos { + /// Do the thing. + /// + /// `stream_duration` is the duration since the response was sent. + /// + /// `span` is the `tracing` [`Span`], corresponding to this request, produced by the closure + /// passed to [`TraceLayer::make_span_with`]. It can be used to [record field values][record] + /// that weren't known when the span was created. 
+ /// + /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html + /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record + /// [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with + fn on_eos(self, trailers: Option<&HeaderMap>, stream_duration: Duration, span: &Span); +} + +impl OnEos for () { + #[inline] + fn on_eos(self, _: Option<&HeaderMap>, _: Duration, _: &Span) {} +} + +impl OnEos for F +where + F: FnOnce(Option<&HeaderMap>, Duration, &Span), +{ + fn on_eos(self, trailers: Option<&HeaderMap>, stream_duration: Duration, span: &Span) { + self(trailers, stream_duration, span) + } +} + +/// The default [`OnEos`] implementation used by [`Trace`]. +/// +/// [`Trace`]: super::Trace +#[derive(Clone, Debug)] +pub struct DefaultOnEos { + level: Level, + latency_unit: LatencyUnit, +} + +impl Default for DefaultOnEos { + fn default() -> Self { + Self { + level: DEFAULT_MESSAGE_LEVEL, + latency_unit: LatencyUnit::Millis, + } + } +} + +impl DefaultOnEos { + /// Create a new [`DefaultOnEos`]. + pub fn new() -> Self { + Self::default() + } + + /// Set the [`Level`] used for [tracing events]. + /// + /// Defaults to [`Level::DEBUG`]. + /// + /// [tracing events]: https://docs.rs/tracing/latest/tracing/#events + /// [`Level::DEBUG`]: https://docs.rs/tracing/latest/tracing/struct.Level.html#associatedconstant.DEBUG + pub fn level(mut self, level: Level) -> Self { + self.level = level; + self + } + + /// Set the [`LatencyUnit`] latencies will be reported in. + /// + /// Defaults to [`LatencyUnit::Millis`]. 
+ pub fn latency_unit(mut self, latency_unit: LatencyUnit) -> Self { + self.latency_unit = latency_unit; + self + } +} + +impl OnEos for DefaultOnEos { + fn on_eos(self, trailers: Option<&HeaderMap>, stream_duration: Duration, _span: &Span) { + let stream_duration = Latency { + unit: self.latency_unit, + duration: stream_duration, + }; + let status = trailers.and_then(|trailers| { + match crate::classify::grpc_errors_as_failures::classify_grpc_metadata( + trailers, + crate::classify::GrpcCode::Ok.into_bitmask(), + ) { + ParsedGrpcStatus::Success + | ParsedGrpcStatus::HeaderNotString + | ParsedGrpcStatus::HeaderNotInt => Some(0), + ParsedGrpcStatus::NonSuccess(status) => Some(status.get()), + ParsedGrpcStatus::GrpcStatusHeaderMissing => None, + } + }); + + event_dynamic_lvl!(self.level, %stream_duration, status, "end of stream"); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_failure.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_failure.rs new file mode 100644 index 0000000000000000000000000000000000000000..7dfa186dc642d07e9b28e2596415696534970dc2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_failure.rs @@ -0,0 +1,100 @@ +use super::{Latency, DEFAULT_ERROR_LEVEL}; +use crate::LatencyUnit; +use std::{fmt, time::Duration}; +use tracing::{Level, Span}; + +/// Trait used to tell [`Trace`] what to do when a request fails. +/// +/// See the [module docs](../trace/index.html#on_failure) for details on exactly when the +/// `on_failure` callback is called. +/// +/// [`Trace`]: super::Trace +pub trait OnFailure { + /// Do the thing. + /// + /// `latency` is the duration since the request was received. + /// + /// `span` is the `tracing` [`Span`], corresponding to this request, produced by the closure + /// passed to [`TraceLayer::make_span_with`]. 
It can be used to [record field values][record] + /// that weren't known when the span was created. + /// + /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html + /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record + /// [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with + fn on_failure(&mut self, failure_classification: FailureClass, latency: Duration, span: &Span); +} + +impl OnFailure for () { + #[inline] + fn on_failure(&mut self, _: FailureClass, _: Duration, _: &Span) {} +} + +impl OnFailure for F +where + F: FnMut(FailureClass, Duration, &Span), +{ + fn on_failure(&mut self, failure_classification: FailureClass, latency: Duration, span: &Span) { + self(failure_classification, latency, span) + } +} + +/// The default [`OnFailure`] implementation used by [`Trace`]. +/// +/// [`Trace`]: super::Trace +#[derive(Clone, Debug)] +pub struct DefaultOnFailure { + level: Level, + latency_unit: LatencyUnit, +} + +impl Default for DefaultOnFailure { + fn default() -> Self { + Self { + level: DEFAULT_ERROR_LEVEL, + latency_unit: LatencyUnit::Millis, + } + } +} + +impl DefaultOnFailure { + /// Create a new `DefaultOnFailure`. + pub fn new() -> Self { + Self::default() + } + + /// Set the [`Level`] used for [tracing events]. + /// + /// Defaults to [`Level::ERROR`]. + /// + /// [tracing events]: https://docs.rs/tracing/latest/tracing/#events + pub fn level(mut self, level: Level) -> Self { + self.level = level; + self + } + + /// Set the [`LatencyUnit`] latencies will be reported in. + /// + /// Defaults to [`LatencyUnit::Millis`]. 
+ pub fn latency_unit(mut self, latency_unit: LatencyUnit) -> Self { + self.latency_unit = latency_unit; + self + } +} + +impl OnFailure for DefaultOnFailure +where + FailureClass: fmt::Display, +{ + fn on_failure(&mut self, failure_classification: FailureClass, latency: Duration, _: &Span) { + let latency = Latency { + unit: self.latency_unit, + duration: latency, + }; + event_dynamic_lvl!( + self.level, + classification = %failure_classification, + %latency, + "response failed" + ); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_request.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_request.rs new file mode 100644 index 0000000000000000000000000000000000000000..07de1893dbb4a9852c5be29b423c2c0ef1490ffe --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_request.rs @@ -0,0 +1,82 @@ +use super::DEFAULT_MESSAGE_LEVEL; +use http::Request; +use tracing::Level; +use tracing::Span; + +/// Trait used to tell [`Trace`] what to do when a request is received. +/// +/// See the [module docs](../trace/index.html#on_request) for details on exactly when the +/// `on_request` callback is called. +/// +/// [`Trace`]: super::Trace +pub trait OnRequest { + /// Do the thing. + /// + /// `span` is the `tracing` [`Span`], corresponding to this request, produced by the closure + /// passed to [`TraceLayer::make_span_with`]. It can be used to [record field values][record] + /// that weren't known when the span was created. 
+ /// + /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html + /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record + /// [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with + fn on_request(&mut self, request: &Request, span: &Span); +} + +impl OnRequest for () { + #[inline] + fn on_request(&mut self, _: &Request, _: &Span) {} +} + +impl OnRequest for F +where + F: FnMut(&Request, &Span), +{ + fn on_request(&mut self, request: &Request, span: &Span) { + self(request, span) + } +} + +/// The default [`OnRequest`] implementation used by [`Trace`]. +/// +/// [`Trace`]: super::Trace +#[derive(Clone, Debug)] +pub struct DefaultOnRequest { + level: Level, +} + +impl Default for DefaultOnRequest { + fn default() -> Self { + Self { + level: DEFAULT_MESSAGE_LEVEL, + } + } +} + +impl DefaultOnRequest { + /// Create a new `DefaultOnRequest`. + pub fn new() -> Self { + Self::default() + } + + /// Set the [`Level`] used for [tracing events]. + /// + /// Please note that while this will set the level for the tracing events + /// themselves, it might cause them to lack expected information, like + /// request method or path. You can address this using + /// [`DefaultMakeSpan::level`]. + /// + /// Defaults to [`Level::DEBUG`]. 
+ /// + /// [tracing events]: https://docs.rs/tracing/latest/tracing/#events + /// [`DefaultMakeSpan::level`]: crate::trace::DefaultMakeSpan::level + pub fn level(mut self, level: Level) -> Self { + self.level = level; + self + } +} + +impl OnRequest for DefaultOnRequest { + fn on_request(&mut self, _: &Request, _: &Span) { + event_dynamic_lvl!(self.level, "started processing request"); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_response.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_response.rs new file mode 100644 index 0000000000000000000000000000000000000000..c6ece840dd63f295d077c8caab44e63bba5adef3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/on_response.rs @@ -0,0 +1,161 @@ +use super::{Latency, DEFAULT_MESSAGE_LEVEL}; +use crate::LatencyUnit; +use http::Response; +use std::time::Duration; +use tracing::Level; +use tracing::Span; + +/// Trait used to tell [`Trace`] what to do when a response has been produced. +/// +/// See the [module docs](../trace/index.html#on_response) for details on exactly when the +/// `on_response` callback is called. +/// +/// [`Trace`]: super::Trace +pub trait OnResponse { + /// Do the thing. + /// + /// `latency` is the duration since the request was received. + /// + /// `span` is the `tracing` [`Span`], corresponding to this request, produced by the closure + /// passed to [`TraceLayer::make_span_with`]. It can be used to [record field values][record] + /// that weren't known when the span was created. 
+ /// + /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html + /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record + /// [`TraceLayer::make_span_with`]: crate::trace::TraceLayer::make_span_with + fn on_response(self, response: &Response, latency: Duration, span: &Span); +} + +impl OnResponse for () { + #[inline] + fn on_response(self, _: &Response, _: Duration, _: &Span) {} +} + +impl OnResponse for F +where + F: FnOnce(&Response, Duration, &Span), +{ + fn on_response(self, response: &Response, latency: Duration, span: &Span) { + self(response, latency, span) + } +} + +/// The default [`OnResponse`] implementation used by [`Trace`]. +/// +/// [`Trace`]: super::Trace +#[derive(Clone, Debug)] +pub struct DefaultOnResponse { + level: Level, + latency_unit: LatencyUnit, + include_headers: bool, +} + +impl Default for DefaultOnResponse { + fn default() -> Self { + Self { + level: DEFAULT_MESSAGE_LEVEL, + latency_unit: LatencyUnit::Millis, + include_headers: false, + } + } +} + +impl DefaultOnResponse { + /// Create a new `DefaultOnResponse`. + pub fn new() -> Self { + Self::default() + } + + /// Set the [`Level`] used for [tracing events]. + /// + /// Please note that while this will set the level for the tracing events + /// themselves, it might cause them to lack expected information, like + /// request method or path. You can address this using + /// [`DefaultMakeSpan::level`]. + /// + /// Defaults to [`Level::DEBUG`]. + /// + /// [tracing events]: https://docs.rs/tracing/latest/tracing/#events + /// [`DefaultMakeSpan::level`]: crate::trace::DefaultMakeSpan::level + pub fn level(mut self, level: Level) -> Self { + self.level = level; + self + } + + /// Set the [`LatencyUnit`] latencies will be reported in. + /// + /// Defaults to [`LatencyUnit::Millis`]. 
+ pub fn latency_unit(mut self, latency_unit: LatencyUnit) -> Self { + self.latency_unit = latency_unit; + self + } + + /// Include response headers on the [`Event`]. + /// + /// By default headers are not included. + /// + /// [`Event`]: tracing::Event + pub fn include_headers(mut self, include_headers: bool) -> Self { + self.include_headers = include_headers; + self + } +} + +impl OnResponse for DefaultOnResponse { + fn on_response(self, response: &Response, latency: Duration, _: &Span) { + let latency = Latency { + unit: self.latency_unit, + duration: latency, + }; + let response_headers = self + .include_headers + .then(|| tracing::field::debug(response.headers())); + + event_dynamic_lvl!( + self.level, + %latency, + status = status(response), + response_headers, + "finished processing request" + ); + } +} + +fn status(res: &Response) -> Option { + use crate::classify::grpc_errors_as_failures::ParsedGrpcStatus; + + // gRPC-over-HTTP2 uses the "application/grpc[+format]" content type, and gRPC-Web uses + // "application/grpc-web[+format]" or "application/grpc-web-text[+format]", where "format" is + // the message format, e.g. +proto, +json. + // + // So, valid grpc content types include (but are not limited to): + // - application/grpc + // - application/grpc+proto + // - application/grpc-web+proto + // - application/grpc-web-text+proto + // + // For simplicity, we simply check that the content type starts with "application/grpc". 
+ let is_grpc = res + .headers() + .get(http::header::CONTENT_TYPE) + .map_or(false, |value| { + value.as_bytes().starts_with("application/grpc".as_bytes()) + }); + + if is_grpc { + match crate::classify::grpc_errors_as_failures::classify_grpc_metadata( + res.headers(), + crate::classify::GrpcCode::Ok.into_bitmask(), + ) { + ParsedGrpcStatus::Success + | ParsedGrpcStatus::HeaderNotString + | ParsedGrpcStatus::HeaderNotInt => Some(0), + ParsedGrpcStatus::NonSuccess(status) => Some(status.get()), + // if `grpc-status` is missing then its a streaming response and there is no status + // _yet_, so its neither success nor error + ParsedGrpcStatus::GrpcStatusHeaderMissing => None, + } + } else { + Some(res.status().as_u16().into()) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/service.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/service.rs new file mode 100644 index 0000000000000000000000000000000000000000..1ab4c1f001109b81f4e2294a477cd9e3c3b2f084 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/trace/service.rs @@ -0,0 +1,325 @@ +use super::{ + DefaultMakeSpan, DefaultOnBodyChunk, DefaultOnEos, DefaultOnFailure, DefaultOnRequest, + DefaultOnResponse, GrpcMakeClassifier, HttpMakeClassifier, MakeSpan, OnBodyChunk, OnEos, + OnFailure, OnRequest, OnResponse, ResponseBody, ResponseFuture, TraceLayer, +}; +use crate::classify::{ + GrpcErrorsAsFailures, MakeClassifier, ServerErrorsAsFailures, SharedClassifier, +}; +use http::{Request, Response}; +use http_body::Body; +use std::{ + fmt, + task::{Context, Poll}, + time::Instant, +}; +use tower_service::Service; + +/// Middleware that adds high level [tracing] to a [`Service`]. +/// +/// See the [module docs](crate::trace) for an example. 
+/// +/// [tracing]: https://crates.io/crates/tracing +/// [`Service`]: tower_service::Service +#[derive(Debug, Clone, Copy)] +pub struct Trace< + S, + M, + MakeSpan = DefaultMakeSpan, + OnRequest = DefaultOnRequest, + OnResponse = DefaultOnResponse, + OnBodyChunk = DefaultOnBodyChunk, + OnEos = DefaultOnEos, + OnFailure = DefaultOnFailure, +> { + pub(crate) inner: S, + pub(crate) make_classifier: M, + pub(crate) make_span: MakeSpan, + pub(crate) on_request: OnRequest, + pub(crate) on_response: OnResponse, + pub(crate) on_body_chunk: OnBodyChunk, + pub(crate) on_eos: OnEos, + pub(crate) on_failure: OnFailure, +} + +impl Trace { + /// Create a new [`Trace`] using the given [`MakeClassifier`]. + pub fn new(inner: S, make_classifier: M) -> Self + where + M: MakeClassifier, + { + Self { + inner, + make_classifier, + make_span: DefaultMakeSpan::new(), + on_request: DefaultOnRequest::default(), + on_response: DefaultOnResponse::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_eos: DefaultOnEos::default(), + on_failure: DefaultOnFailure::default(), + } + } + + /// Returns a new [`Layer`] that wraps services with a [`TraceLayer`] middleware. + /// + /// [`Layer`]: tower_layer::Layer + pub fn layer(make_classifier: M) -> TraceLayer + where + M: MakeClassifier, + { + TraceLayer::new(make_classifier) + } +} + +impl + Trace +{ + define_inner_service_accessors!(); + + /// Customize what to do when a request is received. + /// + /// `NewOnRequest` is expected to implement [`OnRequest`]. + /// + /// [`OnRequest`]: super::OnRequest + pub fn on_request( + self, + new_on_request: NewOnRequest, + ) -> Trace { + Trace { + on_request: new_on_request, + inner: self.inner, + on_failure: self.on_failure, + on_eos: self.on_eos, + on_body_chunk: self.on_body_chunk, + make_span: self.make_span, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a response has been produced. 
+ /// + /// `NewOnResponse` is expected to implement [`OnResponse`]. + /// + /// [`OnResponse`]: super::OnResponse + pub fn on_response( + self, + new_on_response: NewOnResponse, + ) -> Trace { + Trace { + on_response: new_on_response, + inner: self.inner, + on_request: self.on_request, + on_failure: self.on_failure, + on_body_chunk: self.on_body_chunk, + on_eos: self.on_eos, + make_span: self.make_span, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a body chunk has been sent. + /// + /// `NewOnBodyChunk` is expected to implement [`OnBodyChunk`]. + /// + /// [`OnBodyChunk`]: super::OnBodyChunk + pub fn on_body_chunk( + self, + new_on_body_chunk: NewOnBodyChunk, + ) -> Trace { + Trace { + on_body_chunk: new_on_body_chunk, + on_eos: self.on_eos, + make_span: self.make_span, + inner: self.inner, + on_failure: self.on_failure, + on_request: self.on_request, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a streaming response has closed. + /// + /// `NewOnEos` is expected to implement [`OnEos`]. + /// + /// [`OnEos`]: super::OnEos + pub fn on_eos( + self, + new_on_eos: NewOnEos, + ) -> Trace { + Trace { + on_eos: new_on_eos, + make_span: self.make_span, + inner: self.inner, + on_failure: self.on_failure, + on_request: self.on_request, + on_body_chunk: self.on_body_chunk, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize what to do when a response has been classified as a failure. + /// + /// `NewOnFailure` is expected to implement [`OnFailure`]. 
+ /// + /// [`OnFailure`]: super::OnFailure + pub fn on_failure( + self, + new_on_failure: NewOnFailure, + ) -> Trace { + Trace { + on_failure: new_on_failure, + inner: self.inner, + make_span: self.make_span, + on_body_chunk: self.on_body_chunk, + on_request: self.on_request, + on_eos: self.on_eos, + on_response: self.on_response, + make_classifier: self.make_classifier, + } + } + + /// Customize how to make [`Span`]s that all request handling will be wrapped in. + /// + /// `NewMakeSpan` is expected to implement [`MakeSpan`]. + /// + /// [`MakeSpan`]: super::MakeSpan + /// [`Span`]: tracing::Span + pub fn make_span_with( + self, + new_make_span: NewMakeSpan, + ) -> Trace { + Trace { + make_span: new_make_span, + inner: self.inner, + on_failure: self.on_failure, + on_request: self.on_request, + on_body_chunk: self.on_body_chunk, + on_response: self.on_response, + on_eos: self.on_eos, + make_classifier: self.make_classifier, + } + } +} + +impl + Trace< + S, + HttpMakeClassifier, + DefaultMakeSpan, + DefaultOnRequest, + DefaultOnResponse, + DefaultOnBodyChunk, + DefaultOnEos, + DefaultOnFailure, + > +{ + /// Create a new [`Trace`] using [`ServerErrorsAsFailures`] which supports classifying + /// regular HTTP responses based on the status code. + pub fn new_for_http(inner: S) -> Self { + Self { + inner, + make_classifier: SharedClassifier::new(ServerErrorsAsFailures::default()), + make_span: DefaultMakeSpan::new(), + on_request: DefaultOnRequest::default(), + on_response: DefaultOnResponse::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_eos: DefaultOnEos::default(), + on_failure: DefaultOnFailure::default(), + } + } +} + +impl + Trace< + S, + GrpcMakeClassifier, + DefaultMakeSpan, + DefaultOnRequest, + DefaultOnResponse, + DefaultOnBodyChunk, + DefaultOnEos, + DefaultOnFailure, + > +{ + /// Create a new [`Trace`] using [`GrpcErrorsAsFailures`] which supports classifying + /// gRPC responses and streams based on the `grpc-status` header. 
+ pub fn new_for_grpc(inner: S) -> Self { + Self { + inner, + make_classifier: SharedClassifier::new(GrpcErrorsAsFailures::default()), + make_span: DefaultMakeSpan::new(), + on_request: DefaultOnRequest::default(), + on_response: DefaultOnResponse::default(), + on_body_chunk: DefaultOnBodyChunk::default(), + on_eos: DefaultOnEos::default(), + on_failure: DefaultOnFailure::default(), + } + } +} + +impl< + S, + ReqBody, + ResBody, + M, + OnRequestT, + OnResponseT, + OnFailureT, + OnBodyChunkT, + OnEosT, + MakeSpanT, + > Service> + for Trace +where + S: Service, Response = Response>, + ReqBody: Body, + ResBody: Body, + ResBody::Error: fmt::Display + 'static, + S::Error: fmt::Display + 'static, + M: MakeClassifier, + M::Classifier: Clone, + MakeSpanT: MakeSpan, + OnRequestT: OnRequest, + OnResponseT: OnResponse + Clone, + OnBodyChunkT: OnBodyChunk + Clone, + OnEosT: OnEos + Clone, + OnFailureT: OnFailure + Clone, +{ + type Response = + Response>; + type Error = S::Error; + type Future = + ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let start = Instant::now(); + + let span = self.make_span.make_span(&req); + + let classifier = self.make_classifier.make_classifier(&req); + + let future = { + let _guard = span.enter(); + self.on_request.on_request(&req, &span); + self.inner.call(req) + }; + + ResponseFuture { + inner: future, + span, + classifier: Some(classifier), + on_response: Some(self.on_response.clone()), + on_body_chunk: Some(self.on_body_chunk.clone()), + on_eos: Some(self.on_eos.clone()), + on_failure: Some(self.on_failure.clone()), + start, + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/validate_request.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/validate_request.rs new file mode 100644 index 
0000000000000000000000000000000000000000..efb301e4f8630ae423d04981e9399569ff17137b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-http-0.6.8/src/validate_request.rs @@ -0,0 +1,587 @@ +//! Middleware that validates requests. +//! +//! # Example +//! +//! ``` +//! use tower_http::validate_request::ValidateRequestHeaderLayer; +//! use http::{Request, Response, StatusCode, header::ACCEPT}; +//! use http_body_util::Full; +//! use bytes::Bytes; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! Ok(Response::new(Full::default())) +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! let mut service = ServiceBuilder::new() +//! // Require the `Accept` header to be `application/json`, `*/*` or `application/*` +//! .layer(ValidateRequestHeaderLayer::accept("application/json")) +//! .service_fn(handle); +//! +//! // Requests with the correct value are allowed through +//! let request = Request::builder() +//! .header(ACCEPT, "application/json") +//! .body(Full::default()) +//! .unwrap(); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!(StatusCode::OK, response.status()); +//! +//! // Requests with an invalid value get a `406 Not Acceptable` response +//! let request = Request::builder() +//! .header(ACCEPT, "text/strings") +//! .body(Full::default()) +//! .unwrap(); +//! +//! let response = service +//! .ready() +//! .await? +//! .call(request) +//! .await?; +//! +//! assert_eq!(StatusCode::NOT_ACCEPTABLE, response.status()); +//! # Ok(()) +//! # } +//! ``` +//! +//! Custom validation can be made by implementing [`ValidateRequest`]: +//! +//! ``` +//! use tower_http::validate_request::{ValidateRequestHeaderLayer, ValidateRequest}; +//! use http::{Request, Response, StatusCode, header::ACCEPT}; +//! use http_body_util::Full; +//! 
use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! use bytes::Bytes; +//! +//! #[derive(Clone, Copy)] +//! pub struct MyHeader { /* ... */ } +//! +//! impl ValidateRequest for MyHeader { +//! type ResponseBody = Full; +//! +//! fn validate( +//! &mut self, +//! request: &mut Request, +//! ) -> Result<(), Response> { +//! // validate the request... +//! # unimplemented!() +//! } +//! } +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! Ok(Response::new(Full::default())) +//! } +//! +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! let service = ServiceBuilder::new() +//! // Validate requests using `MyHeader` +//! .layer(ValidateRequestHeaderLayer::custom(MyHeader { /* ... */ })) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` +//! +//! Or using a closure: +//! +//! ``` +//! use tower_http::validate_request::{ValidateRequestHeaderLayer, ValidateRequest}; +//! use http::{Request, Response, StatusCode, header::ACCEPT}; +//! use bytes::Bytes; +//! use http_body_util::Full; +//! use tower::{Service, ServiceExt, ServiceBuilder, service_fn, BoxError}; +//! +//! async fn handle(request: Request>) -> Result>, BoxError> { +//! # todo!(); +//! // ... +//! } +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), BoxError> { +//! let service = ServiceBuilder::new() +//! .layer(ValidateRequestHeaderLayer::custom(|request: &mut Request>| { +//! // Validate the request +//! # Ok::<_, Response>>(()) +//! })) +//! .service_fn(handle); +//! # Ok(()) +//! # } +//! ``` + +use http::{header, Request, Response, StatusCode}; +use mime::{Mime, MimeIter}; +use pin_project_lite::pin_project; +use std::{ + fmt, + future::Future, + marker::PhantomData, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; +use tower_layer::Layer; +use tower_service::Service; + +/// Layer that applies [`ValidateRequestHeader`] which validates all requests. 
+/// +/// See the [module docs](crate::validate_request) for an example. +#[derive(Debug, Clone)] +pub struct ValidateRequestHeaderLayer { + validate: T, +} + +impl ValidateRequestHeaderLayer> { + /// Validate requests have the required Accept header. + /// + /// The `Accept` header is required to be `*/*`, `type/*` or `type/subtype`, + /// as configured. + /// + /// # Panics + /// + /// Panics if `header_value` is not in the form: `type/subtype`, such as `application/json` + /// See `AcceptHeader::new` for when this method panics. + /// + /// # Example + /// + /// ``` + /// use http_body_util::Full; + /// use bytes::Bytes; + /// use tower_http::validate_request::{AcceptHeader, ValidateRequestHeaderLayer}; + /// + /// let layer = ValidateRequestHeaderLayer::>>::accept("application/json"); + /// ``` + /// + /// [`Accept`]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept + pub fn accept(value: &str) -> Self + where + ResBody: Default, + { + Self::custom(AcceptHeader::new(value)) + } +} + +impl ValidateRequestHeaderLayer { + /// Validate requests using a custom method. + pub fn custom(validate: T) -> ValidateRequestHeaderLayer { + Self { validate } + } +} + +impl Layer for ValidateRequestHeaderLayer +where + T: Clone, +{ + type Service = ValidateRequestHeader; + + fn layer(&self, inner: S) -> Self::Service { + ValidateRequestHeader::new(inner, self.validate.clone()) + } +} + +/// Middleware that validates requests. +/// +/// See the [module docs](crate::validate_request) for an example. +#[derive(Clone, Debug)] +pub struct ValidateRequestHeader { + inner: S, + validate: T, +} + +impl ValidateRequestHeader { + fn new(inner: S, validate: T) -> Self { + Self::custom(inner, validate) + } + + define_inner_service_accessors!(); +} + +impl ValidateRequestHeader> { + /// Validate requests have the required Accept header. + /// + /// The `Accept` header is required to be `*/*`, `type/*` or `type/subtype`, + /// as configured. 
+ /// + /// # Panics + /// + /// See `AcceptHeader::new` for when this method panics. + pub fn accept(inner: S, value: &str) -> Self + where + ResBody: Default, + { + Self::custom(inner, AcceptHeader::new(value)) + } +} + +impl ValidateRequestHeader { + /// Validate requests using a custom method. + pub fn custom(inner: S, validate: T) -> ValidateRequestHeader { + Self { inner, validate } + } +} + +impl Service> for ValidateRequestHeader +where + V: ValidateRequest, + S: Service, Response = Response>, +{ + type Response = Response; + type Error = S::Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: Request) -> Self::Future { + match self.validate.validate(&mut req) { + Ok(_) => ResponseFuture::future(self.inner.call(req)), + Err(res) => ResponseFuture::invalid_header_value(res), + } + } +} + +pin_project! { + /// Response future for [`ValidateRequestHeader`]. + pub struct ResponseFuture { + #[pin] + kind: Kind, + } +} + +impl ResponseFuture { + fn future(future: F) -> Self { + Self { + kind: Kind::Future { future }, + } + } + + fn invalid_header_value(res: Response) -> Self { + Self { + kind: Kind::Error { + response: Some(res), + }, + } + } +} + +pin_project! { + #[project = KindProj] + enum Kind { + Future { + #[pin] + future: F, + }, + Error { + response: Option>, + }, + } +} + +impl Future for ResponseFuture +where + F: Future, E>>, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.project().kind.project() { + KindProj::Future { future } => future.poll(cx), + KindProj::Error { response } => { + let response = response.take().expect("future polled after completion"); + Poll::Ready(Ok(response)) + } + } + } +} + +/// Trait for validating requests. +pub trait ValidateRequest { + /// The body type used for responses to unvalidated requests. + type ResponseBody; + + /// Validate the request. 
+ /// + /// If `Ok(())` is returned then the request is allowed through, otherwise not. + fn validate(&mut self, request: &mut Request) -> Result<(), Response>; +} + +impl ValidateRequest for F +where + F: FnMut(&mut Request) -> Result<(), Response>, +{ + type ResponseBody = ResBody; + + fn validate(&mut self, request: &mut Request) -> Result<(), Response> { + self(request) + } +} + +/// Type that performs validation of the Accept header. +pub struct AcceptHeader { + header_value: Arc, + _ty: PhantomData ResBody>, +} + +impl AcceptHeader { + /// Create a new `AcceptHeader`. + /// + /// # Panics + /// + /// Panics if `header_value` is not in the form: `type/subtype`, such as `application/json` + fn new(header_value: &str) -> Self + where + ResBody: Default, + { + Self { + header_value: Arc::new( + header_value + .parse::() + .expect("value is not a valid header value"), + ), + _ty: PhantomData, + } + } +} + +impl Clone for AcceptHeader { + fn clone(&self) -> Self { + Self { + header_value: self.header_value.clone(), + _ty: PhantomData, + } + } +} + +impl fmt::Debug for AcceptHeader { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("AcceptHeader") + .field("header_value", &self.header_value) + .finish() + } +} + +impl ValidateRequest for AcceptHeader +where + ResBody: Default, +{ + type ResponseBody = ResBody; + + fn validate(&mut self, req: &mut Request) -> Result<(), Response> { + if !req.headers().contains_key(header::ACCEPT) { + return Ok(()); + } + if req + .headers() + .get_all(header::ACCEPT) + .into_iter() + .filter_map(|header| header.to_str().ok()) + .any(|h| { + MimeIter::new(h) + .map(|mim| { + if let Ok(mim) = mim { + let typ = self.header_value.type_(); + let subtype = self.header_value.subtype(); + match (mim.type_(), mim.subtype()) { + (t, s) if t == typ && s == subtype => true, + (t, mime::STAR) if t == typ => true, + (mime::STAR, mime::STAR) => true, + _ => false, + } + } else { + false + } + }) + .reduce(|acc, mim| acc 
|| mim) + .unwrap_or(false) + }) + { + return Ok(()); + } + let mut res = Response::new(ResBody::default()); + *res.status_mut() = StatusCode::NOT_ACCEPTABLE; + Err(res) + } +} + +#[cfg(test)] +mod tests { + #[allow(unused_imports)] + use super::*; + use crate::test_helpers::Body; + use http::header; + use tower::{BoxError, ServiceBuilder, ServiceExt}; + + #[tokio::test] + async fn valid_accept_header() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "application/json") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn valid_accept_header_accept_all_json() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "application/*") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn valid_accept_header_accept_all() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "*/*") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn invalid_accept_header() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "invalid") + .body(Body::empty()) + .unwrap(); + + let res = 
service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_ACCEPTABLE); + } + #[tokio::test] + async fn not_accepted_accept_header_subtype() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "application/strings") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_ACCEPTABLE); + } + + #[tokio::test] + async fn not_accepted_accept_header() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "text/strings") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_ACCEPTABLE); + } + + #[tokio::test] + async fn accepted_multiple_header_value() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "text/strings") + .header(header::ACCEPT, "invalid, application/json") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn accepted_inner_header_value() { + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/json")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, "text/strings, invalid, application/json") + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn 
accepted_header_with_quotes_valid() { + let value = "foo/bar; parisien=\"baguette, text/html, jambon, fromage\", application/*"; + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("application/xml")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, value) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::OK); + } + + #[tokio::test] + async fn accepted_header_with_quotes_invalid() { + let value = "foo/bar; parisien=\"baguette, text/html, jambon, fromage\""; + let mut service = ServiceBuilder::new() + .layer(ValidateRequestHeaderLayer::accept("text/html")) + .service_fn(echo); + + let request = Request::get("/") + .header(header::ACCEPT, value) + .body(Body::empty()) + .unwrap(); + + let res = service.ready().await.unwrap().call(request).await.unwrap(); + + assert_eq!(res.status(), StatusCode::NOT_ACCEPTABLE); + } + + async fn echo(req: Request) -> Result, BoxError> { + Ok(Response::new(req.into_body())) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..7eb7cbf5edbf66514b7c712a003e249bd608e57e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": 
"fec9e559e276ba9609f939d3b0d2e4fa0504de6f" + }, + "path_in_vcs": "tower-layer" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/CHANGELOG.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..1ca7a54bff62dbddd91c74f87a78a07cc58ae6df --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/CHANGELOG.md @@ -0,0 +1,48 @@ +# 0.3.3 (August 1, 2024) + +### Added + +- **builder,util**: add convenience methods for boxing services ([#616]) +- **all**: new functions const when possible ([#760]) + +[#616]: https://github.com/tower-rs/tower/pull/616 +[#760]: https://github.com/tower-rs/tower/pull/760 + +# 0.3.2 (Octpber 10, 2022) + +## Added + +- Implement `Layer` for tuples of up to 16 elements ([#694]) + +[#694]: https://github.com/tower-rs/tower/pull/694 + +# 0.3.1 (January 7, 2021) + +### Added + +- Added `layer_fn`, for constructing a `Layer` from a function taking + a `Service` and returning a different `Service` ([#491]) +- Added an implementation of `Layer` for `&Layer` ([#446]) +- Multiple documentation improvements ([#487], [#490]) + +[#491]: https://github.com/tower-rs/tower/pull/491 +[#446]: https://github.com/tower-rs/tower/pull/446 +[#487]: https://github.com/tower-rs/tower/pull/487 +[#490]: https://github.com/tower-rs/tower/pull/490 + +# 0.3.0 (November 29, 2019) + +- Move layer builder from `tower-util` to tower-layer. 
+ +# 0.3.0-alpha.2 (September 30, 2019) + +- Move to `futures-*-preview 0.3.0-alpha.19` +- Move to `pin-project 0.4` + +# 0.3.0-alpha.1 (September 11, 2019) + +- Move to `std::future` + +# 0.1.0 (April 26, 2019) + +- Initial release diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..b8b9d75e6153e8619214afeed3cfb659c924ca38 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/Cargo.toml @@ -0,0 +1,41 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "tower-layer" +version = "0.3.3" +authors = ["Tower Maintainers "] +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +Decorates a `Service` to allow easy composition between `Service`s. 
+""" +homepage = "https://github.com/tower-rs/tower" +documentation = "https://docs.rs/tower-layer/0.3.3" +readme = "README.md" +categories = [ + "asynchronous", + "network-programming", +] +license = "MIT" +repository = "https://github.com/tower-rs/tower" + +[lib] +name = "tower_layer" +path = "src/lib.rs" + +[dependencies] + +[dev-dependencies] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..0cf4186573f711bd4c720ac07b22cf0ac9cba395 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/Cargo.toml.orig @@ -0,0 +1,26 @@ +[package] +name = "tower-layer" +# When releasing to crates.io: +# - Update doc url +# - Cargo.toml +# - README.md +# - Update CHANGELOG.md. +# - Create "v0.1.x" git tag. +version = "0.3.3" +authors = ["Tower Maintainers "] +license = "MIT" +readme = "README.md" +repository = "https://github.com/tower-rs/tower" +homepage = "https://github.com/tower-rs/tower" +documentation = "https://docs.rs/tower-layer/0.3.3" +description = """ +Decorates a `Service` to allow easy composition between `Service`s. 
+""" +categories = ["asynchronous", "network-programming"] +edition = "2018" + +[dependencies] + +[dev-dependencies] +tower-service = { path = "../tower-service" } +tower = { path = "../tower" } diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/LICENSE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..b980cacc77e700c30fe42f8f3bcc79ed55ee7de9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2019 Tower Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/README.md new file mode 100644 index 0000000000000000000000000000000000000000..48f9dbb7c88c6c10b1abc17f44a11163e919d200 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tower-layer-0.3.3/README.md @@ -0,0 +1,43 @@ +# Tower Layer + +Decorates a [Tower] `Service`, transforming either the request or the response. + +[![Crates.io][crates-badge]][crates-url] +[![Documentation][docs-badge]][docs-url] +[![Documentation (master)][docs-master-badge]][docs-master-url] +[![MIT licensed][mit-badge]][mit-url] +[![Build Status][actions-badge]][actions-url] +[![Discord chat][discord-badge]][discord-url] + +[crates-badge]: https://img.shields.io/crates/v/tower-layer.svg +[crates-url]: https://crates.io/crates/tower-layer +[docs-badge]: https://docs.rs/tower-layer/badge.svg +[docs-url]: https://docs.rs/tower-layer +[docs-master-badge]: https://img.shields.io/badge/docs-master-blue +[docs-master-url]: https://tower-rs.github.io/tower/tower_layer +[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg +[mit-url]: LICENSE +[actions-badge]: https://github.com/tower-rs/tower/workflows/CI/badge.svg +[actions-url]:https://github.com/tower-rs/tower/actions?query=workflow%3ACI +[discord-badge]: https://img.shields.io/discord/500028886025895936?logo=discord&label=discord&logoColor=white +[discord-url]: https://discord.gg/EeF3cQw + +## Overview + +Often, many of the pieces needed for writing network applications can be +reused across multiple services. The `Layer` trait can be used to write +reusable components that can be applied to very different kinds of services; +for example, it can be applied to services operating on different protocols, +and to both the client and server side of a network transaction. + +## License + +This project is licensed under the [MIT license](LICENSE). 
+ +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in Tower by you, shall be licensed as MIT, without any additional +terms or conditions. + +[Tower]: https://crates.io/crates/tower \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..f6a3e2726039c41cbcdad963599c81ea9a2926a9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "65d17389fad2919dab23c1e62cdf1226615c72bd" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.gitattributes b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..088860ab9d4d36a7f694d5731d867a8a91e1bdaa --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.gitattributes @@ -0,0 +1,3 @@ +src/gen/consts.rs linguist-generated +src/gen/generic_const_mappings.rs linguist-generated +src/gen/op.rs linguist-generated diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.gitignore b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.gitignore new file mode 100644 index 
0000000000000000000000000000000000000000..34169b35ccf93916f306aa3ab2ea1fae9b570d9c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/.gitignore @@ -0,0 +1,2 @@ +target +.direnv/ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/CHANGELOG.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..67ae6bdab39f89e5200eb700516c0176ceae8198 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/CHANGELOG.md @@ -0,0 +1,133 @@ +# Changelog + +This project follows semantic versioning. + +The MSRV (Minimum Supported Rust Version) is 1.37.0, and typenum is tested +against this Rust version. + +### Unreleased + +### 1.19.0 (2025-10-01) +- [fixed] Conflicting trait implementations with the `const-generics` feature + on 16-bit targets +- [fixed] Remove deprecated feature flag +- [added] Support `core::fmt::Binary` +- [added] Constants for 2^N - 1 +- [added] Implemented `ToInt` for `isize`, `i128`, and `u128` + +### 1.18.0 (2025-02-17) +- [changed] Remove build scripts; instead check-in the built code (PR #219) +- [added] Constants for 3600 (PR #220) +- [added] Elixir-style syntax for `tarr!` macro (PR #214) +- [added] `FoldAdd` and `FoldMul` to get the sum/product of an array (PR #209) + +### 1.17.0 (2023-09-15) +- [removed] Remove `force_unix_path_separator` feature, make it the default +- [added] docs.rs metadata and cfg options +- [added] Playground metadata + +### 1.16.0 (2022-12-05) +- [added] `const INT` field to the `ToInt` trait. +- [added] `const-generics` field with `U` mapping where `N` is a const generic. + +### 1.15.0 (2021-12-25) +- [fixed] Cross-compilation issue due to doing math in build script. 
(PR #177) +- [added] New feature `scale_info` for using inside + [Substrate](https://github.com/paritytech/substrate.git)-based runtimes (PR + #175) + +### 1.14.0 (2021-09-01) +- [changed] Sealed all marker traits. Documentation already stated that these + should not be implemented outside the crate, so this is not considered a + breaking change. + +### 1.13.0 (2021-03-12) +- [changed] MSRV from 1.22.0 to 1.37.0. +- [fixed] `op` macro with 2018 edition import. +- [changed] Allowed calling `assert_type_eq` and `assert_type` at top level. +- [added] Marker trait `Zero` for `Z0`, `U0`, and `B0`. +- [added] Implementation of `Pow` trait for f32 and f64 with negative exponent. +- [added] Trait `ToInt`. + +### 1.12.0 (2020-04-13) +- [added] Feature `force_unix_path_separator` to support building without Cargo. +- [added] Greatest common divisor operator `Gcd` with alias `Gcf`. +- [added] `gcd` to the `op!` macro. +- [changed] Added `Copy` bound to `Rhs` of `Mul` impl for ``. +- [changed] Added `Copy` bound to `Rhs` of `Div` impl for ``. +- [changed] Added `Copy` bound to `Rhs` of `PartialDiv` impl for ``. +- [changed] Added `Copy` bound to `Rhs` of `Rem` impl for ``. +- [fixed] Make all functions #[inline]. + +### 1.11.2 (2019-08-26) +- [fixed] Cross compilation from Linux to Windows. + +### 1.11.1 (2019-08-25) +- [fixed] Builds on earlier Rust builds again and added Rust 1.22.0 to Travis to + prevent future breakage. + +### 1.11.0 (2019-08-25) +- [added] Integer `log2` to the `op!` macro. +- [added] Integer binary logarithm operator `Logarithm2` with alias `Log2`. +- [changed] Removed `feature(i128_type)` when running with the `i128` + feature. Kept the feature flag. for typenum to maintain compatibility with + old Rust versions. +- [added] Integer `sqrt` to the `op!` macro. +- [added] Integer square root operator `SquareRoot` with alias `Sqrt`. +- [fixed] Bug with attempting to create U1024 type alias twice. 
+ +### 1.10.0 (2018-03-11) +- [added] The `PowerOfTwo` marker trait. +- [added] Associated constants for `Bit`, `Unsigned`, and `Integer`. + +### 1.9.0 (2017-05-14) +- [added] The `Abs` type operator and corresponding `AbsVal` alias. +- [added] The feature `i128` that enables creating 128-bit integers from + typenums. +- [added] The `assert_type!` and `assert_type_eq!` macros. +- [added] Operators to the `op!` macro, including those performed by `cmp!`. +- [fixed] Bug in `op!` macro involving functions and convoluted expressions. +- [deprecated] The `cmp!` macro. + +### 1.8.0 (2017-04-12) +- [added] The `op!` macro for conveniently performing type-level operations. +- [added] The `cmp!` macro for conveniently performing type-level comparisons. +- [added] Some comparison type-operators that are used by the `cmp!` macro. + +### 1.7.0 (2017-03-24) +- [added] Type operators `Min` and `Max` with accompanying aliases `Minimum` and + `Maximum` + +### 1.6.0 (2017-02-24) +- [fixed] Bug in `Array` division. +- [fixed] Bug where `Rem` would sometimes exit early with the wrong answer. +- [added] `PartialDiv` operator that performs division as a partial function -- + it's defined only when there is no remainder. + +### 1.5.2 (2017-02-04) +- [fixed] Bug between `Div` implementation and type system. + +### 1.5.1 (2016-11-08) +- [fixed] Expanded implementation of `Pow` for primitives. + +### 1.5.0 (2016-11-03) +- [added] Functions to the `Pow` and `Len` traits. This is *technically* a + breaking change, but it would only break someone's code if they have a custom + impl for `Pow`. I would be very surprised if that is anyone other than me. + +### 1.4.0 (2016-10-29) +- [added] Type-level arrays of type-level integers. (PR #66) +- [added] The types in this crate are now instantiable. (Issue #67, PR #68) + +### 1.3.1 (2016-03-31) +- [fixed] Bug with recent nightlies. + +### 1.3.0 (2016-02-07) +- [changed] Removed dependency on libstd. 
(Issue #53, PR #55) +- [changed] Reorganized module structure. (PR #57) + +### 1.2.0 (2016-01-03) +- [added] This change log! +- [added] Convenience type aliases for operators. (Issue #48, PR #50) +- [added] Types in this crate now derive all possible traits. (Issue #42, PR + #51) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..91f035d750c7dfeb12d4bab1bf6ae59bd8f0d72b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/Cargo.lock @@ -0,0 +1,214 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "byte-slice-cast" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cfg-if" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "derive_more" +version = "0.99.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 2.0.106 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.41 
(registry+https://github.com/rust-lang/crates.io-index)", + "syn 2.0.106 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "indexmap" +version = "2.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "equivalent 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hashbrown 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "parity-scale-codec" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "byte-slice-cast 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "impl-trait-for-tuples 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec-derive 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro-crate 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.109 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "once_cell 1.21.3 (registry+https://github.com/rust-lang/crates.io-index)", + "toml_edit 0.19.15 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-ident 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quote" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scale-info" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "derive_more 0.99.20 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "scale-info-derive 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scale-info-derive" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro-crate 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.109 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-ident 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-ident 1.0.19 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "toml_edit" +version = "0.19.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "indexmap 2.11.4 (registry+https://github.com/rust-lang/crates.io-index)", + "toml_datetime 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)", + "winnow 0.5.40 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "typenum" +version = "1.19.0" +dependencies = [ + "scale-info 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-ident" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winnow" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[metadata] +"checksum arrayvec 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +"checksum byte-slice-cast 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" +"checksum cfg-if 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" +"checksum derive_more 0.99.20 (registry+https://github.com/rust-lang/crates.io-index)" = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" +"checksum equivalent 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" +"checksum hashbrown 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" 
+"checksum impl-trait-for-tuples 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +"checksum indexmap 2.11.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" +"checksum memchr 2.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +"checksum once_cell 1.21.3 (registry+https://github.com/rust-lang/crates.io-index)" = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +"checksum parity-scale-codec 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +"checksum parity-scale-codec-derive 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" +"checksum proc-macro-crate 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +"checksum proc-macro2 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)" = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +"checksum quote 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)" = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +"checksum scale-info 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5c55b744399c25532d63a0d2789b109df8d46fc93752d46b0782991a931a782f" +"checksum scale-info-derive 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "baeb2780690380592f86205aa4ee49815feb2acad8c2f59e6dd207148c3f1fcd" +"checksum syn 1.0.109 (registry+https://github.com/rust-lang/crates.io-index)" = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +"checksum syn 2.0.106 (registry+https://github.com/rust-lang/crates.io-index)" = 
"ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +"checksum toml_datetime 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +"checksum toml_edit 0.19.15 (registry+https://github.com/rust-lang/crates.io-index)" = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +"checksum unicode-ident 1.0.19 (registry+https://github.com/rust-lang/crates.io-index)" = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" +"checksum winnow 0.5.40 (registry+https://github.com/rust-lang/crates.io-index)" = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..c629a80c58b6ad4f2cf2279ff75b5e9948873695 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/Cargo.toml @@ -0,0 +1,81 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +rust-version = "1.37.0" +name = "typenum" +version = "1.19.0" +authors = [ + "Paho Lurie-Gregg ", + "Andre Bogus ", +] +build = "build.rs" +exclude = [ + "/.github/", + "/clippy.toml", + "/flake.lock", + "/flake.nix", + "/justfile", + "/.envrc", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +Typenum is a Rust library for type-level numbers evaluated at + compile time. It currently supports bits, unsigned integers, and signed + integers. It also provides a type-level array of type-level numbers, but its + implementation is incomplete.""" +documentation = "https://docs.rs/typenum" +readme = "README.md" +categories = ["no-std"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/paholg/typenum" + +[package.metadata.docs.rs] +features = [ + "i128", + "const-generics", +] +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[package.metadata.playground] +features = [ + "i128", + "const-generics", +] + +[features] +const-generics = [] +force_unix_path_separator = [] +i128 = [] +no_std = [] +scale_info = ["scale-info/derive"] +strict = [] + +[lib] +name = "typenum" +path = "src/lib.rs" + +[[test]] +name = "test" +path = "tests/test.rs" + +[dependencies.scale-info] +version = "1.0" +optional = true +default-features = false diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..55bc3cc4abbd9806b03da8dd7130b27f740b292d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/Cargo.toml.orig @@ -0,0 +1,40 @@ +[package] +name = "typenum" +version = "1.19.0" # remember to update html_root_url +authors = ["Paho Lurie-Gregg ", "Andre Bogus "] +documentation = "https://docs.rs/typenum" +repository = "https://github.com/paholg/typenum" +readme = 
"README.md" +license = "MIT OR Apache-2.0" +description = """Typenum is a Rust library for type-level numbers evaluated at + compile time. It currently supports bits, unsigned integers, and signed + integers. It also provides a type-level array of type-level numbers, but its + implementation is incomplete.""" +categories = ["no-std"] +edition = "2018" +rust-version = "1.37.0" +exclude = ["/.github/", "/clippy.toml", "/flake.lock", "/flake.nix", "/justfile", "/.envrc"] + +[dependencies] +scale-info = { version = "1.0", default-features = false, optional = true } + +[lib] +name = "typenum" + +[features] +no_std = [] # Deprecated +i128 = [] +strict = [] +force_unix_path_separator = [] # Deprecated +const-generics = [] +scale_info = ["scale-info/derive"] + +[package.metadata.docs.rs] +features = ["i128", "const-generics"] +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.playground] +features = ["i128", "const-generics"] + +[workspace] +members = ["generate"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/LICENSE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..364b7731ca3f552248fbe13284741350d0ce9d4f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/LICENSE @@ -0,0 +1 @@ +MIT OR Apache-2.0 \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..4bf355c3e02b7d32d990a3b7d2acfdf012e9c2ef --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright 2014 Paho Lurie-Gregg + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
\ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..e567a4d2802f702583a903a791cf75f494d2ed81 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/LICENSE-MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Paho Lurie-Gregg + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6b198775fb432a96eccc4137a8774cdfb3b6df09 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/README.md @@ -0,0 +1,67 @@ +[![crates.io](https://img.shields.io/crates/v/typenum.svg)](https://crates.io/crates/typenum) +[![Build Status](https://github.com/paholg/typenum/actions/workflows/check.yml/badge.svg)](https://github.com/paholg/typenum/actions/workflows/check.yml) + +Typenum +===== + +Typenum is a Rust library for type-level numbers evaluated at compile time. It +currently supports bits, unsigned integers, and signed integers. + +Typenum depends only on libcore, and so is suitable for use on any platform! + +For the full documentation, go [here](https://docs.rs/typenum). + +### Importing + +While `typenum` is divided into several modules, they are all re-exported +through the crate root, so you can import anything contained herein with `use +typenum::whatever;`, ignoring the crate structure. + +You may also find it useful to treat the `consts` module as a prelude, +performing a glob import. + +### Example + +Here is a trivial example of `typenum`'s use: + +```rust +use typenum::{Sum, Exp, Integer, N2, P3, P4}; + +type X = Sum; +assert_eq!(::to_i32(), 7); + +type Y = Exp; +assert_eq!(::to_i32(), -8); +``` + +For a non-trivial example of its use, see one of the crates that depends on +it. The full list is +[here](https://crates.io/crates/typenum/reverse_dependencies). Of note are +[dimensioned](https://crates.io/crates/dimensioned/) which does compile-time +type checking for arbitrary unit systems and +[generic-array](https://crates.io/crates/generic-array/) which provides arrays +whose length you can generically refer to. 
+ +### Error messages + + +Typenum's error messages aren't great, and can be difficult to parse. The good +news is that the fine folks at Auxon have written a tool to help with it. Please +take a look at [tnfilt](https://github.com/auxoncorp/tnfilt). + +### License + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license + ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/build.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..d76d5b4f8a441448b319d97b17c93ff3ba13b9f9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/typenum-1.19.0/build.rs @@ -0,0 +1,417 @@ +use std::{cmp, env, fmt, fs::File, io::Write, path::PathBuf}; + +enum UIntCode { + Term, + Zero(Box), + One(Box), +} + +enum IntCode { + Zero, + Pos(Box), + Neg(Box), +} + +impl fmt::Display for UIntCode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + UIntCode::Term => write!(f, "UTerm"), + UIntCode::Zero(ref inner) => write!(f, "UInt<{}, B0>", inner), + UIntCode::One(ref inner) => write!(f, "UInt<{}, B1>", inner), + } + } +} + +impl fmt::Display for IntCode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + IntCode::Zero => write!(f, "Z0"), + IntCode::Pos(ref inner) => write!(f, "PInt<{}>", inner), + IntCode::Neg(ref inner) => write!(f, "NInt<{}>", inner), + } + } +} + +fn gen_uint(u: u64) -> UIntCode { + let mut result = UIntCode::Term; + let mut x = 1u64 << 63; + 
while x > u { + x >>= 1 + } + while x > 0 { + result = if x & u > 0 { + UIntCode::One(Box::new(result)) + } else { + UIntCode::Zero(Box::new(result)) + }; + x >>= 1; + } + result +} + +fn gen_int(i: i64) -> IntCode { + use std::cmp::Ordering::{Equal, Greater, Less}; + + match i.cmp(&0) { + Greater => IntCode::Pos(Box::new(gen_uint(i as u64))), + Less => IntCode::Neg(Box::new(gen_uint(i.abs() as u64))), + Equal => IntCode::Zero, + } +} + +/// Computes the greatest common divisor of two integers. +fn gcdi(mut a: i64, mut b: i64) -> i64 { + a = a.abs(); + b = b.abs(); + + while a != 0 { + let tmp = b % a; + b = a; + a = tmp; + } + + b +} + +fn gcdu(mut a: u64, mut b: u64) -> u64 { + while a != 0 { + let tmp = b % a; + b = a; + a = tmp; + } + + b +} + +fn sign(i: i64) -> char { + use std::cmp::Ordering::*; + match i.cmp(&0) { + Greater => 'P', + Less => 'N', + Equal => '_', + } +} + +struct UIntTest { + a: u64, + op: &'static str, + b: Option, + r: u64, +} + +impl fmt::Display for UIntTest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.b { + Some(b) => write!( + f, + " +#[test] +#[allow(non_snake_case)] +fn test_{a}_{op}_{b}() {{ + type A = {gen_a}; + type B = {gen_b}; + type U{r} = {result}; + + #[allow(non_camel_case_types)] + type U{a}{op}U{b} = <>::Output as Same>::Output; + + assert_eq!(::to_u64(), ::to_u64()); +}}", + gen_a = gen_uint(self.a), + gen_b = gen_uint(b), + r = self.r, + result = gen_uint(self.r), + a = self.a, + b = b, + op = self.op + ), + None => write!( + f, + " +#[test] +#[allow(non_snake_case)] +fn test_{a}_{op}() {{ + type A = {gen_a}; + type U{r} = {result}; + + #[allow(non_camel_case_types)] + type {op}U{a} = <::Output as Same>::Output; + assert_eq!(<{op}U{a} as Unsigned>::to_u64(), ::to_u64()); +}}", + gen_a = gen_uint(self.a), + r = self.r, + result = gen_uint(self.r), + a = self.a, + op = self.op + ), + } + } +} + +fn uint_binary_test(left: u64, operator: &'static str, right: u64, result: u64) -> UIntTest { + 
UIntTest { + a: left, + op: operator, + b: Option::Some(right), + r: result, + } +} + +struct IntBinaryTest { + a: i64, + op: &'static str, + b: i64, + r: i64, +} + +impl fmt::Display for IntBinaryTest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + " +#[test] +#[allow(non_snake_case)] +fn test_{sa}{a}_{op}_{sb}{b}() {{ + type A = {gen_a}; + type B = {gen_b}; + type {sr}{r} = {result}; + + #[allow(non_camel_case_types)] + type {sa}{a}{op}{sb}{b} = <>::Output as Same<{sr}{r}>>::Output; + + assert_eq!(<{sa}{a}{op}{sb}{b} as Integer>::to_i64(), <{sr}{r} as Integer>::to_i64()); +}}", + gen_a = gen_int(self.a), + gen_b = gen_int(self.b), + r = self.r.abs(), + sr = sign(self.r), + result = gen_int(self.r), + a = self.a.abs(), + b = self.b.abs(), + sa = sign(self.a), + sb = sign(self.b), + op = self.op + ) + } +} + +fn int_binary_test(left: i64, operator: &'static str, right: i64, result: i64) -> IntBinaryTest { + IntBinaryTest { + a: left, + op: operator, + b: right, + r: result, + } +} + +struct IntUnaryTest { + op: &'static str, + a: i64, + r: i64, +} + +impl fmt::Display for IntUnaryTest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + " +#[test] +#[allow(non_snake_case)] +fn test_{sa}{a}_{op}() {{ + type A = {gen_a}; + type {sr}{r} = {result}; + + #[allow(non_camel_case_types)] + type {op}{sa}{a} = <::Output as Same<{sr}{r}>>::Output; + assert_eq!(<{op}{sa}{a} as Integer>::to_i64(), <{sr}{r} as Integer>::to_i64()); +}}", + gen_a = gen_int(self.a), + r = self.r.abs(), + sr = sign(self.r), + result = gen_int(self.r), + a = self.a.abs(), + sa = sign(self.a), + op = self.op + ) + } +} + +fn int_unary_test(operator: &'static str, num: i64, result: i64) -> IntUnaryTest { + IntUnaryTest { + op: operator, + a: num, + r: result, + } +} + +fn uint_cmp_test(a: u64, b: u64) -> String { + format!( + " +#[test] +#[allow(non_snake_case)] +fn test_{a}_Cmp_{b}() {{ + type A = {gen_a}; + type B = {gen_b}; + + 
#[allow(non_camel_case_types)] + type U{a}CmpU{b} = >::Output; + assert_eq!(::to_ordering(), Ordering::{result:?}); +}}", + a = a, + b = b, + gen_a = gen_uint(a), + gen_b = gen_uint(b), + result = a.cmp(&b) + ) +} + +fn int_cmp_test(a: i64, b: i64) -> String { + format!( + " +#[test] +#[allow(non_snake_case)] +fn test_{sa}{a}_Cmp_{sb}{b}() {{ + type A = {gen_a}; + type B = {gen_b}; + + #[allow(non_camel_case_types)] + type {sa}{a}Cmp{sb}{b} = >::Output; + assert_eq!(<{sa}{a}Cmp{sb}{b} as Ord>::to_ordering(), Ordering::{result:?}); +}}", + a = a.abs(), + b = b.abs(), + sa = sign(a), + sb = sign(b), + gen_a = gen_int(a), + gen_b = gen_int(b), + result = a.cmp(&b) + ) +} +pub fn gen_tests() -> String { + // will test all permutations of number pairs up to this (and down to its opposite for ints) + let high: i64 = 5; + + let uints = (0u64..high as u64 + 1).flat_map(|a| (a..a + 1).cycle().zip(0..high as u64 + 1)); + let ints = (-high..high + 1).flat_map(|a| (a..a + 1).cycle().zip(-high..high + 1)); + + let mut result = String::new(); + + result.push_str( + " +use typenum::*; +use core::ops::*; +use core::cmp::Ordering; +", + ); + // uint operators: + for (a, b) in uints { + let mut tests = vec![ + uint_binary_test(a, "BitAnd", b, a & b), + uint_binary_test(a, "BitOr", b, a | b), + uint_binary_test(a, "BitXor", b, a ^ b), + uint_binary_test(a, "Shl", b, a << b), + uint_binary_test(a, "Shr", b, a >> b), + uint_binary_test(a, "Add", b, a + b), + uint_binary_test(a, "Mul", b, a * b), + uint_binary_test(a, "Pow", b, a.pow(b as u32)), + uint_binary_test(a, "Min", b, cmp::min(a, b)), + uint_binary_test(a, "Max", b, cmp::max(a, b)), + uint_binary_test(a, "Gcd", b, gcdu(a, b)), + ]; + if a >= b { + tests.push(uint_binary_test(a, "Sub", b, a - b)); + } + if b != 0 { + tests.push(uint_binary_test(a, "Div", b, a / b)); + tests.push(uint_binary_test(a, "Rem", b, a % b)); + if a % b == 0 { + tests.push(uint_binary_test(a, "PartialDiv", b, a / b)); + } + } + + for test in tests { + 
result.push_str(&test.to_string()); + } + result.push_str(&uint_cmp_test(a, b)); + } + + // int operators: + for (a, b) in ints { + let mut tests = vec![ + int_binary_test(a, "Add", b, a + b), + int_binary_test(a, "Sub", b, a - b), + int_binary_test(a, "Mul", b, a * b), + int_binary_test(a, "Min", b, cmp::min(a, b)), + int_binary_test(a, "Max", b, cmp::max(a, b)), + int_binary_test(a, "Gcd", b, gcdi(a, b)), + ]; + if b != 0 { + tests.push(int_binary_test(a, "Div", b, a / b)); + tests.push(int_binary_test(a, "Rem", b, a % b)); + if a % b == 0 { + tests.push(int_binary_test(a, "PartialDiv", b, a / b)); + } + } + if b >= 0 || a.abs() == 1 { + let result = if b < 0 { + if a == 1 { + a + } else if a == -1 { + a.pow((-b) as u32) + } else { + unreachable!() + } + } else { + a.pow(b as u32) + }; + tests.push(int_binary_test(a, "Pow", b, result)); + } + for test in tests { + result.push_str(&test.to_string()); + } + result.push_str(&int_cmp_test(a, b)); + } + + // int unary operators: + for n in -high..high + 1 { + let tests = vec![ + int_unary_test("Neg", n, -n), + int_unary_test("Abs", n, n.abs()), + ]; + for test in tests { + result.push_str(&test.to_string()); + } + } + + result +} + +#[cfg_attr( + feature = "no_std", + deprecated( + since = "1.3.0", + note = "the `no_std` flag is no longer necessary and will be removed in the future" + ) +)] +pub fn no_std() {} + +#[cfg_attr( + feature = "force_unix_path_separator", + deprecated( + since = "1.17.0", + note = "the `force_unix_path_separator` flag is no longer necessary and will be removed in the future" + ) +)] +pub fn force_unix_path_separator() {} + +fn main() { + no_std(); + force_unix_path_separator(); + println!("cargo:rerun-if-changed=tests"); + + let tests = gen_tests(); + let out_dir = env::var("OUT_DIR").unwrap(); + let dest = PathBuf::from(out_dir).join("tests.rs"); + let mut f = File::create(&dest).unwrap(); + f.write_all(tests.as_bytes()).unwrap(); +} diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..a5ff089174e713d9089b17869a9ca90d7f205536 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "3101cfd168190b60bf445dee32c5a20bd74996e0" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/.gitignore b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..345b2e46780ca5d9170acb0050f4fdcd08686a0c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/.gitignore @@ -0,0 +1,3 @@ +/scripts/CaseFolding.txt +/target +/Cargo.lock diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..fa48db98fec564b25ea652ffce7a690c40f75844 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/Cargo.lock @@ -0,0 +1,7 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "unicase" +version = "2.9.0" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..32bf2580623a6203270a374726759079392e3f7a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/Cargo.toml @@ -0,0 +1,47 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "unicase" +version = "2.9.0" +authors = ["Sean McArthur "] +build = false +exclude = ["scripts/*"] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "A case-insensitive wrapper around strings." 
+documentation = "https://docs.rs/unicase" +readme = "README.md" +keywords = [ + "lowercase", + "case", + "case-insensitive", + "case-folding", + "no_std", +] +categories = [ + "internationalization", + "text-processing", + "no-std", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/seanmonstar/unicase" + +[features] +nightly = [] + +[lib] +name = "unicase" +path = "src/lib.rs" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..a07479edf310a7cc2316095518865933ddd8dca4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/Cargo.toml.orig @@ -0,0 +1,20 @@ +[package] + +name = "unicase" +version = "2.9.0" # don't forget to update html_root_url +authors = ["Sean McArthur "] +description = "A case-insensitive wrapper around strings." +repository = "https://github.com/seanmonstar/unicase" +documentation = "https://docs.rs/unicase" +license = "MIT OR Apache-2.0" +readme = "README.md" +keywords = ["lowercase", "case", "case-insensitive", "case-folding", "no_std"] +categories = ["internationalization", "text-processing", "no-std"] +edition = "2018" + +exclude = [ + "scripts/*" +] + +[features] +nightly = [] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..16fe87b06e802f094b3fbb0894b137bca2b16ef1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..f14adbf879b8f4f801795c6c8775afc1235252f6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/LICENSE-MIT @@ -0,0 +1,20 @@ +Copyright (c) 2014-2026 Sean McArthur + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f145e13ff86103fc2985c11ef0d970a298817fc0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/unicase-2.9.0/README.md @@ -0,0 +1,32 @@ +# unicase + +[![crates.io](https://img.shields.io/crates/v/unicase.svg)](https://crates.io/crates/unicase) +[![Released API docs](https://docs.rs/unicase/badge.svg)](https://docs.rs/unicase) +[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) +[![CI](https://github.com/seanmonstar/unicase/workflows/CI/badge.svg)](https://github.com/seanmonstar/unicase/actions?query=workflow%3ACI) + +Compare strings when case is not important (using Unicode Case-folding). + +```rust +// ignore ASCII case +let a = UniCase::new("foobar"); +let b = UniCase::new("FOOBAR"); + +assert_eq!(a, b); + +// using unicode case-folding +let c = UniCase::new("Maße") +let d = UniCase::new("MASSE"); +assert_eq!(c, d); +``` + +## License + +Licensed under either of + +- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..2d3b66974951b1bb21abe3b8c1ff124ff9a8848c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "6b604ea2365bd535fb66eb4bfe92a9e00e333090" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/.gitignore b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..4fffb2f89cbd8f2169ce9914bd16bd43785bb368 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/.gitignore @@ -0,0 +1,2 @@ +/target +/Cargo.lock diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/COPYRIGHT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/COPYRIGHT new file mode 100644 index 0000000000000000000000000000000000000000..b5e94574045c0a6ee8bde420247de15f0de8cc8d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/COPYRIGHT @@ -0,0 +1,42 @@ +Copyright Mozilla Foundation + +Licensed under the Apache License (Version 2.0), or the MIT license, +(the "Licenses") at your option. 
You may not use this file except in +compliance with one of the Licenses. You may obtain copies of the +Licenses at: + + https://www.apache.org/licenses/LICENSE-2.0 + https://opensource.org/licenses/MIT + +Unless required by applicable law or agreed to in writing, software +distributed under the Licenses is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the Licenses for the specific language governing permissions and +limitations under the Licenses. + +-- + +Test code is dedicated to the Public Domain when so designated (see +the individual files for PD/CC0-dedicated sections). + +-- + +The implementation for Utf8CharIndices was adapted from the +CharIndices implementation of the Rust standard library at revision +ab32548539ec38a939c1b58599249f3b54130026 +(https://github.com/rust-lang/rust/blob/ab32548539ec38a939c1b58599249f3b54130026/library/core/src/str/iter.rs). + +Excerpt from https://github.com/rust-lang/rust/blob/ab32548539ec38a939c1b58599249f3b54130026/COPYRIGHT , +which refers to +https://github.com/rust-lang/rust/blob/ab32548539ec38a939c1b58599249f3b54130026/LICENSE-APACHE +and +https://github.com/rust-lang/rust/blob/ab32548539ec38a939c1b58599249f3b54130026/LICENSE-MIT +: + +For full authorship information, see the version control history or +https://thanks.rust-lang.org + +Except as otherwise noted (below and/or in individual files), Rust is +licensed under the Apache License, Version 2.0 or + or the MIT license + or , at your option. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..d1008fdb4441b396e234284878c5f323d03b2e11 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/Cargo.toml @@ -0,0 +1,33 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +name = "utf8_iter" +version = "1.0.4" +authors = ["Henri Sivonen "] +description = "Iterator by char over potentially-invalid UTF-8 in &[u8]" +homepage = "https://docs.rs/utf8_iter/" +documentation = "https://docs.rs/utf8_iter/" +readme = "README.md" +keywords = [ + "encoding", + "UTF-8", + "unicode", + "iterator", +] +categories = [ + "text-processing", + "encoding", + "internationalization", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/hsivonen/utf8_iter" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..263d2d87db4c1aaa72059213c89f03bd3bfd0e29 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/Cargo.toml.orig @@ -0,0 +1,13 @@ +[package] +name = "utf8_iter" +version = "1.0.4" +edition = "2021" +description = "Iterator by char over potentially-invalid UTF-8 in &[u8]" +authors = ["Henri Sivonen "] +license = 
"Apache-2.0 OR MIT" +readme = "README.md" +documentation = "https://docs.rs/utf8_iter/" +homepage = "https://docs.rs/utf8_iter/" +repository = "https://github.com/hsivonen/utf8_iter" +keywords = ["encoding", "UTF-8", "unicode", "iterator"] +categories = ["text-processing", "encoding", "internationalization"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/LICENSE-APACHE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..3317c82e2f677cf6d16b1ac57672bfdab2b15066 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright Mozilla Foundation + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/README.md new file mode 100644 index 0000000000000000000000000000000000000000..08334d4db82302e01b9fb1284a61d84b34da0dba --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/utf8_iter-1.0.4/README.md @@ -0,0 +1,56 @@ +# utf8_iter + +[![crates.io](https://img.shields.io/crates/v/utf8_iter.svg)](https://crates.io/crates/utf8_iter) +[![docs.rs](https://docs.rs/utf8_iter/badge.svg)](https://docs.rs/utf8_iter/) + +utf8_iter provides iteration by `char` over potentially-invalid UTF-8 `&[u8]` +such that UTF-8 errors are handled according to the WHATWG Encoding Standard. + +Iteration by `Result` is provided as an alternative that +distinguishes UTF-8 errors from U+FFFD appearing in the input. + +An implementation of `char_indices()` analogous to the same-name method on +`str` is also provided. + +Key parts of the code are copypaste from the UTF-8 to UTF-16 conversion code +in `encoding_rs`, which was optimized for speed in the case of valid input. +The implementation here uses the structure that was found to be fast in the +`encoding_rs` context but the structure hasn't been benchmarked in this +context. + +This is a `no_std` crate. + +## Licensing + +TL;DR: `Apache-2.0 OR MIT` + +Please see the file named +[COPYRIGHT](https://github.com/hsivonen/utf8_iter/blob/master/COPYRIGHT). + +## Documentation + +Generated [API documentation](https://docs.rs/utf8_iter/) is available +online. + +## Release Notes + +### 1.0.4 + +* Add iteration by `Result`. + +### 1.0.3 + +* Fix an error in documentation. 
+ +### 1.0.2 + +* `char_indices()` implementation. + +### 1.0.1 + +* `as_slice()` method. +* Implement `DoubleEndedIterator` + +### 1.0.0 + +The initial release. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..75cd5c64df0cbb65e69d753880e3f5293e0e214e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "f136a81add9706ed0b36ac3c4b0da943de8e690d" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.gitignore b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..693699042b1a8ccf697636d3cd34b200f3a8278b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.gitignore @@ -0,0 +1,3 @@ +/target +**/*.rs.bk +Cargo.lock diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.gitmodules b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.gitmodules new file mode 100644 index 
0000000000000000000000000000000000000000..bd8a9ff20a4fa7ecd9844688e420fd8c3df90f96 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/.gitmodules @@ -0,0 +1,3 @@ +[submodule "crates/witx-bindgen/WASI"] + path = crates/witx-bindgen/WASI + url = https://github.com/WebAssembly/WASI diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/CODE_OF_CONDUCT.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..5c5ebdd259703deba694ae25da117e801018cfe8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/CODE_OF_CONDUCT.md @@ -0,0 +1,49 @@ +# Contributor Covenant Code of Conduct + +*Note*: this Code of Conduct pertains to individuals' behavior. Please also see the [Organizational Code of Conduct][OCoC]. + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the Bytecode Alliance CoC team at [report@bytecodealliance.org](mailto:report@bytecodealliance.org). The CoC team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The CoC team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the Bytecode Alliance's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[OCoC]: ORG_CODE_OF_CONDUCT.md +[homepage]: https://www.contributor-covenant.org +[version]: https://www.contributor-covenant.org/version/1/4/ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/CONTRIBUTING.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..2db6d0ddf3b6cac8b8cb46f4779aec83e0c014c9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/CONTRIBUTING.md @@ -0,0 +1,8 @@ +# Contributing to wasi-core + +wasi-core follows the same development style as Cranelift, so checkout +[Cranelift's CONTRIBUTING.md]. Of course, for wasi-core-specific issues, please +use the [wasi-core issue tracker]. 
+ +[Cranelift's CONTRIBUTING.md]: https://github.com/CraneStation/cranelift/blob/master/CONTRIBUTING.md +[wasi-core issue tracker]: https://github.com/CraneStation/wasi-core/issues/new diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..5b2d914c6ffe104740030f65d33a92b9ca67a1a2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/Cargo.lock @@ -0,0 +1,23 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "rustc-std-workspace-alloc" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff66d57013a5686e1917ed6a025d54dd591fcda71a41fe07edf4d16726aefa86" + +[[package]] +name = "rustc-std-workspace-core" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1956f5517128a2b6f23ab2dadf1a976f4f5b27962e7724c2bf3d45e539ec098c" + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +dependencies = [ + "rustc-std-workspace-alloc", + "rustc-std-workspace-core", +] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..72c25fcf43f07d47ce3c2e16b4dd711cd6ae6a98 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/Cargo.toml @@ -0,0 +1,59 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo 
and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +authors = ["The Cranelift Project Developers"] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Experimental WASI API bindings for Rust" +documentation = "https://docs.rs/wasi" +readme = "README.md" +keywords = [ + "webassembly", + "wasm", +] +categories = [ + "no-std", + "wasm", +] +license = "Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT" +repository = "https://github.com/bytecodealliance/wasi" + +[badges.maintenance] +status = "experimental" + +[features] +default = ["std"] +rustc-dep-of-std = [ + "core", + "rustc-std-workspace-alloc", +] +std = [] + +[lib] +name = "wasi" +path = "src/lib.rs" + +[dependencies.core] +version = "1.0" +optional = true +package = "rustc-std-workspace-core" + +[dependencies.rustc-std-workspace-alloc] +version = "1.0" +optional = true diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..21d6a610380215beac883f2f1ce8443e0b910817 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/Cargo.toml.orig @@ -0,0 +1,29 @@ +[package] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +authors = ["The Cranelift Project Developers"] +license = "Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT" +description = "Experimental WASI API bindings for Rust" +edition = "2018" +categories = ["no-std", "wasm"] +keywords = 
["webassembly", "wasm"] +repository = "https://github.com/bytecodealliance/wasi" +readme = "README.md" +documentation = "https://docs.rs/wasi" + +[workspace] +members = ['crates/witx-bindgen', 'crates/wasi-ephemeral'] + +[dependencies] +# When built as part of libstd +core = { version = "1.0", optional = true, package = "rustc-std-workspace-core" } +rustc-std-workspace-alloc = { version = "1.0", optional = true } + +[features] +default = ["std"] +std = [] +# Unstable feature to support being a libstd dependency +rustc-dep-of-std = ["core", "rustc-std-workspace-alloc"] + +[badges] +maintenance = { status = "experimental" } diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/LICENSE-APACHE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/LICENSE-APACHE new file mode 100644 index 0000000000000000000000000000000000000000..16fe87b06e802f094b3fbb0894b137bca2b16ef1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/LICENSE-Apache-2.0_WITH_LLVM-exception b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/LICENSE-Apache-2.0_WITH_LLVM-exception new file mode 100644 index 0000000000000000000000000000000000000000..f9d81955f4bcb8f96a025e2ecc46f39ec536d465 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/LICENSE-Apache-2.0_WITH_LLVM-exception @@ -0,0 +1,220 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +--- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. 
+ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/LICENSE-MIT b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/LICENSE-MIT new file mode 100644 index 0000000000000000000000000000000000000000..31aa79387f27e730e33d871925e152e35e428031 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/ORG_CODE_OF_CONDUCT.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/ORG_CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..6f4fb3f537d154768878020bbbb7fc2897956066 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/ORG_CODE_OF_CONDUCT.md @@ -0,0 +1,143 @@ +# Bytecode Alliance Organizational Code of Conduct (OCoC) + +*Note*: this Code of Conduct pertains to organizations' behavior. Please also see the [Individual Code of Conduct](CODE_OF_CONDUCT.md). + +## Preamble + +The Bytecode Alliance (BA) welcomes involvement from organizations, +including commercial organizations. This document is an +*organizational* code of conduct, intended particularly to provide +guidance to commercial organizations. It is distinct from the +[Individual Code of Conduct (ICoC)](CODE_OF_CONDUCT.md), and does not +replace the ICoC. This OCoC applies to any group of people acting in +concert as a BA member or as a participant in BA activities, whether +or not that group is formally incorporated in some jurisdiction. + +The code of conduct described below is not a set of rigid rules, and +we did not write it to encompass every conceivable scenario that might +arise. For example, it is theoretically possible there would be times +when asserting patents is in the best interest of the BA community as +a whole. In such instances, consult with the BA, strive for +consensus, and interpret these rules with an intent that is generous +to the community the BA serves. 
+ +While we may revise these guidelines from time to time based on +real-world experience, overall they are based on a simple principle: + +*Bytecode Alliance members should observe the distinction between + public community functions and private functions — especially + commercial ones — and should ensure that the latter support, or at + least do not harm, the former.* + +## Guidelines + + * **Do not cause confusion about Wasm standards or interoperability.** + + Having an interoperable WebAssembly core is a high priority for + the BA, and members should strive to preserve that core. It is fine + to develop additional non-standard features or APIs, but they + should always be clearly distinguished from the core interoperable + Wasm. + + Treat the WebAssembly name and any BA-associated names with + respect, and follow BA trademark and branding guidelines. If you + distribute a customized version of software originally produced by + the BA, or if you build a product or service using BA-derived + software, use names that clearly distinguish your work from the + original. (You should still provide proper attribution to the + original, of course, wherever such attribution would normally be + given.) + + Further, do not use the WebAssembly name or BA-associated names in + other public namespaces in ways that could cause confusion, e.g., + in company names, names of commercial service offerings, domain + names, publicly-visible social media accounts or online service + accounts, etc. It may sometimes be reasonable, however, to + register such a name in a new namespace and then immediately donate + control of that account to the BA, because that would help the project + maintain its identity. + + For further guidance, see the BA Trademark and Branding Policy + [TODO: create policy, then insert link]. 
+ + * **Do not restrict contributors.** If your company requires + employees or contractors to sign non-compete agreements, those + agreements must not prevent people from participating in the BA or + contributing to related projects. + + This does not mean that all non-compete agreements are incompatible + with this code of conduct. For example, a company may restrict an + employee's ability to solicit the company's customers. However, an + agreement must not block any form of technical or social + participation in BA activities, including but not limited to the + implementation of particular features. + + The accumulation of experience and expertise in individual persons, + who are ultimately free to direct their energy and attention as + they decide, is one of the most important drivers of progress in + open source projects. A company that limits this freedom may hinder + the success of the BA's efforts. + + * **Do not use patents as offensive weapons.** If any BA participant + prevents the adoption or development of BA technologies by + asserting its patents, that undermines the purpose of the + coalition. The collaboration fostered by the BA cannot include + members who act to undermine its work. + + * **Practice responsible disclosure** for security vulnerabilities. + Use designated, non-public reporting channels to disclose technical + vulnerabilities, and give the project a reasonable period to + respond, remediate, and patch. [TODO: optionally include the + security vulnerability reporting URL here.] + + Vulnerability reporters may patch their company's own offerings, as + long as that patching does not significantly delay the reporting of + the vulnerability. Vulnerability information should never be used + for unilateral commercial advantage. 
Vendors may legitimately + compete on the speed and reliability with which they deploy + security fixes, but withholding vulnerability information damages + everyone in the long run by risking harm to the BA project's + reputation and to the security of all users. + + * **Respect the letter and spirit of open source practice.** While + there is not space to list here all possible aspects of standard + open source practice, some examples will help show what we mean: + + * Abide by all applicable open source license terms. Do not engage + in copyright violation or misattribution of any kind. + + * Do not claim others' ideas or designs as your own. + + * When others engage in publicly visible work (e.g., an upcoming + demo that is coordinated in a public issue tracker), do not + unilaterally announce early releases or early demonstrations of + that work ahead of their schedule in order to secure private + advantage (such as marketplace advantage) for yourself. + + The BA reserves the right to determine what constitutes good open + source practices and to take action as it deems appropriate to + encourage, and if necessary enforce, such practices. + +## Enforcement + +Instances of organizational behavior in violation of the OCoC may +be reported by contacting the Bytecode Alliance CoC team at +[report@bytecodealliance.org](mailto:report@bytecodealliance.org). The +CoC team will review and investigate all complaints, and will respond +in a way that it deems appropriate to the circumstances. The CoC team +is obligated to maintain confidentiality with regard to the reporter of +an incident. Further details of specific enforcement policies may be +posted separately. + +When the BA deems an organization in violation of this OCoC, the BA +will, at its sole discretion, determine what action to take. 
The BA +will decide what type, degree, and duration of corrective action is +needed, if any, before a violating organization can be considered for +membership (if it was not already a member) or can have its membership +reinstated (if it was a member and the BA canceled its membership due +to the violation). + +In practice, the BA's first approach will be to start a conversation, +with punitive enforcement used only as a last resort. Violations +often turn out to be unintentional and swiftly correctable with all +parties acting in good faith. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/README.md new file mode 100644 index 0000000000000000000000000000000000000000..801f56a4e023c55e06882611c7b41eb61b9efe0b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/README.md @@ -0,0 +1,94 @@ +

+

wasi

+ +A
Bytecode Alliance project + +

+ WASI API Bindings for Rust +

+ +

+ Crates.io version + Download + docs.rs docs +

+
+ +This crate contains API bindings for [WASI](https://github.com/WebAssembly/WASI) +system calls in Rust, and currently reflects the `wasi_snapshot_preview1` +module. This crate is quite low-level and provides conceptually a "system call" +interface. In most settings, it's better to use the Rust standard library, which +has WASI support. + +The `wasi` crate is also entirely procedurally generated from the `*.witx` files +describing the WASI apis. While some conveniences are provided the bindings here +are intentionally low-level! + +# Usage + +First you can depend on this crate via `Cargo.toml`: + +```toml +[dependencies] +wasi = "0.8.0" +``` + +Next you can use the APIs in the root of the module like so: + +```rust +fn main() { + let stdout = 1; + let message = "Hello, World!\n"; + let data = [wasi::Ciovec { + buf: message.as_ptr(), + buf_len: message.len(), + }]; + wasi::fd_write(stdout, &data).unwrap(); +} +``` + +Next you can use a tool like [`cargo +wasi`](https://github.com/bytecodealliance/cargo-wasi) to compile and run your +project: + +To compile Rust projects to wasm using WASI, use the `wasm32-wasi` target, +like this: + +``` +$ cargo wasi run + Compiling wasi v0.8.0+wasi-snapshot-preview1 + Compiling wut v0.1.0 (/code) + Finished dev [unoptimized + debuginfo] target(s) in 0.34s + Running `/.cargo/bin/cargo-wasi target/wasm32-wasi/debug/wut.wasm` + Running `target/wasm32-wasi/debug/wut.wasm` +Hello, World! +``` + +# Development + +The bulk of the `wasi` crate is generated by the `witx-bindgen` tool, which lives at +`crates/witx-bindgen` and is part of the cargo workspace. + +The `src/lib_generated.rs` file can be re-generated with the following +command: + +``` +cargo run -p witx-bindgen -- crates/witx-bindgen/WASI/phases/snapshot/witx/wasi_snapshot_preview1.witx > src/lib_generated.rs +``` + +Note that this uses the WASI standard repository as a submodule. 
If you do not +have this submodule present in your source tree, run: +``` +git submodule update --init +``` + +# License + +This project is licensed under the Apache 2.0 license with the LLVM exception. +See [LICENSE](LICENSE) for more details. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this project by you, as defined in the Apache-2.0 license, +shall be licensed as above, without any additional terms or conditions. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/SECURITY.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/SECURITY.md new file mode 100644 index 0000000000000000000000000000000000000000..3513b9cb35734dd840996b5cded145e51c94ede0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wasi-0.11.1+wasi-snapshot-preview1/SECURITY.md @@ -0,0 +1,29 @@ +# Security Policy + +Building secure foundations for software development is at the core of what we do in the Bytecode Alliance. Contributions of external security researchers are a vital part of that. + +## Scope + +If you believe you've found a security issue in any website, service, or software owned or operated by the Bytecode Alliance, we encourage you to notify us. + +## How to Submit a Report + +To submit a vulnerability report to the Bytecode Alliance, please contact us at [security@bytecodealliance.org](mailto:security@bytecodealliance.org). Your submission will be reviewed and validated by a member of our security team. + +## Safe Harbor + +The Bytecode Alliance supports safe harbor for security researchers who: + +* Make a good faith effort to avoid privacy violations, destruction of data, and interruption or degradation of our services. +* Only interact with accounts you own or with explicit permission of the account holder. 
If you do encounter Personally Identifiable Information (PII) contact us immediately, do not proceed with access, and immediately purge any local information. +* Provide us with a reasonable amount of time to resolve vulnerabilities prior to any disclosure to the public or a third-party. + +We will consider activities conducted consistent with this policy to constitute "authorized" conduct and will not pursue civil action or initiate a complaint to law enforcement. We will help to the extent we can if legal action is initiated by a third party against you. + +Please submit a report to us before engaging in conduct that may be inconsistent with or unaddressed by this policy. + +## Preferences + +* Please provide detailed reports with reproducible steps and a clearly defined impact. +* Submit one vulnerability per report. +* Social engineering (e.g. phishing, vishing, smishing) is prohibited. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/.github/FUNDING.yml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/.github/FUNDING.yml new file mode 100644 index 0000000000000000000000000000000000000000..2869fec98f72fbe4b7ea36d5128ee1650cb34370 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/.github/FUNDING.yml @@ -0,0 +1 @@ +github: [BurntSushi] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/.github/workflows/ci.yml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/.github/workflows/ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..eeb51475c38ff077e32c55988447ead50fe41fb9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/.github/workflows/ci.yml @@ -0,0 +1,87 @@ +name: ci +on: + pull_request: + branches: + - master + push: + branches: + - master + schedule: + - cron: '00 01 * * *' + +# The section is needed to drop write-all permissions that 
are granted on +# `schedule` event. By specifying any permission explicitly all others are set +# to none. By using the principle of least privilege the damage a compromised +# workflow can do (because of an injection or compromised third party tool or +# action) is restricted. Currently the worklow doesn't need any additional +# permission except for pulling the code. Adding labels to issues, commenting +# on pull-requests, etc. may need additional permissions: +# +# Syntax for this section: +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions +# +# Reference for how to assign permissions on a job-by-job basis: +# https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs +# +# Reference for available permissions that we can enable if needed: +# https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token +permissions: + # to fetch code (actions/checkout) + contents: read + +jobs: + test: + name: test + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - build: pinned + os: windows-latest + rust: 1.72.0 + - build: stable + os: windows-latest + rust: stable + - build: beta + os: windows-latest + rust: beta + - build: nightly + os: windows-latest + rust: nightly + - build: win-gnu + os: windows-latest + rust: stable-x86_64-gnu + - build: linux + os: ubuntu-latest + rust: stable + - build: macos + os: macos-latest + rust: stable + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + - run: cargo build --verbose + - run: cargo doc --verbose + - run: cargo test --verbose + - name: Show all computer names + run: cargo test --lib sysinfo::tests::itworks -- --nocapture + + rustfmt: + name: rustfmt + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install Rust + 
uses: dtolnay/rust-toolchain@master + with: + toolchain: stable + components: rustfmt + - name: Check formatting + run: | + cargo fmt --all -- --check diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/console.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/console.rs new file mode 100644 index 0000000000000000000000000000000000000000..9d68e5ac204c974fd4bb5ba72b13e0a6d88fade8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/console.rs @@ -0,0 +1,407 @@ +use std::{io, mem}; + +use windows_sys::Win32::Foundation::HANDLE; +use windows_sys::Win32::System::Console::{GetConsoleMode, SetConsoleMode}; +use windows_sys::Win32::System::Console::{ + GetConsoleScreenBufferInfo, SetConsoleTextAttribute, + CONSOLE_SCREEN_BUFFER_INFO, ENABLE_VIRTUAL_TERMINAL_PROCESSING, + FOREGROUND_BLUE, FOREGROUND_GREEN, FOREGROUND_INTENSITY, FOREGROUND_RED, +}; + +use crate::{AsHandleRef, HandleRef}; + +use FOREGROUND_BLUE as FG_BLUE; +use FOREGROUND_GREEN as FG_GREEN; +use FOREGROUND_INTENSITY as FG_INTENSITY; +use FOREGROUND_RED as FG_RED; + +const FG_CYAN: u16 = FG_BLUE | FG_GREEN; +const FG_MAGENTA: u16 = FG_BLUE | FG_RED; +const FG_YELLOW: u16 = FG_GREEN | FG_RED; +const FG_WHITE: u16 = FG_BLUE | FG_GREEN | FG_RED; + +/// Query the given handle for information about the console's screen buffer. +/// +/// The given handle should represent a console. Otherwise, an error is +/// returned. +/// +/// This corresponds to calling [`GetConsoleScreenBufferInfo`]. 
+/// +/// [`GetConsoleScreenBufferInfo`]: https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo +pub fn screen_buffer_info( + h: H, +) -> io::Result { + unsafe { + let mut info: CONSOLE_SCREEN_BUFFER_INFO = mem::zeroed(); + let rc = GetConsoleScreenBufferInfo(h.as_raw() as HANDLE, &mut info); + if rc == 0 { + return Err(io::Error::last_os_error()); + } + Ok(ScreenBufferInfo(info)) + } +} + +/// Set the text attributes of the console represented by the given handle. +/// +/// This corresponds to calling [`SetConsoleTextAttribute`]. +/// +/// [`SetConsoleTextAttribute`]: https://docs.microsoft.com/en-us/windows/console/setconsoletextattribute +pub fn set_text_attributes( + h: H, + attributes: u16, +) -> io::Result<()> { + if unsafe { SetConsoleTextAttribute(h.as_raw() as HANDLE, attributes) } + == 0 + { + Err(io::Error::last_os_error()) + } else { + Ok(()) + } +} + +/// Query the mode of the console represented by the given handle. +/// +/// This corresponds to calling [`GetConsoleMode`], which describes the return +/// value. +/// +/// [`GetConsoleMode`]: https://docs.microsoft.com/en-us/windows/console/getconsolemode +pub fn mode(h: H) -> io::Result { + let mut mode = 0; + if unsafe { GetConsoleMode(h.as_raw() as HANDLE, &mut mode) } == 0 { + Err(io::Error::last_os_error()) + } else { + Ok(mode) + } +} + +/// Set the mode of the console represented by the given handle. +/// +/// This corresponds to calling [`SetConsoleMode`], which describes the format +/// of the mode parameter. +/// +/// [`SetConsoleMode`]: https://docs.microsoft.com/en-us/windows/console/setconsolemode +pub fn set_mode(h: H, mode: u32) -> io::Result<()> { + if unsafe { SetConsoleMode(h.as_raw() as HANDLE, mode) } == 0 { + Err(io::Error::last_os_error()) + } else { + Ok(()) + } +} + +/// Represents console screen buffer information such as size, cursor position +/// and styling attributes. +/// +/// This wraps a [`CONSOLE_SCREEN_BUFFER_INFO`]. 
+/// +/// [`CONSOLE_SCREEN_BUFFER_INFO`]: https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str +#[derive(Clone)] +pub struct ScreenBufferInfo(CONSOLE_SCREEN_BUFFER_INFO); + +impl ScreenBufferInfo { + /// Returns the size of the console screen buffer, in character columns and + /// rows. + /// + /// This corresponds to `dwSize`. + pub fn size(&self) -> (i16, i16) { + (self.0.dwSize.X, self.0.dwSize.Y) + } + + /// Returns the position of the cursor in terms of column and row + /// coordinates of the console screen buffer. + /// + /// This corresponds to `dwCursorPosition`. + pub fn cursor_position(&self) -> (i16, i16) { + (self.0.dwCursorPosition.X, self.0.dwCursorPosition.Y) + } + + /// Returns the character attributes associated with this console. + /// + /// This corresponds to `wAttributes`. + /// + /// See [`char info`] for more details. + /// + /// [`char info`]: https://docs.microsoft.com/en-us/windows/console/char-info-str + pub fn attributes(&self) -> u16 { + self.0.wAttributes + } + + /// Returns the maximum size of the console window, in character columns + /// and rows, given the current screen buffer size and font and the screen + /// size. + pub fn max_window_size(&self) -> (i16, i16) { + (self.0.dwMaximumWindowSize.X, self.0.dwMaximumWindowSize.Y) + } + + /// Returns the console screen buffer coordinates of the upper-left and + /// lower-right corners of the display window. + /// + /// This corresponds to `srWindow`. + pub fn window_rect(&self) -> SmallRect { + SmallRect { + left: self.0.srWindow.Left, + top: self.0.srWindow.Top, + right: self.0.srWindow.Right, + bottom: self.0.srWindow.Bottom, + } + } +} + +/// Defines the coordinates of the upper left and lower right corners of a rectangle. +/// +/// This corresponds to [`SMALL_RECT`]. 
+/// +/// [`SMALL_RECT`]: https://docs.microsoft.com/en-us/windows/console/small-rect-str +pub struct SmallRect { + pub left: i16, + pub top: i16, + pub right: i16, + pub bottom: i16, +} + +/// A Windows console. +/// +/// This represents a very limited set of functionality available to a Windows +/// console. In particular, it can only change text attributes such as color +/// and intensity. This may grow over time. If you need more routines, please +/// file an issue and/or PR. +/// +/// There is no way to "write" to this console. Simply write to +/// stdout or stderr instead, while interleaving instructions to the console +/// to change text attributes. +/// +/// A common pitfall when using a console is to forget to flush writes to +/// stdout before setting new text attributes. +/// +/// # Example +/// ```no_run +/// # #[cfg(windows)] +/// # { +/// use winapi_util::console::{Console, Color, Intense}; +/// +/// let mut con = Console::stdout().unwrap(); +/// con.fg(Intense::Yes, Color::Cyan).unwrap(); +/// println!("This text will be intense cyan."); +/// con.reset().unwrap(); +/// println!("This text will be normal."); +/// # } +/// ``` +#[derive(Debug)] +pub struct Console { + kind: HandleKind, + start_attr: TextAttributes, + cur_attr: TextAttributes, +} + +#[derive(Clone, Copy, Debug)] +enum HandleKind { + Stdout, + Stderr, +} + +impl HandleKind { + fn handle(&self) -> HandleRef { + match *self { + HandleKind::Stdout => HandleRef::stdout(), + HandleKind::Stderr => HandleRef::stderr(), + } + } +} + +impl Console { + /// Get a console for a standard I/O stream. + fn create_for_stream(kind: HandleKind) -> io::Result { + let h = kind.handle(); + let info = screen_buffer_info(&h)?; + let attr = TextAttributes::from_word(info.attributes()); + Ok(Console { kind, start_attr: attr, cur_attr: attr }) + } + + /// Create a new Console to stdout. + /// + /// If there was a problem creating the console, then an error is returned. 
+ pub fn stdout() -> io::Result { + Self::create_for_stream(HandleKind::Stdout) + } + + /// Create a new Console to stderr. + /// + /// If there was a problem creating the console, then an error is returned. + pub fn stderr() -> io::Result { + Self::create_for_stream(HandleKind::Stderr) + } + + /// Applies the current text attributes. + fn set(&mut self) -> io::Result<()> { + set_text_attributes(self.kind.handle(), self.cur_attr.to_word()) + } + + /// Apply the given intensity and color attributes to the console + /// foreground. + /// + /// If there was a problem setting attributes on the console, then an error + /// is returned. + pub fn fg(&mut self, intense: Intense, color: Color) -> io::Result<()> { + self.cur_attr.fg_color = color; + self.cur_attr.fg_intense = intense; + self.set() + } + + /// Apply the given intensity and color attributes to the console + /// background. + /// + /// If there was a problem setting attributes on the console, then an error + /// is returned. + pub fn bg(&mut self, intense: Intense, color: Color) -> io::Result<()> { + self.cur_attr.bg_color = color; + self.cur_attr.bg_intense = intense; + self.set() + } + + /// Reset the console text attributes to their original settings. + /// + /// The original settings correspond to the text attributes on the console + /// when this `Console` value was created. + /// + /// If there was a problem setting attributes on the console, then an error + /// is returned. + pub fn reset(&mut self) -> io::Result<()> { + self.cur_attr = self.start_attr; + self.set() + } + + /// Toggle virtual terminal processing. + /// + /// This method attempts to toggle virtual terminal processing for this + /// console. If there was a problem toggling it, then an error returned. + /// On success, the caller may assume that toggling it was successful. 
+ /// + /// When virtual terminal processing is enabled, characters emitted to the + /// console are parsed for VT100 and similar control character sequences + /// that control color and other similar operations. + pub fn set_virtual_terminal_processing( + &mut self, + yes: bool, + ) -> io::Result<()> { + let vt = ENABLE_VIRTUAL_TERMINAL_PROCESSING; + + let handle = self.kind.handle(); + let old_mode = mode(&handle)?; + let new_mode = if yes { old_mode | vt } else { old_mode & !vt }; + if old_mode == new_mode { + return Ok(()); + } + set_mode(&handle, new_mode) + } +} + +/// A representation of text attributes for the Windows console. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +struct TextAttributes { + fg_color: Color, + fg_intense: Intense, + bg_color: Color, + bg_intense: Intense, +} + +impl TextAttributes { + fn to_word(&self) -> u16 { + let mut w = 0; + w |= self.fg_color.to_fg(); + w |= self.fg_intense.to_fg(); + w |= self.bg_color.to_bg(); + w |= self.bg_intense.to_bg(); + w + } + + fn from_word(word: u16) -> TextAttributes { + TextAttributes { + fg_color: Color::from_fg(word), + fg_intense: Intense::from_fg(word), + bg_color: Color::from_bg(word), + bg_intense: Intense::from_bg(word), + } + } +} + +/// Whether to use intense colors or not. +#[allow(missing_docs)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Intense { + Yes, + No, +} + +impl Intense { + fn to_bg(&self) -> u16 { + self.to_fg() << 4 + } + + fn from_bg(word: u16) -> Intense { + Intense::from_fg(word >> 4) + } + + fn to_fg(&self) -> u16 { + match *self { + Intense::No => 0, + Intense::Yes => FG_INTENSITY, + } + } + + fn from_fg(word: u16) -> Intense { + if word & FG_INTENSITY > 0 { + Intense::Yes + } else { + Intense::No + } + } +} + +/// The set of available colors for use with a Windows console. 
+#[allow(missing_docs)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum Color { + Black, + Blue, + Green, + Red, + Cyan, + Magenta, + Yellow, + White, +} + +impl Color { + fn to_bg(&self) -> u16 { + self.to_fg() << 4 + } + + fn from_bg(word: u16) -> Color { + Color::from_fg(word >> 4) + } + + fn to_fg(&self) -> u16 { + match *self { + Color::Black => 0, + Color::Blue => FG_BLUE, + Color::Green => FG_GREEN, + Color::Red => FG_RED, + Color::Cyan => FG_CYAN, + Color::Magenta => FG_MAGENTA, + Color::Yellow => FG_YELLOW, + Color::White => FG_WHITE, + } + } + + fn from_fg(word: u16) -> Color { + match word & 0b111 { + FG_BLUE => Color::Blue, + FG_GREEN => Color::Green, + FG_RED => Color::Red, + FG_CYAN => Color::Cyan, + FG_MAGENTA => Color::Magenta, + FG_YELLOW => Color::Yellow, + FG_WHITE => Color::White, + _ => Color::Black, + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/file.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/file.rs new file mode 100644 index 0000000000000000000000000000000000000000..67abfcf8916ad3f3b7954e2403193aace8b59423 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/file.rs @@ -0,0 +1,166 @@ +use std::{io, mem}; + +use windows_sys::Win32::Foundation::HANDLE; +use windows_sys::Win32::Foundation::{GetLastError, FILETIME, NO_ERROR}; +use windows_sys::Win32::Storage::FileSystem::{ + GetFileInformationByHandle, GetFileType, BY_HANDLE_FILE_INFORMATION, + FILE_ATTRIBUTE_HIDDEN, +}; + +use crate::AsHandleRef; + +/// Return various pieces of information about a file. +/// +/// This includes information such as a file's size, unique identifier and +/// time related fields. +/// +/// This corresponds to calling [`GetFileInformationByHandle`]. 
+/// +/// [`GetFileInformationByHandle`]: https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-getfileinformationbyhandle +pub fn information(h: H) -> io::Result { + unsafe { + let mut info: BY_HANDLE_FILE_INFORMATION = mem::zeroed(); + let rc = GetFileInformationByHandle(h.as_raw() as HANDLE, &mut info); + if rc == 0 { + return Err(io::Error::last_os_error()); + }; + Ok(Information(info)) + } +} + +/// Returns the file type of the given handle. +/// +/// If there was a problem querying the file type, then an error is returned. +/// +/// This corresponds to calling [`GetFileType`]. +/// +/// [`GetFileType`]: https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-getfiletype +pub fn typ(h: H) -> io::Result { + unsafe { + let rc = GetFileType(h.as_raw() as HANDLE); + if rc == 0 && GetLastError() != NO_ERROR { + return Err(io::Error::last_os_error()); + } + Ok(Type(rc)) + } +} + +/// Returns true if and only if the given file attributes contain the +/// `FILE_ATTRIBUTE_HIDDEN` attribute. +pub fn is_hidden(file_attributes: u64) -> bool { + file_attributes & (FILE_ATTRIBUTE_HIDDEN as u64) > 0 +} + +/// Represents file information such as creation time, file size, etc. +/// +/// This wraps a [`BY_HANDLE_FILE_INFORMATION`]. +/// +/// [`BY_HANDLE_FILE_INFORMATION`]: https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/ns-fileapi-_by_handle_file_information +#[derive(Clone)] +pub struct Information(BY_HANDLE_FILE_INFORMATION); + +impl Information { + /// Returns file attributes. + /// + /// This corresponds to `dwFileAttributes`. + pub fn file_attributes(&self) -> u64 { + self.0.dwFileAttributes as u64 + } + + /// Returns true if and only if this file information has the + /// `FILE_ATTRIBUTE_HIDDEN` attribute. + pub fn is_hidden(&self) -> bool { + is_hidden(self.file_attributes()) + } + + /// Return the creation time, if one exists. + /// + /// This corresponds to `ftCreationTime`. 
+ pub fn creation_time(&self) -> Option { + filetime_to_u64(self.0.ftCreationTime) + } + + /// Return the last access time, if one exists. + /// + /// This corresponds to `ftLastAccessTime`. + pub fn last_access_time(&self) -> Option { + filetime_to_u64(self.0.ftLastAccessTime) + } + + /// Return the last write time, if one exists. + /// + /// This corresponds to `ftLastWriteTime`. + pub fn last_write_time(&self) -> Option { + filetime_to_u64(self.0.ftLastWriteTime) + } + + /// Return the serial number of the volume that the file is on. + /// + /// This corresponds to `dwVolumeSerialNumber`. + pub fn volume_serial_number(&self) -> u64 { + self.0.dwVolumeSerialNumber as u64 + } + + /// Return the file size, in bytes. + /// + /// This corresponds to `nFileSizeHigh` and `nFileSizeLow`. + pub fn file_size(&self) -> u64 { + ((self.0.nFileSizeHigh as u64) << 32) | (self.0.nFileSizeLow as u64) + } + + /// Return the number of links to this file. + /// + /// This corresponds to `nNumberOfLinks`. + pub fn number_of_links(&self) -> u64 { + self.0.nNumberOfLinks as u64 + } + + /// Return the index of this file. The index of a file is a purpotedly + /// unique identifier for a file within a particular volume. + pub fn file_index(&self) -> u64 { + ((self.0.nFileIndexHigh as u64) << 32) | (self.0.nFileIndexLow as u64) + } +} + +/// Represents a Windows file type. +/// +/// This wraps the result of [`GetFileType`]. +/// +/// [`GetFileType`]: https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-getfiletype +#[derive(Clone)] +pub struct Type(u32); + +impl Type { + /// Returns true if this type represents a character file, which is + /// typically an LPT device or a console. + pub fn is_char(&self) -> bool { + self.0 == ::windows_sys::Win32::Storage::FileSystem::FILE_TYPE_CHAR + } + + /// Returns true if this type represents a disk file. 
+ pub fn is_disk(&self) -> bool { + self.0 == ::windows_sys::Win32::Storage::FileSystem::FILE_TYPE_DISK + } + + /// Returns true if this type represents a sock, named pipe or an + /// anonymous pipe. + pub fn is_pipe(&self) -> bool { + self.0 == ::windows_sys::Win32::Storage::FileSystem::FILE_TYPE_PIPE + } + + /// Returns true if this type is not known. + /// + /// Note that this never corresponds to a failure. + pub fn is_unknown(&self) -> bool { + self.0 == ::windows_sys::Win32::Storage::FileSystem::FILE_TYPE_UNKNOWN + } +} + +fn filetime_to_u64(t: FILETIME) -> Option { + let v = ((t.dwHighDateTime as u64) << 32) | (t.dwLowDateTime as u64); + if v == 0 { + None + } else { + Some(v) + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ea9d4eabe1fb781317bcb8155e9dd1ab7264cba9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/lib.rs @@ -0,0 +1,35 @@ +/*! +This crate provides a smattering of safe routines for parts of windows-sys. The +primary purpose of this crate is to serve as a dumping ground for various +utility functions that make interactions with windows-sys safe. This permits the +centralization of `unsafe` when dealing with Windows APIs, and thus makes it +easier to audit. + +A key abstraction in this crate is the combination of the +[`Handle`](struct.Handle.html) +and +[`HandleRef`](struct.HandleRef.html) +types. Both represent a valid Windows handle to an I/O-like object, where +`Handle` is owned (the resource is closed when the handle is dropped) and +`HandleRef` is borrowed (the resource is not closed when the handle is +dropped). Many of the routines in this crate work on handles and accept +anything that can be safely converted into a `HandleRef`. 
This includes +standard library types such as `File`, `Stdin`, `Stdout` and `Stderr`. + +Note that this crate is completely empty on non-Windows platforms. +*/ + +#[cfg(windows)] +pub use win::*; + +/// Safe routines for dealing with the Windows console. +#[cfg(windows)] +pub mod console; +/// Safe routines for dealing with files and handles on Windows. +#[cfg(windows)] +pub mod file; +#[cfg(windows)] +/// Safe routines for querying various Windows specific properties. +pub mod sysinfo; +#[cfg(windows)] +mod win; diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/sysinfo.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/sysinfo.rs new file mode 100644 index 0000000000000000000000000000000000000000..53e7eae95a60372c812cbcce90fa49ef5fe7f181 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/sysinfo.rs @@ -0,0 +1,161 @@ +use std::{ffi::OsString, io}; + +use windows_sys::Win32::System::SystemInformation::{ + GetComputerNameExW, COMPUTER_NAME_FORMAT, +}; + +/// The type of name to be retrieved by [`get_computer_name`]. +#[derive(Clone, Copy, Debug)] +#[non_exhaustive] +pub enum ComputerNameKind { + /// The name of the DNS domain assigned to the local computer. If the local + /// computer is a node in a cluster, lpBuffer receives the DNS domain name + /// of the cluster virtual server. + DnsDomain, + /// The fully qualified DNS name that uniquely identifies the local + /// computer. This name is a combination of the DNS host name and the DNS + /// domain name, using the form HostName.DomainName. If the local computer + /// is a node in a cluster, lpBuffer receives the fully qualified DNS name + /// of the cluster virtual server. + DnsFullyQualified, + /// The DNS host name of the local computer. If the local computer is a + /// node in a cluster, lpBuffer receives the DNS host name of the cluster + /// virtual server. 
+ DnsHostname, + /// The NetBIOS name of the local computer. If the local computer is a node + /// in a cluster, lpBuffer receives the NetBIOS name of the cluster virtual + /// server. + NetBios, + /// The name of the DNS domain assigned to the local computer. If the local + /// computer is a node in a cluster, lpBuffer receives the DNS domain name + /// of the local computer, not the name of the cluster virtual server. + PhysicalDnsDomain, + /// The fully qualified DNS name that uniquely identifies the computer. If + /// the local computer is a node in a cluster, lpBuffer receives the fully + /// qualified DNS name of the local computer, not the name of the cluster + /// virtual server. + /// + /// The fully qualified DNS name is a combination of the DNS host name and + /// the DNS domain name, using the form HostName.DomainName. + PhysicalDnsFullyQualified, + /// The DNS host name of the local computer. If the local computer is a + /// node in a cluster, lpBuffer receives the DNS host name of the local + /// computer, not the name of the cluster virtual server. + PhysicalDnsHostname, + /// The NetBIOS name of the local computer. If the local computer is a node + /// in a cluster, lpBuffer receives the NetBIOS name of the local computer, + /// not the name of the cluster virtual server. 
+ PhysicalNetBios, +} + +impl ComputerNameKind { + fn to_format(&self) -> COMPUTER_NAME_FORMAT { + use self::ComputerNameKind::*; + use windows_sys::Win32::System::SystemInformation; + + match *self { + DnsDomain => SystemInformation::ComputerNameDnsDomain, + DnsFullyQualified => { + SystemInformation::ComputerNameDnsFullyQualified + } + DnsHostname => SystemInformation::ComputerNameDnsHostname, + NetBios => SystemInformation::ComputerNameNetBIOS, + PhysicalDnsDomain => { + SystemInformation::ComputerNamePhysicalDnsDomain + } + PhysicalDnsFullyQualified => { + SystemInformation::ComputerNamePhysicalDnsFullyQualified + } + PhysicalDnsHostname => { + SystemInformation::ComputerNamePhysicalDnsHostname + } + PhysicalNetBios => SystemInformation::ComputerNamePhysicalNetBIOS, + } + } +} +/// Retrieves a NetBIOS or DNS name associated with the local computer. +/// +/// The names are established at system startup, when the system reads them +/// from the registry. +/// +/// This corresponds to calling [`GetComputerNameExW`]. +/// +/// [`GetComputerNameExW`]: https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getcomputernameexw +pub fn get_computer_name(kind: ComputerNameKind) -> io::Result { + use std::os::windows::ffi::OsStringExt; + + let format = kind.to_format(); + let mut len1 = 0; + // SAFETY: As documented, we call this with a null pointer which will in + // turn cause this routine to write the required buffer size fo `len1`. + // Also, we explicitly ignore the return value since we expect this call to + // fail given that the destination buffer is too small by design. 
+ let _ = + unsafe { GetComputerNameExW(format, std::ptr::null_mut(), &mut len1) }; + + let len = match usize::try_from(len1) { + Ok(len) => len, + Err(_) => { + return Err(io::Error::new( + io::ErrorKind::Other, + "GetComputerNameExW buffer length overflowed usize", + )) + } + }; + let mut buf = vec![0; len]; + let mut len2 = len1; + // SAFETY: We pass a valid pointer to an appropriately sized Vec. + let rc = + unsafe { GetComputerNameExW(format, buf.as_mut_ptr(), &mut len2) }; + if rc == 0 { + return Err(io::Error::last_os_error()); + } + // Apparently, the subsequent call writes the number of characters written + // to the buffer to `len2` but not including the NUL terminator. Notice + // that in the first call above, the length written to `len1` *does* + // include the NUL terminator. Therefore, we expect `len1` to be at least + // one greater than `len2`. If not, then something weird has happened and + // we report an error. + if len1 <= len2 { + let msg = format!( + "GetComputerNameExW buffer length mismatch, \ + expected length strictly less than {} \ + but got {}", + len1, len2, + ); + return Err(io::Error::new(io::ErrorKind::Other, msg)); + } + let len = usize::try_from(len2).expect("len1 fits implies len2 fits"); + Ok(OsString::from_wide(&buf[..len])) +} + +#[cfg(test)] +mod tests { + use super::*; + + // This test doesn't really check anything other than that we can + // successfully query all kinds of computer names. We just print them out + // since there aren't really any properties about the names that we can + // assert. + // + // We specifically run this test in CI with --nocapture so that we can see + // the output. 
+ #[test] + fn itworks() { + let kinds = [ + ComputerNameKind::DnsDomain, + ComputerNameKind::DnsFullyQualified, + ComputerNameKind::DnsHostname, + ComputerNameKind::NetBios, + ComputerNameKind::PhysicalDnsDomain, + ComputerNameKind::PhysicalDnsFullyQualified, + ComputerNameKind::PhysicalDnsHostname, + ComputerNameKind::PhysicalNetBios, + ]; + for kind in kinds { + let result = get_computer_name(kind); + let name = result.unwrap(); + println!("{kind:?}: {name:?}"); + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/win.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/win.rs new file mode 100644 index 0000000000000000000000000000000000000000..5d8ffcd186f0cb4870f95c6c8f15892ec625fcdd --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/winapi-util-0.1.11/src/win.rs @@ -0,0 +1,246 @@ +use std::{ + fs::File, + io, + os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle}, + path::Path, + process, +}; + +/// A handle represents an owned and valid Windows handle to a file-like +/// object. +/// +/// When an owned handle is dropped, then the underlying raw handle is closed. +/// To get a borrowed handle, use `HandleRef`. +#[derive(Debug)] +pub struct Handle(File); + +impl AsRawHandle for Handle { + fn as_raw_handle(&self) -> RawHandle { + self.0.as_raw_handle() + } +} + +impl FromRawHandle for Handle { + unsafe fn from_raw_handle(handle: RawHandle) -> Handle { + Handle(File::from_raw_handle(handle)) + } +} + +impl IntoRawHandle for Handle { + fn into_raw_handle(self) -> RawHandle { + self.0.into_raw_handle() + } +} + +impl Handle { + /// Create an owned handle to the given file. + /// + /// When the returned handle is dropped, the file is closed. 
+ /// + /// Note that if the given file represents a handle to a directory, then + /// it is generally required that it have been opened with the + /// [`FILE_FLAG_BACKUP_SEMANTICS`] flag in order to use it in various + /// calls such as `information` or `typ`. To have this done automatically + /// for you, use the `from_path_any` constructor. + /// + /// [`FILE_FLAG_BACKUP_SEMANTICS`]: https://docs.microsoft.com/en-us/windows/desktop/api/FileAPI/nf-fileapi-createfilea + pub fn from_file(file: File) -> Handle { + Handle(file) + } + + /// Open a file to the given file path, and return an owned handle to that + /// file. + /// + /// When the returned handle is dropped, the file is closed. + /// + /// If there was a problem opening the file, then the corresponding error + /// is returned. + pub fn from_path>(path: P) -> io::Result { + Ok(Handle::from_file(File::open(path)?)) + } + + /// Like `from_path`, but supports opening directory handles as well. + /// + /// If you use `from_path` on a directory, then subsequent queries using + /// that handle will fail. + pub fn from_path_any>(path: P) -> io::Result { + use std::fs::OpenOptions; + use std::os::windows::fs::OpenOptionsExt; + use windows_sys::Win32::Storage::FileSystem::FILE_FLAG_BACKUP_SEMANTICS; + + let file = OpenOptions::new() + .read(true) + .custom_flags(FILE_FLAG_BACKUP_SEMANTICS) + .open(path)?; + Ok(Handle::from_file(file)) + } + + /// Return this handle as a standard `File` reference. + pub fn as_file(&self) -> &File { + &self.0 + } + + /// Return this handle as a standard `File` mutable reference. + pub fn as_file_mut(&mut self) -> &mut File { + &mut self.0 + } +} + +/// Represents a borrowed and valid Windows handle to a file-like object, such +/// as stdin/stdout/stderr or an actual file. +/// +/// When a borrowed handle is dropped, then the underlying raw handle is +/// **not** closed. To get an owned handle, use `Handle`. 
+#[derive(Debug)] +pub struct HandleRef(HandleRefInner); + +/// The representation of a HandleRef, on which we define a custom Drop impl +/// that avoids closing the underlying raw handle. +#[derive(Debug)] +struct HandleRefInner(Option); + +impl Drop for HandleRefInner { + fn drop(&mut self) { + self.0.take().unwrap().into_raw_handle(); + } +} + +impl AsRawHandle for HandleRef { + fn as_raw_handle(&self) -> RawHandle { + self.as_file().as_raw_handle() + } +} + +impl Clone for HandleRef { + fn clone(&self) -> HandleRef { + unsafe { HandleRef::from_raw_handle(self.as_raw_handle()) } + } +} + +impl HandleRef { + /// Create a borrowed handle to stdin. + /// + /// When the returned handle is dropped, stdin is not closed. + pub fn stdin() -> HandleRef { + unsafe { HandleRef::from_raw_handle(io::stdin().as_raw_handle()) } + } + + /// Create a handle to stdout. + /// + /// When the returned handle is dropped, stdout is not closed. + pub fn stdout() -> HandleRef { + unsafe { HandleRef::from_raw_handle(io::stdout().as_raw_handle()) } + } + + /// Create a handle to stderr. + /// + /// When the returned handle is dropped, stderr is not closed. + pub fn stderr() -> HandleRef { + unsafe { HandleRef::from_raw_handle(io::stderr().as_raw_handle()) } + } + + /// Create a borrowed handle to the given file. + /// + /// When the returned handle is dropped, the file is not closed. + pub fn from_file(file: &File) -> HandleRef { + unsafe { HandleRef::from_raw_handle(file.as_raw_handle()) } + } + + /// Create a borrowed handle from the given raw handle. + /// + /// Note that unlike the `FromRawHandle` trait, this constructor does + /// **not** consume ownership of the given handle. That is, when the + /// borrowed handle created by this constructor is dropped, the underlying + /// handle will not be closed. + /// + /// # Safety + /// + /// This is unsafe because there is no guarantee that the given raw handle + /// is a valid handle. 
The caller must ensure this is true before invoking + /// this constructor. + pub unsafe fn from_raw_handle(handle: RawHandle) -> HandleRef { + HandleRef(HandleRefInner(Some(File::from_raw_handle(handle)))) + } + + /// Return this handle as a standard `File` reference. + pub fn as_file(&self) -> &File { + (self.0).0.as_ref().unwrap() + } + + /// Return this handle as a standard `File` mutable reference. + pub fn as_file_mut(&mut self) -> &mut File { + (self.0).0.as_mut().unwrap() + } +} + +/// Construct borrowed and valid Windows handles from file-like objects. +pub trait AsHandleRef { + /// A borrowed handle that wraps the raw handle of the `Self` object. + fn as_handle_ref(&self) -> HandleRef; + + /// A convenience routine for extracting a `HandleRef` from `Self`, and + /// then extracting a raw handle from the `HandleRef`. + fn as_raw(&self) -> RawHandle { + self.as_handle_ref().as_raw_handle() + } +} + +impl<'a, T: AsHandleRef> AsHandleRef for &'a T { + fn as_handle_ref(&self) -> HandleRef { + (**self).as_handle_ref() + } +} + +impl AsHandleRef for Handle { + fn as_handle_ref(&self) -> HandleRef { + unsafe { HandleRef::from_raw_handle(self.as_raw_handle()) } + } +} + +impl AsHandleRef for HandleRef { + fn as_handle_ref(&self) -> HandleRef { + self.clone() + } +} + +impl AsHandleRef for File { + fn as_handle_ref(&self) -> HandleRef { + HandleRef::from_file(self) + } +} + +impl AsHandleRef for io::Stdin { + fn as_handle_ref(&self) -> HandleRef { + unsafe { HandleRef::from_raw_handle(self.as_raw_handle()) } + } +} + +impl AsHandleRef for io::Stdout { + fn as_handle_ref(&self) -> HandleRef { + unsafe { HandleRef::from_raw_handle(self.as_raw_handle()) } + } +} + +impl AsHandleRef for io::Stderr { + fn as_handle_ref(&self) -> HandleRef { + unsafe { HandleRef::from_raw_handle(self.as_raw_handle()) } + } +} + +impl AsHandleRef for process::ChildStdin { + fn as_handle_ref(&self) -> HandleRef { + unsafe { HandleRef::from_raw_handle(self.as_raw_handle()) } + } +} + +impl 
AsHandleRef for process::ChildStdout { + fn as_handle_ref(&self) -> HandleRef { + unsafe { HandleRef::from_raw_handle(self.as_raw_handle()) } + } +} + +impl AsHandleRef for process::ChildStderr { + fn as_handle_ref(&self) -> HandleRef { + unsafe { HandleRef::from_raw_handle(self.as_raw_handle()) } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..0d08bdf593e44e3f03e71cc05eac87417971020c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "32c3144490c016fe496a0aed769bce60987a2e9d" + }, + "path_in_vcs": "crates/libs/interface" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..772d94899e6f6421b74b3756b846aa8e2cd8a1e2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/Cargo.lock @@ -0,0 +1,47 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" + +[[package]] +name = "windows-interface" +version = "0.59.3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..20022bd5850d98ac2d78a1b8f895d37cd79d37e1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/Cargo.toml @@ -0,0 +1,65 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.74" +name = "windows-interface" +version = "0.59.3" +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "The interface macro for the Windows crates" +readme = "readme.md" +categories = ["os::windows-apis"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/microsoft/windows-rs" + +[package.metadata.docs.rs] +default-target = "x86_64-pc-windows-msvc" +targets = [] + +[lib] +name = "windows_interface" +path = "src/lib.rs" +proc-macro = true + +[dependencies.proc-macro2] +version = "1.0" +default-features = false + +[dependencies.quote] +version = "1.0" +default-features = false + +[dependencies.syn] +version = "2.0" +features = [ + "parsing", + "proc-macro", + "printing", + "full", + "clone-impls", +] +default-features = false + +[dev-dependencies] + +[lints.rust] +missing_unsafe_on_extern = "warn" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = ["cfg(windows_raw_dylib, windows_slim_errors)"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..c84aeec46a11136207e91f64d87c1bf9ca5e4256 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/Cargo.toml.orig @@ -0,0 +1,28 @@ +[package] +name = "windows-interface" +version = "0.59.3" +edition = "2021" +rust-version = "1.74" +license = "MIT OR Apache-2.0" +description = "The interface macro for the Windows crates" +repository = "https://github.com/microsoft/windows-rs" +categories = ["os::windows-apis"] +readme = "readme.md" + +[dependencies] +proc-macro2 = { workspace = true } +quote = { workspace = true } +syn = { workspace = true, features = ["parsing", "proc-macro", "printing", "full", 
"clone-impls"] } + +[dev-dependencies] +windows-core = { path = "../core" } + +[lints] +workspace = true + +[package.metadata.docs.rs] +default-target = "x86_64-pc-windows-msvc" +targets = [] + +[lib] +proc-macro = true diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/license-apache-2.0 b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/license-apache-2.0 new file mode 100644 index 0000000000000000000000000000000000000000..b5ed4ecec27b39347f6f6c081cf6c512f62bca0a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/license-apache-2.0 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) Microsoft Corporation. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/license-mit b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/license-mit new file mode 100644 index 0000000000000000000000000000000000000000..9e841e7a26e4eb057b24511e7b92d42b257a80e5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/license-mit @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/readme.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..1160db5ef842bf9b9806007bb9de8c47630d92d3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows-interface-0.59.3/readme.md @@ -0,0 +1,3 @@ +## The interface macro for the Windows crates + +See [windows-core](https://crates.io/crates/windows-core) for more information. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..3dcdf937220d6866d0d508772222d13ae7df5324 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "db06b51c2ebb743efb544d40e3064efa49f28d38" + }, + "path_in_vcs": "crates/targets/aarch64_gnullvm" +} \ No newline at end of file diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..c66697f46d2bbc42238657ff7d8891a623a82658 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/Cargo.toml @@ -0,0 +1,34 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.56" +name = "windows_aarch64_gnullvm" +version = "0.52.6" +authors = ["Microsoft"] +build = "build.rs" +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Import lib for Windows" +readme = false +license = "MIT OR Apache-2.0" +repository = "https://github.com/microsoft/windows-rs" + +[package.metadata.docs.rs] +default-target = "x86_64-pc-windows-msvc" +targets = [] + +[lib] +name = "windows_aarch64_gnullvm" +path = "src/lib.rs" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..0dfcf6c50ce1fb5f79668b22d9a9b19c68ed3a08 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/Cargo.toml.orig @@ -0,0 +1,13 @@ +[package] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +authors = ["Microsoft"] +edition 
= "2021" +rust-version = "1.56" +license = "MIT OR Apache-2.0" +description = "Import lib for Windows" +repository = "https://github.com/microsoft/windows-rs" + +[package.metadata.docs.rs] +default-target = "x86_64-pc-windows-msvc" +targets = [] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/build.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..bff093245fb572e55674d020acca106fd9161220 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/build.rs @@ -0,0 +1,8 @@ +fn main() { + let dir = std::env::var("CARGO_MANIFEST_DIR").unwrap(); + + println!( + "cargo:rustc-link-search=native={}", + std::path::Path::new(&dir).join("lib").display() + ); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/license-apache-2.0 b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/license-apache-2.0 new file mode 100644 index 0000000000000000000000000000000000000000..b5ed4ecec27b39347f6f6c081cf6c512f62bca0a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/license-apache-2.0 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright (c) Microsoft Corporation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/license-mit b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/license-mit new file mode 100644 index 0000000000000000000000000000000000000000..9e841e7a26e4eb057b24511e7b92d42b257a80e5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_aarch64_gnullvm-0.52.6/license-mit @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnu-0.52.6/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnu-0.52.6/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnu-0.52.6/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnu-0.52.6/license-apache-2.0 b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnu-0.52.6/license-apache-2.0 new file mode 100644 index 0000000000000000000000000000000000000000..b5ed4ecec27b39347f6f6c081cf6c512f62bca0a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnu-0.52.6/license-apache-2.0 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright (c) Microsoft Corporation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnu-0.52.6/license-mit b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnu-0.52.6/license-mit new file mode 100644 index 0000000000000000000000000000000000000000..9e841e7a26e4eb057b24511e7b92d42b257a80e5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnu-0.52.6/license-mit @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnullvm-0.52.6/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnullvm-0.52.6/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..0c9ac1ac8e4bd702086402213af792ae0636d192 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/windows_i686_gnullvm-0.52.6/src/lib.rs @@ -0,0 +1 @@ +#![no_std] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..c64200b4ee72cdba8ac258d21c42b596e7a3f0c1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "b7e65f9a4c317494cce2d18ea02b3d6eaaea7985" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/Cargo.toml b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..66c1b99a932447bfd24e7368adf4fa86a4ad4252 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/Cargo.toml @@ -0,0 +1,62 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "yasna" +version = "0.5.2" +authors = ["Masaki Hara "] +include = [ + "src/**/*.rs", + "Cargo.toml", +] +description = "ASN.1 library for Rust" +homepage = "https://github.com/qnighy/yasna.rs" +documentation = "https://docs.rs/yasna" +readme = "README.md" +keywords = [ + "parser", + "serialization", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/qnighy/yasna.rs" + +[package.metadata.docs.rs] +features = [ + "num-bigint", + "bit-vec", + "time", + "std", +] + +[dependencies.bit-vec] +version = "0.6.1" +features = ["std"] +optional = true +default-features = false + +[dependencies.num-bigint] +version = "0.4" +optional = true + +[dependencies.time] +version = "0.3.1" +features = ["std"] +optional = true +default-features = false + +[dev-dependencies.num-traits] +version = "0.2" +default-features = false + +[features] +default = [] +std = [] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..70a1e031013d3f4b6b8b8d7edee7a75051a934ce --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/Cargo.toml.orig @@ -0,0 +1,49 @@ +[package] +name = "yasna" +version = "0.5.2" +authors = ["Masaki Hara "] + +description = "ASN.1 library for 
Rust" +documentation = "https://docs.rs/yasna" +homepage = "https://github.com/qnighy/yasna.rs" +repository = "https://github.com/qnighy/yasna.rs" +readme = "README.md" +keywords = ["parser", "serialization"] +license = "MIT OR Apache-2.0" +edition = "2018" +include = [ + "src/**/*.rs", + "Cargo.toml", +] + +[features] +default = [] +std = [] + +[package.metadata.docs.rs] +features = ["num-bigint", "bit-vec", "time", "std"] + +[dependencies] + +[dependencies.num-bigint] +version = "0.4" +optional = true + +[dev-dependencies.num-traits] +version = "0.2" +default-features = false + +[dependencies.bit-vec] +version = "0.6.1" +default-features = false +features = ["std"] +optional = true + +[dependencies.time] +version = "0.3.1" +optional = true +default-features = false +features = ["std"] + +[workspace] +members = ["fuzz"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9c39636ee64653c291e9b12ba4b52dbf1b63b75a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/yasna-0.5.2/README.md @@ -0,0 +1,93 @@ +# yasna.rs: ASN.1 library for Rust + +[![Build Status](https://github.com/qnighy/yasna.rs/actions/workflows/test.yml/badge.svg)](https://github.com/qnighy/yasna.rs/actions) +[![](https://img.shields.io/crates/v/yasna.svg)](https://crates.io/crates/yasna) + +This is a Rust library for reading and writing ASN.1 data. + +- [crates.io/crates/yasna](https://crates.io/crates/yasna) +- [Documentation](https://qnighy.github.io/yasna.rs/yasna/index.html) + +Since this library is at an early stage, the APIs are subject to change. However, `BERReader` and `DERWriter` functionalities are getting stable. + +## Serialization/Construction + +Serialization in DER (Distinguished Encoding Rules) is supported. It can also be used for serialization in BER (Basic Encoding Rules). 
+ +```rust +fn main() { + let der = yasna::construct_der(|writer| { + writer.write_sequence(|writer| { + writer.next().write_i64(10); + writer.next().write_bool(true); + return Ok(()); + }) + }); + println!("(10, true) = {:?}", der); +} +``` + +Currently, these datatypes are supported: + +- BOOLEAN, INTEGER, BITSTRING, OCTETSTRING, NULL, OBJECT IDENTIFIER, +- SEQUENCE, SEQUENCE OF, SET, SET OF, CHOICE, +- UTF8String, NumericString, PrintableString, VisibleString, IA5String, BMPString, +- UTCTime, GeneralizedTime, +- Explicitly/Implicitly tagged types, +- DEFAULT/OPTIONAL in SEQUENCE/SET. + +These datatypes are *not* supported: + +- REAL +- TeletexString, VideotexString, GraphicString, GeneralString, UniversalString, +- TIME, DATE, TIME-OF-DAY, DATE-TIME, DURATION. + +## Deserialization/Parsing + +Deserialization in BER (Basic Encoding Rules) or DER (Distinguished Encoding Rules) is supported. + +```rust +fn main() { + let asn = yasna::parse_der(&[48, 6, 2, 1, 10, 1, 1, 255], |reader| { + reader.read_sequence(|reader| { + let i = reader.next().read_i64()?; + let b = reader.next().read_bool()?; + return Ok((i, b)); + }) + }).unwrap(); + println!("{:?} = [48, 6, 2, 1, 10, 1, 1, 255]", asn); +} +``` + +Currently, these datatypes are supported: + +- BOOLEAN, INTEGER, BITSTRING, OCTETSTRING, NULL, OBJECT IDENTIFIER, +- SEQUENCE, SEQUENCE OF, SET, SET OF, CHOICE, +- UTF8String, NumericString, PrintableString, VisibleString, IA5String, BMPString, +- UTCTime, GeneralizedTime, +- Explicitly/Implicitly tagged types, +- DEFAULT/OPTIONAL in SEQUENCE. + +These datatypes are *not* supported: + +- REAL +- TeletexString, VideotexString, GraphicString, GeneralString, UniversalString, +- TIME, DATE, TIME-OF-DAY, DATE-TIME, DURATION. +- DEFAULT/OPTIONAL in SET. + +## Other encodings + +This library is currently specialized for BER (Basic Encoding Rules) and DER (Distinguished Encoding Rules). 
Other encodings such as CER (Canonical Encoding Rules), PER (Packed Encoding Rules), and XER (XML Encoding Rules) are currently out of scope. + +## Streaming + +This library is currently specialized for on-memory serialization/deserialization. There are no plans for streaming ones. + +## Compatibility + +The minimum supported Rust version (MSRV) of `yasna.rs` is Rust 1.36.0. +Optional feature flags that enable interoperability with third-party crates (e.g. `time`) follow the policy of that crate if stricter. + +## License + +This library is distributed under MIT/Apache-2.0 dual license. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/development.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/development.md new file mode 100644 index 0000000000000000000000000000000000000000..e4cf61f90073bf69c4269105227371a74213a402 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/development.md @@ -0,0 +1,97 @@ + + +# Development Guidelines + +This document covers guidelines for developing code changes. + +## Build and Test + +This repository uses a wrapper script (`cargo.sh`) to ensure consistent +toolchain usage and configuration. + +> [!IMPORTANT] +> **NEVER** run `cargo` directly. +> **ALWAYS** use `./cargo.sh` for all cargo sub-commands. +> +> **Why?** `cargo.sh` ensures that the toolchains used in development match +> those in CI, which is important because some features are only available on +> specific toolchains, and because UI tests rely on the text of compiler errors, +> which changes between toolchain versions. + +### Syntax + +`./cargo.sh + [args]` + +This is equivalent to: + +`cargo +1.2.3 [args]` + +...where `1.2.3` is the toolchain version named by `` (e.g., `msrv` -> +`1.56.0`). + +### Toolchains + +The `` argument is mandatory: +- `msrv`: Minimum Supported Rust Version. +- `stable`: Stable toolchain. +- `nightly`: Nightly toolchain. 
+- `all`: Runs on `msrv`, `stable`, and `nightly` sequentially. +- Version-gated: e.g., `no-zerocopy-core-error-1-81-0` (see `Cargo.toml`). + + +## MSRV (Minimum Supported Rust Version) + +The MSRV is **1.56.0**. + +- **Do NOT** use features stabilized after 1.56.0 unless version-gated. +- **Requirement:** Ask for user approval before introducing new version-gated + behavior. +- **Verify**: Ensure code compiles on 1.56.0 (`./cargo.sh +msrv ...`). + +### Version Gating Convention + +We use `[package.metadata.build-rs]` in `Cargo.toml` to gate features by Rust version. + +1. **Define**: Add `no-zerocopy-- = ""` to `Cargo.toml`. +2. **Use**: Use `#[cfg(not(no_zerocopy__))]` (note underscores). +3. **Document**: For public items, use `#[cfg_attr(doc_cfg, doc(cfg(rust = "")))]`. + +**Important:** The toolchains listed in `.github/workflows/ci.yml` and +`Cargo.toml` (under `[package.metadata.build-rs]`) must be kept in sync. If you +add a new version-gated feature, ensure it is reflected in both places. + +## UI Tests + +For advice on how to add, modify, or remove UI tests (in `tests/ui-*` or +`zerocopy-derive/tests/ui-*`), refer to [agent_docs/ui_tests.md](./ui_tests.md). + +## Macro Development + +- **Shared Logic:** Put shared macro logic in `src/util/macro_util.rs` to avoid + code duplication in generated code. +- **Lints:** Generated code often triggers lints. Use `#[allow(...)]` liberally + in generated code to suppress them. + ```rust + // GOOD: Suppress lints that might be triggered by generated names. + // Example: Using a variant name (PascalCase) as a field name (snake_case). + // Input: `enum MyEnum { VariantA }` + // Generated: `union Variants { __field_VariantA: ... }` + quote! { + #[allow(non_snake_case)] + union ___ZerocopyVariants { + #(#fields)* + } + } + ``` + +## Unsafe Code + +`unsafe` code is extremely dangerous and should be avoided unless absolutely +necessary. 
For guidelines on writing unsafe code, including pointer casts and +safety comments, refer to [agent_docs/unsafe_code.md](./unsafe_code.md). diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/reviewing.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/reviewing.md new file mode 100644 index 0000000000000000000000000000000000000000..7ca4d2cb0c8e278134477f8f4d6afcc102380487 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/reviewing.md @@ -0,0 +1,109 @@ + + +# Reviewing + +This document outlines the protocols and standards for AI agents performing code +reviews in the `zerocopy` repository. + +## 1. The "Analyze-First" Mandate + +Prevent hallucination by grounding your review in reality. + +* **Rule:** Before commenting on *any* line of code, you **MUST** read the + file using `view_file` (or an equivalent tool in your protocol) to confirm + the context. +* **Why:** Diffs often miss surrounding context (e.g., `cfg` gates, trait + bounds, imports) that changes the validity of the code. +* **Protocol:** + 1. Review is requested (manually by a user or automatically via CI/PR). + 2. **YOU** call `view_file` (or equivalent) on the relevant files. + 3. **YOU** analyze the code in strict steps (Safety -> Logic -> Style). + 4. **YOU** generate the review. + +## 2. Reviewer Personas + +You are not just a "helper"; you are a multi-disciplinary expert. Switch between +these personas as you review: + +### A. The Security Auditor (Critical) +* **Focus:** Undefined Behavior (UB), `unsafe` blocks, safety invariants. +* **Reference:** You **MUST** verify compliance with + [`unsafe_code.md`](unsafe_code.md). +* **Checklist:** + * [ ] Does every `unsafe` block have a `// SAFETY:` comment? + * [ ] Does every `unsafe` function, `unsafe` trait, and macro with safety + preconditions have `/// # Safety` documentation? 
+ * [ ] Do safety comments comply with each rule in + [`unsafe_code.md`](unsafe_code.md)? + +### B. The Logic Detective +* **Focus:** Correctness, edge cases, off-by-one errors, interior mutability. +* **Checklist:** + * [ ] Does the code panic on valid input? + * [ ] Are unwrap/expect calls justified? + * [ ] Does the logic handle ZSTs (Zero-Sized Types) correctly? + * [ ] Are generics properly bounded? + +### C. The Style Cop +* **Focus:** Readability, idiomatic Rust, project standards. +* **Reference:** [`style.md`](style.md) +* **Checklist:** + * [ ] Are each of the style guidelines in [`style.md`](style.md) followed? + * [ ] Is there unnecessary complexity? + +### D. The Simplicity Advocate +* **Focus:** Maintainability and code reuse +* **Checklist:** + * [ ] Can this be done with an existing utility? (Search the codebase for + similar patterns.) + * [ ] Is the implementation surprisingly complex for what it does? + * [ ] Are there "clever" one-liners that should be expanded for + readability? + * [ ] Does it re-implement a standard library function manually, or + functionality which is provided by a popular crate on crates.io? + +## 3. Operational Protocols + +### Chain-of-Thought (CoT) Requirement +You **MUST** output your reasoning before your final verdict. +* **Bad:** "This looks good." +* **Good:** "I checked the `unsafe` block on line 42. It casts `*mut T` to + `*mut u8`. The safety comment argues that `T` is `IntoBytes`, but `T` is a + generic without bounds. This is unsound. **Finding:** Unsound `unsafe` + block." + +### Actionable Feedback +Every critique **MUST** be actionable. +* **Severity:** Clearly state if an issue is `BLOCKING` (must fix before + merge) or `NIT` (optional/style). +* **Fix:** Provide the exact code snippet to fix the issue. + + +### Handling TODO comments +`TODO` comments are used to prevent a PR from being merged until they are +resolved. When you encounter a `TODO` comment: +1. 
**Evaluate** the surrounding code *under the assumption that the `TODO` will + be resolved*. +2. **Critique** only if the `TODO` is insufficient (i.e., the code would still + be problematic *even if* the `TODO` were resolved). +3. **Safety Placeholders:** A `// SAFETY: TODO` comment is a valid placeholder + for a safety comment, and a `/// TODO` comment in a `/// # Safety` doc + section is a valid placeholder for safety documentation. **DO NOT** flag + the first as a missing safety justification or a critical issue, and **DO + NOT** flag the second as missing safety documentation. You must assume the + author will write a sound justification or accurate safety documentation + before merging. + + + +## 4. Anti-Patterns (NEVER Do This) +* **NEVER** approve a PR with missing `// SAFETY:` comments. +* **NEVER** assume a function works as named; check its definition. +* **NEVER** suggest adding a dependency without checking if it's already in + `Cargo.toml`. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/style.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/style.md new file mode 100644 index 0000000000000000000000000000000000000000..074ffa649f2f0571bc9b6d8e4558a314320270dd --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/style.md @@ -0,0 +1,45 @@ + + +# Style Guidelines + +This document covers code style and formatting guidelines for the project, as +well as commit message requirements. + +## File Headers + +Each file must contain a copyright header (see `src/lib.rs` for example) which is +based on that file's creation year. + +## Formatting + +Refer to `ci/check_fmt.sh`. + +## Comments + +- Wrap all comments (`//`, `///`, `//!`) at **80 columns** from the left margin, + taking into account any preceding code or comments. 
+- **Exceptions:** Markdown tables, ASCII diagrams, long URLs, code blocks, or + other cases where wrapping would impair readability. + +## Markdown Files + +- Wrap paragraphs and bulleted lists at **80 columns** from the left margin, + taking into account any preceding code or comments. For example, a markdown + block inside of a `/// Lorem ipsum...` comment should have lines no more than + 76 columns wide. + - In bulleted lists, indent subsequent lines by 2 spaces. + - Do not wrap links if it breaks them. +- Always put a blank line between a section header and the beginning of the section. + +## Pull Requests and Commit Messages + +Use GitHub issue syntax in commit messages: + +- Resolves issue: `Closes #123` +- Progress on issue: `Makes progress on #123` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/ui_tests.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/ui_tests.md new file mode 100644 index 0000000000000000000000000000000000000000..9a25ab0196344714d27128715efe349bd6c2fc64 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/ui_tests.md @@ -0,0 +1,48 @@ + + +# UI & Output Tests + +When updating UI test files (`tests/ui-*` or `zerocopy-derive/tests/ui-*`) or +functionality which could affect compiler error output or derive output, run: +`./tools/update-expected-test-output.sh`. + +**Note:** We maintain separate UI tests for different toolchains (`ui-msrv`, +`ui-stable`, `ui-nightly`) because compiler output varies. The script handles +this automatically. + +### Symlink Pattern + +To share test code across toolchains while allowing for different error output, +we use a symlink pattern: + +1. **Canonical Source:** The `ui-nightly` directory holds the actual source + files (`.rs`). +2. **Symlinks:** The `ui-stable` and `ui-msrv` directories contain *symlinks* + to the `.rs` files in `ui-nightly`. 
+ - **Example:** `tests/ui-stable/foo.rs` -> `../ui-nightly/foo.rs` +3. **Unique Output:** Each directory contains its own `.stderr` files. + +### Workflow Rules + +- **Adding a Test:** + 1. Create the `.rs` file in `ui-nightly`. + 2. Create relative symlinks in `ui-stable` and `ui-msrv` pointing to the + new file in `ui-nightly`. + 3. Run `./tools/update-expected-test-output.sh` to generate the `.stderr` files. +- **Modifying a Test:** + 1. Edit the `.rs` file in `ui-nightly`. + 2. Run `./tools/update-expected-test-output.sh` to update the `.stderr` files. +- **Removing a Test:** + 1. Delete the `.rs` file from `ui-nightly`. + 2. Delete the symlinks from `ui-stable` and `ui-msrv`. + 3. Delete the corresponding `.stderr` files from all three directories. + +**NEVER** edit `.stderr` files directly. Only update them via the script or the +commands it runs. If a `.stderr` file is causing a test failure and updating it +via tooling does not fix the failure, that indicates a bug. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/unsafe_code.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/unsafe_code.md new file mode 100644 index 0000000000000000000000000000000000000000..1a3295b662f205946dbde57fab693b81d0a9a66d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/unsafe_code.md @@ -0,0 +1,84 @@ + + +# Unsafe Code Guidelines + +`unsafe` code is extremely dangerous and should be avoided unless absolutely +necessary. When it is absolutely necessary to write `unsafe` code, it should be +done extremely carefully. + +This document covers guidelines for writing unsafe code, including pointer casts +and safety comments. + +## Pointer Casts + +- **Avoid `&slice[0] as *const T`**: Use `slice.as_ptr()`. Accessing subsequent + elements via pointer arithmetic on a single-element pointer is UB. 
+ ```rust + let slice = &[1, 2]; + + // BAD: Derived from reference to single element. + let ptr = &slice[0] as *const i32; + // SAFETY: UB! `ptr` has provenance only for the first element. + // Accessing `ptr.add(1)` is out of bounds for this provenance. + let val = unsafe { *ptr.add(1) }; + + // GOOD: Derived from the slice itself. + let ptr = slice.as_ptr(); + // SAFETY: Safe because `ptr` has provenance for the entire slice. + let val = unsafe { *ptr.add(1) }; + ``` +- **Avoid converting `&mut T` to `*const T`**: This reborrows as a shared + reference, restricting permissions. Cast `&mut T` to `*mut T` first. + ```rust + let mut val = 42; + let r = &mut val; + + // BAD: `r as *const i32` creates a shared reborrow. + // The resulting pointer loses write provenance. + let ptr = r as *const i32 as *mut i32; + // SAFETY: UB! Writing to a pointer derived from a shared reborrow. + unsafe { *ptr = 0 }; + + // GOOD: `r as *mut i32` preserves write provenance. + let ptr = r as *mut i32; + // SAFETY: Safe because `ptr` retains mutable provenance. + unsafe { *ptr = 0 }; + ``` + +## Safety Comments + +Every `unsafe` block must be documented with a `// SAFETY:` comment. +- **Requirement:** The comment must prove soundness using *only* text from the + stable [Rust Reference](https://doc.rust-lang.org/reference/) or [standard + library documentation](https://doc.rust-lang.org/std/). +- **Citation:** You must cite and quote the relevant text from the + documentation. Citations must cite a specific version of the documentation + (e.g. https://doc.rust-lang.org/1.91.0/reference/ or + https://doc.rust-lang.org/1.91.0/std/). +- **Prohibition:** Do not rely on "common sense" or behavior not guaranteed by + the docs. + ```rust + // BAD: Missing justification for "obvious" properties. + // SAFETY: `ptr` and `field` are from the same object. + let offset = unsafe { field.cast::().offset_from(ptr.cast::()) }; + + // GOOD: Explicitly justifies every requirement, even trivial ones. 
+ // SAFETY: + // - `ptr` and `field` are derived from the same allocated object [1]. + // - The distance between them is trivially a multiple of `u8`'s size (1) [2], + // satisfying `offset_from`'s alignment requirement [1]. + // + // [1] Per https://doc.rust-lang.org/1.91.0/std/primitive.pointer.html#method.offset_from: + // + // Both pointers must be derived from the same allocated object, and the + // distance between them must be a multiple of the element size. + // + // [2] https://doc.rust-lang.org/1.91.0/reference/type-layout.html#primitive-data-layout + let offset = unsafe { field.cast::().offset_from(ptr.cast::()) }; + ``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/validation.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/validation.md new file mode 100644 index 0000000000000000000000000000000000000000..feeadb74276391008cd9495cced6dc300cdb9761 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/agent_docs/validation.md @@ -0,0 +1,84 @@ + + +# Validating Changes + +This document covers the procedures and requirements for validating changes to +the project, including linting, testing, and pre-submission checks. + +## Linting + +Clippy should **always** be run on the `nightly` toolchain. + +```bash +./cargo.sh +nightly clippy +./cargo.sh +nightly clippy --tests +``` + +### Strict Linting + +- We deny warnings in CI. Even warnings not explicitly listed in `lib.rs` will + cause CI to fail. + - **Why:** We maintain a zero-warning policy so that new warnings (which often + indicate bugs) are immediately obvious and not obscured by existing ones. +- Do not introduce new warnings. +- Respect the strict `deny` list in `src/lib.rs`. + +## Validating Changes + +Ensure the library builds on all supported toolchains and that Clippy passes. 
+ +```bash +./cargo.sh +msrv check --tests --features __internal_use_only_features_that_work_on_stable +./cargo.sh +stable check --tests --features __internal_use_only_features_that_work_on_stable +./cargo.sh +nightly check --tests --all-features +./cargo.sh +nightly clippy --tests --all-features --workspace +``` + +**Note:** Tests are rarely toolchain-sensitive. Running tests on `nightly` is +usually sufficient. + +## Testing Strategy + +- **Unit Tests:** Place unit tests in a `mod tests` module within the source + file they test. +- **UI/Compile-Fail Tests:** + - **`zerocopy`:** Place in `tests/ui-*` (top-level). The top-level `tests` + directory contains *only* UI tests. + - **`zerocopy-derive`:** Place in `zerocopy-derive/tests/ui-*`. +- **Derive Integration Tests:** Place integration tests for derive macros in + `zerocopy-derive/tests`. +- **Derive Output Tests:** Place unit tests that verify the *generated code* + (token streams) in `zerocopy-derive/src/output_tests.rs`. +- **Formal Verification (Kani):** Place Kani proofs in a `mod proofs` module + within the source file they test. + - **Purpose:** Use the + [Kani Rust Verifier](https://model-checking.github.io/kani/) to prove the + soundness of `unsafe` code or code relied upon by `unsafe` blocks. Unlike + testing, which checks specific inputs, Kani proves properties for *all* + possible inputs. + - **How to Write Proofs:** + - **Harnesses:** Mark proof functions with `#[kani::proof]`. + - **Inputs:** Use `kani::any()` to generate arbitrary inputs. + - **Assumptions:** Use `kani::assume(condition)` to constrain inputs to + valid states (e.g., `align.is_power_of_two()`). + - **Assertions:** Use `assert!(condition)` to verify the properties you + want to prove. + - **CI:** Kani runs in CI using the `model-checking/kani-github-action` with + specific feature flags to ensure compatibility. + + + +## Feature Gates + +When editing code gated by a feature, compile **with and without** that feature. 
+ +```bash +./cargo.sh +stable check --tests +./cargo.sh +stable check --tests --feature foo +``` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_actions.sh b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_actions.sh new file mode 100644 index 0000000000000000000000000000000000000000..6eef7ea610d5766722b2fc937e7b444282f8e01f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_actions.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# +# Copyright 2025 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +set -eo pipefail + +script_name="ci/check_actions.sh" + +# Ensure action-validator is installed +if [ ! -x "$HOME/.cargo/bin/action-validator" ]; then + echo "$script_name: action-validator not found, installing..." >&2 + # Install specific version to ensure reproducibility + cargo install -q action-validator --version 0.8.0 --locked +fi +export PATH="$HOME/.cargo/bin:$PATH" + +# Files to exclude from validation (e.g., because they are not Actions/Workflows) +# Use relative paths matching `find .github` output +EXCLUDE_FILES=( + "./.github/dependabot.yml" + "./.github/release.yml" +) + +failed=0 + +# Use process substitution and while loop to handle filenames with spaces robustly +while IFS= read -r -d '' file; do + # Check if file is in exclusion list + for exclude in "${EXCLUDE_FILES[@]}"; do + if [[ "$file" == "$exclude" ]]; then + continue 2 + fi + done + + if ! 
output=$(action-validator "$file" 2>&1); then + echo "$script_name: ❌ Validation failed for $file" >&2 + echo "$output" | sed "s|^|$script_name: |" >&2 + failed=1 + fi +done < <(find ./.github -type f \( -iname '*.yml' -o -iname '*.yaml' \) -print0) + +if [[ $failed -ne 0 ]]; then + echo "$script_name: One or more files failed validation." >&2 + exit 1 +fi diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_all_toolchains_tested.sh b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_all_toolchains_tested.sh new file mode 100644 index 0000000000000000000000000000000000000000..88d4208a0ee0b4d1ee1d08edade10aa0d78b9208 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_all_toolchains_tested.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Copyright 2024 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +set -eo pipefail + +# Check whether the set of toolchains tested in this file (other than +# 'msrv', 'stable', and 'nightly') is equal to the set of toolchains +# listed in the 'package.metadata.build-rs' section of Cargo.toml. +# +# If the inputs to `diff` are not identical, `diff` exits with a +# non-zero error code, which causes this script to fail (thanks to +# `set -e`). 
+diff \ + <(cat .github/workflows/ci.yml | yq '.jobs.build_test.strategy.matrix.toolchain | .[]' | \ + sort -u | grep -v '^\(msrv\|stable\|nightly\)$') \ + <(cargo metadata -q --format-version 1 | \ + jq -r ".packages[] | select(.name == \"zerocopy\").metadata.\"build-rs\" | keys | .[]" | \ + sort -u) >&2 diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_fmt.sh b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_fmt.sh new file mode 100644 index 0000000000000000000000000000000000000000..5d5b5856c18be2f0d2bb2598b5980271f31ed476 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_fmt.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# +# Copyright 2024 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +set -eo pipefail +files=$(find . -iname '*.rs' -type f -not -path './target/*' -not -iname '*.expected.rs' -not -path './vendor/*' -not -path './tools/vendor/*') +# check that find succeeded +if [[ -z $files ]] +then + exit 1 +fi +./cargo.sh +nightly fmt --check -- $files >&2 diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_job_dependencies.sh b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_job_dependencies.sh new file mode 100644 index 0000000000000000000000000000000000000000..a9e0d362fc931a32b019732ea6e2e25ca77d63a2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_job_dependencies.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# +# Copyright 2024 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. 
+# This file may not be copied, modified, or distributed except according to +# those terms. + +set -eo pipefail +which yq > /dev/null +jobs=$(for i in $(find .github -iname '*.yaml' -or -iname '*.yml') + do + # Select jobs that are triggered by pull request. + if yq -e '.on | has("pull_request")' "$i" 2>/dev/null >/dev/null + then + # This gets the list of jobs that all-jobs-succeed does not depend on. + comm -23 \ + <(yq -r '.jobs | keys | .[]' "$i" | sort | uniq) \ + <(yq -r '.jobs.all-jobs-succeed.needs[]' "$i" | sort | uniq) + fi + + # The grep call here excludes all-jobs-succeed from the list of jobs that + # all-jobs-succeed does not depend on. If all-jobs-succeed does + # not depend on itself, we do not care about it. + done | sort | uniq | (grep -v '^all-jobs-succeed$' || true) +) + +if [ -n "$jobs" ] +then + missing_jobs="$(echo "$jobs" | tr ' ' '\n')" + echo "all-jobs-succeed missing dependencies on some jobs: $missing_jobs" | tee -a $GITHUB_STEP_SUMMARY >&2 + exit 1 +fi diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_msrv_is_minimal.sh b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_msrv_is_minimal.sh new file mode 100644 index 0000000000000000000000000000000000000000..b67a52c5c35c79def2ca6fb51f1caad3962020b7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_msrv_is_minimal.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# +# Copyright 2025 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. 
+ +set -eo pipefail + +read -r -d '' PYTHON_SCRIPT <<'EOF' || true +import sys +import json + +def parse_version(v): + """Converts a version string to a tuple of integers.""" + return tuple(map(int, v.split("."))) + +def main(): + """ + Checks that the package's MSRV is strictly lower than any version + specified in `package.metadata.build-rs`. + """ + try: + data = json.load(sys.stdin) + except json.JSONDecodeError as e: + print(f"Error parsing JSON from cargo metadata: {e}", file=sys.stderr) + sys.exit(1) + + # Find the zerocopy package + try: + pkg = next(p for p in data["packages"] if p["name"] == "zerocopy") + except StopIteration: + print("Error: zerocopy package not found in metadata", file=sys.stderr) + sys.exit(1) + + msrv_str = pkg.get("rust_version") + if not msrv_str: + print("Error: rust-version not found in Cargo.toml", file=sys.stderr) + sys.exit(1) + + try: + msrv = parse_version(msrv_str) + except ValueError: + print(f"Error: Invalid MSRV format: {msrv_str}", file=sys.stderr) + sys.exit(1) + + build_rs_versions = (pkg.get("metadata") or {}).get("build-rs", {}) + + failed = False + for name, ver_str in build_rs_versions.items(): + try: + ver = parse_version(ver_str) + except ValueError: + print(f"Warning: Skipping invalid version format for {name}: {ver_str}", file=sys.stderr) + continue + + # Check that MSRV < Version (strictly lower) + if not (msrv < ver): + print(f"Error: MSRV ({msrv_str}) is not strictly lower than {name} ({ver_str})", file=sys.stderr) + failed = True + + if failed: + sys.exit(1) + +if __name__ == "__main__": + main() +EOF + +cargo metadata --format-version 1 --no-deps | python3 -c "$PYTHON_SCRIPT" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_readme.sh b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_readme.sh new file mode 100644 index 0000000000000000000000000000000000000000..101a8df0737ada134b73dd63bafb635c16e3283f --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_readme.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# +# Copyright 2024 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +set -eo pipefail + +# Install again in case the installation failed during the +# `generate_cache` step. We treat that step as best-effort and +# suppress all errors from it. +cargo install -q cargo-readme --version 3.2.0 + +diff <(cargo -q run --config tools/.cargo/config.toml --manifest-path tools/Cargo.toml -p generate-readme) README.md >&2 +exit $? diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_stale_stderr.sh b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_stale_stderr.sh new file mode 100644 index 0000000000000000000000000000000000000000..e8a4a1417833db4828ae995ef884c56e25ce5588 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_stale_stderr.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# +# Copyright 2026 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +set -eo pipefail + +# Directories to search +DIRS=("tests" "zerocopy-derive/tests") + +EXIT_CODE=0 + +for dir in "${DIRS[@]}"; do + if [ ! -d "$dir" ]; then + echo "Warning: Directory $dir does not exist, skipping." >&2 + continue + fi + + # Find all .stderr files + while IFS= read -r -d '' stderr_file; do + # Construct the corresponding .rs file path + rs_file="${stderr_file%.stderr}.rs" + + # Check if the .rs file exists. 
The `-e` flag checks if file exists: + # It returns true for regular files and valid symlinks, and false for + # broken symlinks or missing files. + if [ ! -e "$rs_file" ]; then + echo "Error: Orphaned stderr file found: $stderr_file" >&2 + echo " Missing regular file or valid symlink: $rs_file" >&2 + EXIT_CODE=1 + fi + done < <(find "$dir" -name "*.stderr" -print0) +done + +if [ "$EXIT_CODE" -eq 1 ]; then + echo "Found stale .stderr files. Please delete them." >&2 +fi + +exit "$EXIT_CODE" diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_todo.sh b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_todo.sh new file mode 100644 index 0000000000000000000000000000000000000000..a9b973916f3038c3d3f2ec4d282bb0b6c26b42f6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_todo.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# +# Copyright 2025 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +set -euo pipefail + +# This allows us to leave XODO comments in this file and have them still be +# picked up by this script without having the script itself trigger false +# positives. The alternative would be to exclude this script entirely, which +# would mean that we couldn't use XODO comments in this script. +KEYWORD=$(echo XODO | sed -e 's/X/T/') + +# Make sure `rg` is installed (if this fails, `set -e` above will cause the +# script to exit). 
+rg --version >/dev/null + +# -H: Print filename (default for multiple files/recursive) +# -n: Print line number +# -w: Match whole words +# Match TODO, TODO-check-disable, and TODO-check-enable +output=$(rg -H -n -w "$KEYWORD|$KEYWORD-check-disable|$KEYWORD-check-enable" "$@" || true) + +if [ -z "$output" ]; then + exit 0 +fi + +# Track the disabled state for each file. Since we process lines in order for +# each file, we can maintain state. However, rg output might interleave files if +# not sorted, but usually it's grouped. To be safe, we sort the output by +# filename and line number. +sorted_output=$(echo "$output" | sort -t: -k1,1 -k2,2n) + +exit_code=0 +current_file="" +disabled=0 + +while IFS= read -r line; do + if [[ "$line" =~ ^(.*):([0-9]+):(.*)$ ]]; then + file="${BASH_REMATCH[1]}" + content="${BASH_REMATCH[3]}" + else + echo "Error: could not parse rg output line: $line" >&2 + exit 1 + fi + + if [ "$file" != "$current_file" ]; then + current_file="$file" + disabled=0 + fi + + if [[ "$content" == *"$KEYWORD-check-disable"* ]]; then + disabled=1 + elif [[ "$content" == *"$KEYWORD-check-enable"* ]]; then + disabled=0 + elif [[ "$content" == *"$KEYWORD"* ]]; then + if [ "$disabled" -eq 0 ]; then + if [ "$exit_code" -eq 0 ]; then + echo "Found $KEYWORD markers in the codebase." >&2 + echo "$KEYWORD is used for tasks that should be done before merging a PR; if you want to leave a message in the codebase, use FIXME." >&2 + echo "If you need to allow a $KEYWORD, wrap it in $KEYWORD-check-disable and $KEYWORD-check-enable." 
>&2 + echo "" >&2 + exit_code=1 + fi + echo "$line" >&2 + fi + fi +done <<< "$sorted_output" + +exit $exit_code diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_versions.sh b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_versions.sh new file mode 100644 index 0000000000000000000000000000000000000000..4ef9f1a6641067dc2d683b7b04a086c7cbb23c2d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/check_versions.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# +# Copyright 2024 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +set -eo pipefail + +# Usage: version +function version { + cargo metadata -q --format-version 1 | jq -r ".packages[] | select(.name == \"$1\").version" +} + +ver_zerocopy=$(version zerocopy) +ver_zerocopy_derive=$(version zerocopy-derive) + +# Usage: dependency-version +function dependency-version { + KIND="$1" + TARGET="$2" + cargo metadata -q --format-version 1 \ + | jq -r ".packages[] | select(.name == \"zerocopy\").dependencies[] | select((.name == \"zerocopy-derive\") and .kind == $KIND and .target == $TARGET).req" +} + +# The non-dev dependency version (kind `null` filters out the dev +# dependency, and target `null` filters out the targeted version). +zerocopy_derive_dep_ver=$(dependency-version null null) + +# The non-dev dependency, targeted version (kind `null` filters out +# the dev dependency). +zerocopy_derive_targeted_ver=$(dependency-version null '"cfg(any())"') + +# The dev dependency version (kind `"dev"` selects only the dev +# dependency). 
+zerocopy_derive_dev_dep_ver=$(dependency-version '"dev"' null) + +function assert-match { + VER_A="$1" + VER_B="$2" + SUCCESS_MSG="$3" + FAILURE_MSG="$4" + if [[ "$VER_A" == "$VER_B" ]]; then + echo "$SUCCESS_MSG" | tee -a $GITHUB_STEP_SUMMARY + else + echo "$FAILURE_MSG" | tee -a $GITHUB_STEP_SUMMARY >&2 + exit 1 + fi +} + +assert-match "$ver_zerocopy" "$ver_zerocopy_derive" \ + "Same crate version ($ver_zerocopy) found for zerocopy and zerocopy-derive." \ + "Different crate versions found for zerocopy ($ver_zerocopy) and zerocopy-derive ($ver_zerocopy_derive)." + +# Note the leading `=` sign - the dependency needs to be an exact one. +assert-match "=$ver_zerocopy_derive" "$zerocopy_derive_dep_ver" \ + "zerocopy depends upon same version of zerocopy-derive in-tree ($zerocopy_derive_dep_ver)." \ + "zerocopy depends upon different version of zerocopy-derive ($zerocopy_derive_dep_ver) than the one in-tree ($ver_zerocopy_derive)." + +# Note the leading `=` sign - the dependency needs to be an exact one. +assert-match "=$ver_zerocopy_derive" "$zerocopy_derive_dev_dep_ver" \ + "In dev mode, zerocopy depends upon same version of zerocopy-derive in-tree ($zerocopy_derive_dev_dep_ver)." \ + "In dev mode, zerocopy depends upon different version of zerocopy-derive ($zerocopy_derive_dev_dep_ver) than the one in-tree ($ver_zerocopy_derive)." + +assert-match "$zerocopy_derive_dep_ver" "$zerocopy_derive_targeted_ver" \ + "Same crate version ($zerocopy_derive_dep_ver) found for optional and targeted zerocopy-derive dependency." \ + "Different crate versions found for optional ($zerocopy_derive_dep_ver) and targeted ($zerocopy_derive_targeted_ver) dependency." 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/release_crate_version.sh b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/release_crate_version.sh new file mode 100644 index 0000000000000000000000000000000000000000..52f9b11280d55f77ba4fccf183c1780402b7574a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/ci/release_crate_version.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# +# Copyright 2024 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +set -e + +if [ $# -ne 1 ]; then + echo "Usage: $0 " >&2 + exit 1 +fi + +VERSION="$1" + +sed -i -e "s/^zerocopy-derive = { version = \"=[0-9a-zA-Z\.-]*\"/zerocopy-derive = { version = \"=$VERSION\"/" Cargo.toml +sed -i -e "s/^version = \"[0-9a-zA-Z\.-]*\"/version = \"$VERSION\"/" Cargo.toml zerocopy-derive/Cargo.toml + +# Ensure that `Cargo.lock` is updated. +cargo generate-lockfile diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/githooks/pre-push b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/githooks/pre-push new file mode 100644 index 0000000000000000000000000000000000000000..cbc7e341dc6fe8d99b2568df5affc705f8ab823a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/githooks/pre-push @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# +# Copyright 2024 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. 
+ +set -eo pipefail +echo "Running pre-push git hook: $0" +# Forego redirecting stdout to /dev/null on check_fmt.sh because the output from +# `cargo fmt` is useful (and the good stuff is not delivered by stderr). +# +# Background all jobs and wait for them so they can run in parallel. +./ci/check_actions.sh & ACTIONS_PID=$! +./ci/check_fmt.sh & FMT_PID=$! +./ci/check_all_toolchains_tested.sh >/dev/null & TOOLCHAINS_PID=$! +./ci/check_job_dependencies.sh >/dev/null & JOB_DEPS_PID=$! +./ci/check_readme.sh >/dev/null & README_PID=$! +./ci/check_stale_stderr.sh >/dev/null & STALE_STDERR_PID=$! +./ci/check_todo.sh >/dev/null & XODO_PID=$! +./ci/check_versions.sh >/dev/null & VERSIONS_PID=$! +./ci/check_msrv_is_minimal.sh >/dev/null & MSRV_PID=$! + +# `wait ` exits with the same status code as the job it's waiting for. +# Since we `set -e` above, this will have the effect of causing the entire +# script to exit with a non-zero status code if any of these jobs does the same. +# Note that, while `wait` (with no PID argument) waits for all backgrounded +# jobs, it exits with code 0 even if one of the backgrounded jobs does not, so +# we can't use it here. +wait $ACTIONS_PID +wait $FMT_PID +wait $TOOLCHAINS_PID +wait $JOB_DEPS_PID +wait $README_PID +wait $STALE_STDERR_PID +wait $XODO_PID +wait $VERSIONS_PID +wait $MSRV_PID + +# Ensure that this script calls all scripts in `ci/*`. This isn't a foolproof +# check since it just checks for the string in this script (e.g., it could be in +# a comment, which would trigger a false positive), but it should catch obvious +# errors. Also note that this entire hook is a nice-to-have - failures that +# aren't caught here will still be caught in CI. +# +# This was added because, in #728, we added `ci/check_all_toolchains_tested.sh` +# without calling it from this script. 
+GLOBIGNORE="./*/release_crate_version.sh" # We don't want to run this one +for f in ./ci/*; do + grep "$f" githooks/pre-push >/dev/null || { echo "$f not called from githooks/pre-push" >&2 ; exit 1; } +done +unset GLOBIGNORE diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/byte_slice.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/byte_slice.rs new file mode 100644 index 0000000000000000000000000000000000000000..ace0b5dd6ca4d61a8945ba7fb8c7bfbd0fe10e65 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/byte_slice.rs @@ -0,0 +1,432 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +//! Traits for types that encapsulate a `[u8]`. +//! +//! These traits are used to bound the `B` parameter of [`Ref`]. + +use core::{ + cell, + ops::{Deref, DerefMut}, +}; + +// For each trait polyfill, as soon as the corresponding feature is stable, the +// polyfill import will be unused because method/function resolution will prefer +// the inherent method/function over a trait method/function. Thus, we suppress +// the `unused_imports` warning. +// +// See the documentation on `util::polyfills` for more information. +#[allow(unused_imports)] +use crate::util::polyfills::{self, NonNullExt as _, NumExt as _}; +#[cfg(doc)] +use crate::Ref; + +/// A mutable or immutable reference to a byte slice. +/// +/// `ByteSlice` abstracts over the mutability of a byte slice reference, and is +/// implemented for various special reference types such as +/// [`Ref<[u8]>`](core::cell::Ref) and [`RefMut<[u8]>`](core::cell::RefMut). +/// +/// # Safety +/// +/// Implementations of `ByteSlice` must promise that their implementations of +/// [`Deref`] and [`DerefMut`] are "stable". 
In particular, given `B: ByteSlice` +/// and `b: B`, two calls, each to either `b.deref()` or `b.deref_mut()`, must +/// return a byte slice with the same address and length. This must hold even if +/// the two calls are separated by an arbitrary sequence of calls to methods on +/// `ByteSlice`, [`ByteSliceMut`], [`IntoByteSlice`], or [`IntoByteSliceMut`], +/// or on their super-traits. This does *not* need to hold if the two calls are +/// separated by any method calls, field accesses, or field modifications *other +/// than* those from these traits. +/// +/// Note that this also implies that, given `b: B`, the address and length +/// cannot be modified via objects other than `b`, either on the same thread or +/// on another thread. +pub unsafe trait ByteSlice: Deref + Sized {} + +/// A mutable reference to a byte slice. +/// +/// `ByteSliceMut` abstracts over various ways of storing a mutable reference to +/// a byte slice, and is implemented for various special reference types such as +/// `RefMut<[u8]>`. +/// +/// `ByteSliceMut` is a shorthand for [`ByteSlice`] and [`DerefMut`]. +pub trait ByteSliceMut: ByteSlice + DerefMut {} +impl ByteSliceMut for B {} + +/// A [`ByteSlice`] which can be copied without violating dereference stability. +/// +/// # Safety +/// +/// If `B: CopyableByteSlice`, then the dereference stability properties +/// required by [`ByteSlice`] (see that trait's safety documentation) do not +/// only hold regarding two calls to `b.deref()` or `b.deref_mut()`, but also +/// hold regarding `c.deref()` or `c.deref_mut()`, where `c` is produced by +/// copying `b`. +pub unsafe trait CopyableByteSlice: ByteSlice + Copy + CloneableByteSlice {} + +/// A [`ByteSlice`] which can be cloned without violating dereference stability. 
+/// +/// # Safety +/// +/// If `B: CloneableByteSlice`, then the dereference stability properties +/// required by [`ByteSlice`] (see that trait's safety documentation) do not +/// only hold regarding two calls to `b.deref()` or `b.deref_mut()`, but also +/// hold regarding `c.deref()` or `c.deref_mut()`, where `c` is produced by +/// `b.clone()`, `b.clone().clone()`, etc. +pub unsafe trait CloneableByteSlice: ByteSlice + Clone {} + +/// A [`ByteSlice`] that can be split in two. +/// +/// # Safety +/// +/// Unsafe code may depend for its soundness on the assumption that `split_at` +/// and `split_at_unchecked` are implemented correctly. In particular, given `B: +/// SplitByteSlice` and `b: B`, if `b.deref()` returns a byte slice with address +/// `addr` and length `len`, then if `split <= len`, both of these +/// invocations: +/// - `b.split_at(split)` +/// - `b.split_at_unchecked(split)` +/// +/// ...will return `(first, second)` such that: +/// - `first`'s address is `addr` and its length is `split` +/// - `second`'s address is `addr + split` and its length is `len - split` +pub unsafe trait SplitByteSlice: ByteSlice { + /// Attempts to split `self` at the midpoint. + /// + /// `s.split_at(mid)` returns `Ok((s[..mid], s[mid..]))` if `mid <= + /// s.deref().len()` and otherwise returns `Err(s)`. + /// + /// # Safety + /// + /// Unsafe code may rely on this function correctly implementing the above + /// functionality. + #[inline] + fn split_at(self, mid: usize) -> Result<(Self, Self), Self> { + if mid <= self.deref().len() { + // SAFETY: Above, we ensure that `mid <= self.deref().len()`. By + // invariant on `ByteSlice`, a supertrait of `SplitByteSlice`, + // `.deref()` is guaranteed to be "stable"; i.e., it will always + // dereference to a byte slice of the same address and length. Thus, + // we can be sure that the above precondition remains satisfied + // through the call to `split_at_unchecked`. 
+ unsafe { Ok(self.split_at_unchecked(mid)) } + } else { + Err(self) + } + } + + /// Splits the slice at the midpoint, possibly omitting bounds checks. + /// + /// `s.split_at_unchecked(mid)` returns `s[..mid]` and `s[mid..]`. + /// + /// # Safety + /// + /// `mid` must not be greater than `self.deref().len()`. + /// + /// # Panics + /// + /// Implementations of this method may choose to perform a bounds check and + /// panic if `mid > self.deref().len()`. They may also panic for any other + /// reason. Since it is optional, callers must not rely on this behavior for + /// soundness. + #[must_use] + unsafe fn split_at_unchecked(self, mid: usize) -> (Self, Self); +} + +/// A shorthand for [`SplitByteSlice`] and [`ByteSliceMut`]. +pub trait SplitByteSliceMut: SplitByteSlice + ByteSliceMut {} +impl SplitByteSliceMut for B {} + +#[allow(clippy::missing_safety_doc)] // There's a `Safety` section on `into_byte_slice`. +/// A [`ByteSlice`] that conveys no ownership, and so can be converted into a +/// byte slice. +/// +/// Some `ByteSlice` types (notably, the standard library's [`Ref`] type) convey +/// ownership, and so they cannot soundly be moved by-value into a byte slice +/// type (`&[u8]`). Some methods in this crate's API (such as [`Ref::into_ref`]) +/// are only compatible with `ByteSlice` types without these ownership +/// semantics. +/// +/// [`Ref`]: core::cell::Ref +pub unsafe trait IntoByteSlice<'a>: ByteSlice { + /// Coverts `self` into a `&[u8]`. + /// + /// # Safety + /// + /// The returned reference has the same address and length as `self.deref()` + /// and `self.deref_mut()`. + /// + /// Note that, combined with the safety invariant on [`ByteSlice`], this + /// safety invariant implies that the returned reference is "stable" in the + /// sense described in the `ByteSlice` docs. + fn into_byte_slice(self) -> &'a [u8]; +} + +#[allow(clippy::missing_safety_doc)] // There's a `Safety` section on `into_byte_slice_mut`. 
+/// A [`ByteSliceMut`] that conveys no ownership, and so can be converted into a +/// mutable byte slice. +/// +/// Some `ByteSliceMut` types (notably, the standard library's [`RefMut`] type) +/// convey ownership, and so they cannot soundly be moved by-value into a byte +/// slice type (`&mut [u8]`). Some methods in this crate's API (such as +/// [`Ref::into_mut`]) are only compatible with `ByteSliceMut` types without +/// these ownership semantics. +/// +/// [`RefMut`]: core::cell::RefMut +pub unsafe trait IntoByteSliceMut<'a>: IntoByteSlice<'a> + ByteSliceMut { + /// Coverts `self` into a `&mut [u8]`. + /// + /// # Safety + /// + /// The returned reference has the same address and length as `self.deref()` + /// and `self.deref_mut()`. + /// + /// Note that, combined with the safety invariant on [`ByteSlice`], this + /// safety invariant implies that the returned reference is "stable" in the + /// sense described in the `ByteSlice` docs. + fn into_byte_slice_mut(self) -> &'a mut [u8]; +} + +// FIXME(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl ByteSlice for &[u8] {} + +// FIXME(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl CopyableByteSlice for &[u8] {} + +// FIXME(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl CloneableByteSlice for &[u8] {} + +// SAFETY: This delegates to `polyfills:split_at_unchecked`, which is documented +// to correctly split `self` into two slices at the given `mid` point. +unsafe impl SplitByteSlice for &[u8] { + #[inline] + unsafe fn split_at_unchecked(self, mid: usize) -> (Self, Self) { + // SAFETY: By contract on caller, `mid` is not greater than + // `self.len()`. + #[allow(clippy::multiple_unsafe_ops_per_block)] + unsafe { + (<[u8]>::get_unchecked(self, ..mid), <[u8]>::get_unchecked(self, mid..)) + } + } +} + +// SAFETY: See inline. 
+unsafe impl<'a> IntoByteSlice<'a> for &'a [u8] { + #[inline(always)] + fn into_byte_slice(self) -> &'a [u8] { + // SAFETY: It would be patently insane to implement `::deref` as anything other than `fn deref(&self) -> &[u8] { + // *self }`. Assuming this holds, then `self` is stable as required by + // `into_byte_slice`. + self + } +} + +// FIXME(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl ByteSlice for &mut [u8] {} + +// SAFETY: This delegates to `polyfills:split_at_mut_unchecked`, which is +// documented to correctly split `self` into two slices at the given `mid` +// point. +unsafe impl SplitByteSlice for &mut [u8] { + #[inline] + unsafe fn split_at_unchecked(self, mid: usize) -> (Self, Self) { + use core::slice::from_raw_parts_mut; + + // `l_ptr` is non-null, because `self` is non-null, by invariant on + // `&mut [u8]`. + let l_ptr = self.as_mut_ptr(); + + // SAFETY: By contract on caller, `mid` is not greater than + // `self.len()`. + let r_ptr = unsafe { l_ptr.add(mid) }; + + let l_len = mid; + + // SAFETY: By contract on caller, `mid` is not greater than + // `self.len()`. + // + // FIXME(#67): Remove this allow. See NumExt for more details. + #[allow(unstable_name_collisions)] + let r_len = unsafe { self.len().unchecked_sub(mid) }; + + // SAFETY: These invocations of `from_raw_parts_mut` satisfy its + // documented safety preconditions [1]: + // - The data `l_ptr` and `r_ptr` are valid for both reads and writes of + // `l_len` and `r_len` bytes, respectively, and they are trivially + // aligned. In particular: + // - The entire memory range of each slice is contained within a + // single allocated object, since `l_ptr` and `r_ptr` are both + // derived from within the address range of `self`. + // - Both `l_ptr` and `r_ptr` are non-null and trivially aligned. 
+ // `self` is non-null by invariant on `&mut [u8]`, and the + // operations that derive `l_ptr` and `r_ptr` from `self` do not + // nullify either pointer. + // - The data `l_ptr` and `r_ptr` point to `l_len` and `r_len`, + // respectively, consecutive properly initialized values of type `u8`. + // This is true for `self` by invariant on `&mut [u8]`, and remains + // true for these two sub-slices of `self`. + // - The memory referenced by the returned slice cannot be accessed + // through any other pointer (not derived from the return value) for + // the duration of lifetime `'a``, because: + // - `split_at_unchecked` consumes `self` (which is not `Copy`), + // - `split_at_unchecked` does not exfiltrate any references to this + // memory, besides those references returned below, + // - the returned slices are non-overlapping. + // - The individual sizes of the sub-slices of `self` are no larger than + // `isize::MAX`, because their combined sizes are no larger than + // `isize::MAX`, by invariant on `self`. + // + // [1] https://doc.rust-lang.org/std/slice/fn.from_raw_parts_mut.html#safety + #[allow(clippy::multiple_unsafe_ops_per_block)] + unsafe { + (from_raw_parts_mut(l_ptr, l_len), from_raw_parts_mut(r_ptr, r_len)) + } + } +} + +// SAFETY: See inline. +unsafe impl<'a> IntoByteSlice<'a> for &'a mut [u8] { + #[inline(always)] + fn into_byte_slice(self) -> &'a [u8] { + // SAFETY: It would be patently insane to implement `::deref` as anything other than `fn deref(&self) -> &[u8] { + // *self }`. Assuming this holds, then `self` is stable as required by + // `into_byte_slice`. + self + } +} + +// SAFETY: See inline. +unsafe impl<'a> IntoByteSliceMut<'a> for &'a mut [u8] { + #[inline(always)] + fn into_byte_slice_mut(self) -> &'a mut [u8] { + // SAFETY: It would be patently insane to implement `::deref` as anything other than `fn deref_mut(&mut self) -> &mut + // [u8] { *self }`. Assuming this holds, then `self` is stable as + // required by `into_byte_slice_mut`. 
+ self + } +} + +// FIXME(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl ByteSlice for cell::Ref<'_, [u8]> {} + +// SAFETY: This delegates to stdlib implementation of `Ref::map_split`, which is +// assumed to be correct, and `SplitByteSlice::split_at_unchecked`, which is +// documented to correctly split `self` into two slices at the given `mid` +// point. +unsafe impl SplitByteSlice for cell::Ref<'_, [u8]> { + #[inline] + unsafe fn split_at_unchecked(self, mid: usize) -> (Self, Self) { + cell::Ref::map_split(self, |slice| + // SAFETY: By precondition on caller, `mid` is not greater than + // `slice.len()`. + unsafe { + SplitByteSlice::split_at_unchecked(slice, mid) + }) + } +} + +// FIXME(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl ByteSlice for cell::RefMut<'_, [u8]> {} + +// SAFETY: This delegates to stdlib implementation of `RefMut::map_split`, which +// is assumed to be correct, and `SplitByteSlice::split_at_unchecked`, which is +// documented to correctly split `self` into two slices at the given `mid` +// point. 
+unsafe impl SplitByteSlice for cell::RefMut<'_, [u8]> { + #[inline] + unsafe fn split_at_unchecked(self, mid: usize) -> (Self, Self) { + cell::RefMut::map_split(self, |slice| + // SAFETY: By precondition on caller, `mid` is not greater than + // `slice.len()` + unsafe { + SplitByteSlice::split_at_unchecked(slice, mid) + }) + } +} + +#[cfg(kani)] +mod proofs { + use super::*; + + fn any_vec() -> Vec { + let len = kani::any(); + kani::assume(len <= isize::MAX as usize); + vec![0u8; len] + } + + #[kani::proof] + fn prove_split_at_unchecked() { + let v = any_vec(); + let slc = v.as_slice(); + let mid = kani::any(); + kani::assume(mid <= slc.len()); + let (l, r) = unsafe { slc.split_at_unchecked(mid) }; + assert_eq!(l.len() + r.len(), slc.len()); + + let slc: *const _ = slc; + let l: *const _ = l; + let r: *const _ = r; + + assert_eq!(slc.cast::(), l.cast::()); + assert_eq!(unsafe { slc.cast::().add(mid) }, r.cast::()); + + let mut v = any_vec(); + let slc = v.as_mut_slice(); + let len = slc.len(); + let mid = kani::any(); + kani::assume(mid <= slc.len()); + let (l, r) = unsafe { slc.split_at_unchecked(mid) }; + assert_eq!(l.len() + r.len(), len); + + let l: *mut _ = l; + let r: *mut _ = r; + let slc: *mut _ = slc; + + assert_eq!(slc.cast::(), l.cast::()); + assert_eq!(unsafe { slc.cast::().add(mid) }, r.cast::()); + } +} + +#[cfg(test)] +mod tests { + use core::cell::RefCell; + + use super::*; + + #[test] + fn test_ref_split_at_unchecked() { + let cell = RefCell::new([1, 2, 3, 4]); + let borrow = cell.borrow(); + let slice_ref: cell::Ref<'_, [u8]> = cell::Ref::map(borrow, |a| &a[..]); + // SAFETY: 2 is within bounds of [1, 2, 3, 4] + let (l, r) = unsafe { slice_ref.split_at_unchecked(2) }; + assert_eq!(*l, [1, 2]); + assert_eq!(*r, [3, 4]); + } + + #[test] + fn test_ref_mut_split_at_unchecked() { + let cell = RefCell::new([1, 2, 3, 4]); + let borrow_mut = cell.borrow_mut(); + let slice_ref_mut: cell::RefMut<'_, [u8]> = cell::RefMut::map(borrow_mut, |a| &mut a[..]); + 
// SAFETY: 2 is within bounds of [1, 2, 3, 4] + let (l, r) = unsafe { slice_ref_mut.split_at_unchecked(2) }; + assert_eq!(*l, [1, 2]); + assert_eq!(*r, [3, 4]); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/byteorder.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/byteorder.rs new file mode 100644 index 0000000000000000000000000000000000000000..257505203415640920dbb09407c43d5149317609 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/byteorder.rs @@ -0,0 +1,1563 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +//! Byte order-aware numeric primitives. +//! +//! This module contains equivalents of the native multi-byte integer types with +//! no alignment requirement and supporting byte order conversions. +//! +//! For each native multi-byte integer type - `u16`, `i16`, `u32`, etc - and +//! floating point type - `f32` and `f64` - an equivalent type is defined by +//! this module - [`U16`], [`I16`], [`U32`], [`F32`], [`F64`], etc. Unlike their +//! native counterparts, these types have alignment 1, and take a type parameter +//! specifying the byte order in which the bytes are stored in memory. Each type +//! implements this crate's relevant conversion and marker traits. +//! +//! These two properties, taken together, make these types useful for defining +//! data structures whose memory layout matches a wire format such as that of a +//! network protocol or a file format. Such formats often have multi-byte values +//! at offsets that do not respect the alignment requirements of the equivalent +//! native types, and stored in a byte order not necessarily the same as that of +//! the target platform. +//! +//! 
Type aliases are provided for common byte orders in the [`big_endian`], +//! [`little_endian`], [`network_endian`], and [`native_endian`] submodules. +//! Note that network-endian is a synonym for big-endian. +//! +//! # Example +//! +//! One use of these types is for representing network packet formats, such as +//! UDP: +//! +//! ```rust +//! use zerocopy::{*, byteorder::network_endian::U16}; +//! # use zerocopy_derive::*; +//! +//! #[derive(FromBytes, IntoBytes, KnownLayout, Immutable, Unaligned)] +//! #[repr(C)] +//! struct UdpHeader { +//! src_port: U16, +//! dst_port: U16, +//! length: U16, +//! checksum: U16, +//! } +//! +//! #[derive(FromBytes, IntoBytes, KnownLayout, Immutable, Unaligned)] +//! #[repr(C, packed)] +//! struct UdpPacket { +//! header: UdpHeader, +//! body: [u8], +//! } +//! +//! impl UdpPacket { +//! fn parse(bytes: &[u8]) -> Option<&UdpPacket> { +//! UdpPacket::ref_from_bytes(bytes).ok() +//! } +//! } +//! ``` + +use core::{ + convert::{TryFrom, TryInto}, + fmt::{Binary, Debug, LowerHex, Octal, UpperHex}, + hash::Hash, + num::TryFromIntError, +}; + +use super::*; + +/// A type-level representation of byte order. +/// +/// This type is implemented by [`BigEndian`] and [`LittleEndian`], which +/// represent big-endian and little-endian byte order respectively. This module +/// also provides a number of useful aliases for those types: [`NativeEndian`], +/// [`NetworkEndian`], [`BE`], and [`LE`]. +/// +/// `ByteOrder` types can be used to specify the byte order of the types in this +/// module - for example, [`U32`] is a 32-bit integer stored in +/// big-endian byte order. 
+/// +/// [`U32`]: U32 +pub trait ByteOrder: + Copy + Clone + Debug + Display + Eq + PartialEq + Ord + PartialOrd + Hash + private::Sealed +{ + #[doc(hidden)] + const ORDER: Order; +} + +mod private { + pub trait Sealed {} + + impl Sealed for super::BigEndian {} + impl Sealed for super::LittleEndian {} +} + +#[allow(missing_copy_implementations, missing_debug_implementations)] +#[doc(hidden)] +pub enum Order { + BigEndian, + LittleEndian, +} + +/// Big-endian byte order. +/// +/// See [`ByteOrder`] for more details. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum BigEndian {} + +impl ByteOrder for BigEndian { + const ORDER: Order = Order::BigEndian; +} + +impl Display for BigEndian { + #[inline] + fn fmt(&self, _: &mut Formatter<'_>) -> fmt::Result { + match *self {} + } +} + +/// Little-endian byte order. +/// +/// See [`ByteOrder`] for more details. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum LittleEndian {} + +impl ByteOrder for LittleEndian { + const ORDER: Order = Order::LittleEndian; +} + +impl Display for LittleEndian { + #[inline] + fn fmt(&self, _: &mut Formatter<'_>) -> fmt::Result { + match *self {} + } +} + +/// The endianness used by this platform. +/// +/// This is a type alias for [`BigEndian`] or [`LittleEndian`] depending on the +/// endianness of the target platform. +#[cfg(target_endian = "big")] +pub type NativeEndian = BigEndian; + +/// The endianness used by this platform. +/// +/// This is a type alias for [`BigEndian`] or [`LittleEndian`] depending on the +/// endianness of the target platform. +#[cfg(target_endian = "little")] +pub type NativeEndian = LittleEndian; + +/// The endianness used in many network protocols. +/// +/// This is a type alias for [`BigEndian`]. +pub type NetworkEndian = BigEndian; + +/// A type alias for [`BigEndian`]. +pub type BE = BigEndian; + +/// A type alias for [`LittleEndian`]. +pub type LE = LittleEndian; + +macro_rules! 
impl_fmt_trait { + ($name:ident, $native:ident, $trait:ident) => { + impl $trait for $name { + #[inline(always)] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + $trait::fmt(&self.get(), f) + } + } + }; +} + +macro_rules! impl_fmt_traits { + ($name:ident, $native:ident, "floating point number") => { + impl_fmt_trait!($name, $native, Display); + }; + ($name:ident, $native:ident, "unsigned integer") => { + impl_fmt_traits!($name, $native, @all_types); + }; + ($name:ident, $native:ident, "signed integer") => { + impl_fmt_traits!($name, $native, @all_types); + }; + ($name:ident, $native:ident, @all_types) => { + impl_fmt_trait!($name, $native, Display); + impl_fmt_trait!($name, $native, Octal); + impl_fmt_trait!($name, $native, LowerHex); + impl_fmt_trait!($name, $native, UpperHex); + impl_fmt_trait!($name, $native, Binary); + }; +} + +macro_rules! impl_ops_traits { + ($name:ident, $native:ident, "floating point number") => { + impl_ops_traits!($name, $native, @all_types); + impl_ops_traits!($name, $native, @signed_integer_floating_point); + + impl PartialOrd for $name { + #[inline(always)] + fn partial_cmp(&self, other: &Self) -> Option { + self.get().partial_cmp(&other.get()) + } + } + }; + ($name:ident, $native:ident, "unsigned integer") => { + impl_ops_traits!($name, $native, @signed_unsigned_integer); + impl_ops_traits!($name, $native, @all_types); + }; + ($name:ident, $native:ident, "signed integer") => { + impl_ops_traits!($name, $native, @signed_unsigned_integer); + impl_ops_traits!($name, $native, @signed_integer_floating_point); + impl_ops_traits!($name, $native, @all_types); + }; + ($name:ident, $native:ident, @signed_unsigned_integer) => { + impl_ops_traits!(@without_byteorder_swap $name, $native, BitAnd, bitand, BitAndAssign, bitand_assign); + impl_ops_traits!(@without_byteorder_swap $name, $native, BitOr, bitor, BitOrAssign, bitor_assign); + impl_ops_traits!(@without_byteorder_swap $name, $native, BitXor, bitxor, BitXorAssign, bitxor_assign); + 
impl_ops_traits!(@with_byteorder_swap $name, $native, Shl, shl, ShlAssign, shl_assign); + impl_ops_traits!(@with_byteorder_swap $name, $native, Shr, shr, ShrAssign, shr_assign); + + impl core::ops::Not for $name { + type Output = $name; + + #[inline(always)] + fn not(self) -> $name { + let self_native = $native::from_ne_bytes(self.0); + $name((!self_native).to_ne_bytes(), PhantomData) + } + } + + impl PartialOrd for $name { + #[inline(always)] + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + impl Ord for $name { + #[inline(always)] + fn cmp(&self, other: &Self) -> Ordering { + self.get().cmp(&other.get()) + } + } + + impl PartialOrd<$native> for $name { + #[inline(always)] + fn partial_cmp(&self, other: &$native) -> Option { + self.get().partial_cmp(other) + } + } + }; + ($name:ident, $native:ident, @signed_integer_floating_point) => { + impl core::ops::Neg for $name { + type Output = $name; + + #[inline(always)] + fn neg(self) -> $name { + let self_native: $native = self.get(); + #[allow(clippy::arithmetic_side_effects)] + $name::::new(-self_native) + } + } + }; + ($name:ident, $native:ident, @all_types) => { + impl_ops_traits!(@with_byteorder_swap $name, $native, Add, add, AddAssign, add_assign); + impl_ops_traits!(@with_byteorder_swap $name, $native, Div, div, DivAssign, div_assign); + impl_ops_traits!(@with_byteorder_swap $name, $native, Mul, mul, MulAssign, mul_assign); + impl_ops_traits!(@with_byteorder_swap $name, $native, Rem, rem, RemAssign, rem_assign); + impl_ops_traits!(@with_byteorder_swap $name, $native, Sub, sub, SubAssign, sub_assign); + }; + (@with_byteorder_swap $name:ident, $native:ident, $trait:ident, $method:ident, $trait_assign:ident, $method_assign:ident) => { + impl core::ops::$trait<$name> for $name { + type Output = $name; + + #[inline(always)] + fn $method(self, rhs: $name) -> $name { + let self_native: $native = self.get(); + let rhs_native: $native = rhs.get(); + let result_native = 
core::ops::$trait::$method(self_native, rhs_native); + $name::::new(result_native) + } + } + + impl core::ops::$trait<$name> for $native { + type Output = $name; + + #[inline(always)] + fn $method(self, rhs: $name) -> $name { + let rhs_native: $native = rhs.get(); + let result_native = core::ops::$trait::$method(self, rhs_native); + $name::::new(result_native) + } + } + + impl core::ops::$trait<$native> for $name { + type Output = $name; + + #[inline(always)] + fn $method(self, rhs: $native) -> $name { + let self_native: $native = self.get(); + let result_native = core::ops::$trait::$method(self_native, rhs); + $name::::new(result_native) + } + } + + impl core::ops::$trait_assign<$name> for $name { + #[inline(always)] + fn $method_assign(&mut self, rhs: $name) { + *self = core::ops::$trait::$method(*self, rhs); + } + } + + impl core::ops::$trait_assign<$name> for $native { + #[inline(always)] + fn $method_assign(&mut self, rhs: $name) { + let rhs_native: $native = rhs.get(); + *self = core::ops::$trait::$method(*self, rhs_native); + } + } + + impl core::ops::$trait_assign<$native> for $name { + #[inline(always)] + fn $method_assign(&mut self, rhs: $native) { + *self = core::ops::$trait::$method(*self, rhs); + } + } + }; + // Implement traits in terms of the same trait on the native type, but + // without performing a byte order swap when both operands are byteorder + // types. This only works for bitwise operations like `&`, `|`, etc. + // + // When only one operand is a byteorder type, we still need to perform a + // byteorder swap. 
+ (@without_byteorder_swap $name:ident, $native:ident, $trait:ident, $method:ident, $trait_assign:ident, $method_assign:ident) => { + impl core::ops::$trait<$name> for $name { + type Output = $name; + + #[inline(always)] + fn $method(self, rhs: $name) -> $name { + let self_native = $native::from_ne_bytes(self.0); + let rhs_native = $native::from_ne_bytes(rhs.0); + let result_native = core::ops::$trait::$method(self_native, rhs_native); + $name(result_native.to_ne_bytes(), PhantomData) + } + } + + impl core::ops::$trait<$name> for $native { + type Output = $name; + + #[inline(always)] + fn $method(self, rhs: $name) -> $name { + // No runtime cost - just byte packing + let rhs_native = $native::from_ne_bytes(rhs.0); + // (Maybe) runtime cost - byte order swap + let slf_byteorder = $name::::new(self); + // No runtime cost - just byte packing + let slf_native = $native::from_ne_bytes(slf_byteorder.0); + // Runtime cost - perform the operation + let result_native = core::ops::$trait::$method(slf_native, rhs_native); + // No runtime cost - just byte unpacking + $name(result_native.to_ne_bytes(), PhantomData) + } + } + + impl core::ops::$trait<$native> for $name { + type Output = $name; + + #[inline(always)] + fn $method(self, rhs: $native) -> $name { + // (Maybe) runtime cost - byte order swap + let rhs_byteorder = $name::::new(rhs); + // No runtime cost - just byte packing + let rhs_native = $native::from_ne_bytes(rhs_byteorder.0); + // No runtime cost - just byte packing + let slf_native = $native::from_ne_bytes(self.0); + // Runtime cost - perform the operation + let result_native = core::ops::$trait::$method(slf_native, rhs_native); + // No runtime cost - just byte unpacking + $name(result_native.to_ne_bytes(), PhantomData) + } + } + + impl core::ops::$trait_assign<$name> for $name { + #[inline(always)] + fn $method_assign(&mut self, rhs: $name) { + *self = core::ops::$trait::$method(*self, rhs); + } + } + + impl core::ops::$trait_assign<$name> for $native { + 
#[inline(always)] + fn $method_assign(&mut self, rhs: $name) { + // (Maybe) runtime cost - byte order swap + let rhs_native = rhs.get(); + // Runtime cost - perform the operation + *self = core::ops::$trait::$method(*self, rhs_native); + } + } + + impl core::ops::$trait_assign<$native> for $name { + #[inline(always)] + fn $method_assign(&mut self, rhs: $native) { + *self = core::ops::$trait::$method(*self, rhs); + } + } + }; +} + +macro_rules! doc_comment { + ($x:expr, $($tt:tt)*) => { + #[doc = $x] + $($tt)* + }; +} + +macro_rules! define_max_value_constant { + ($name:ident, $bytes:expr, "unsigned integer") => { + /// The maximum value. + /// + /// This constant should be preferred to constructing a new value using + /// `new`, as `new` may perform an endianness swap depending on the + /// endianness `O` and the endianness of the platform. + pub const MAX_VALUE: $name = $name([0xFFu8; $bytes], PhantomData); + }; + // We don't provide maximum and minimum value constants for signed values + // and floats because there's no way to do it generically - it would require + // a different value depending on the value of the `ByteOrder` type + // parameter. Currently, one workaround would be to provide implementations + // for concrete implementations of that trait. In the long term, if we are + // ever able to make the `new` constructor a const fn, we could use that + // instead. + ($name:ident, $bytes:expr, "signed integer") => {}; + ($name:ident, $bytes:expr, "floating point number") => {}; +} + +macro_rules! define_type { + ( + $article:ident, + $description:expr, + $name:ident, + $native:ident, + $bits:expr, + $bytes:expr, + $from_be_fn:path, + $to_be_fn:path, + $from_le_fn:path, + $to_le_fn:path, + $number_kind:tt, + [$($larger_native:ty),*], + [$($larger_native_try:ty),*], + [$($larger_byteorder:ident),*], + [$($larger_byteorder_try:ident),*] + ) => { + doc_comment! { + concat!($description, " stored in a given byte order. 
+ +`", stringify!($name), "` is like the native `", stringify!($native), "` type with +two major differences: First, it has no alignment requirement (its alignment is 1). +Second, the endianness of its memory layout is given by the type parameter `O`, +which can be any type which implements [`ByteOrder`]. In particular, this refers +to [`BigEndian`], [`LittleEndian`], [`NativeEndian`], and [`NetworkEndian`]. + +", stringify!($article), " `", stringify!($name), "` can be constructed using +the [`new`] method, and its contained value can be obtained as a native +`",stringify!($native), "` using the [`get`] method, or updated in place with +the [`set`] method. In all cases, if the endianness `O` is not the same as the +endianness of the current platform, an endianness swap will be performed in +order to uphold the invariants that a) the layout of `", stringify!($name), "` +has endianness `O` and that, b) the layout of `", stringify!($native), "` has +the platform's native endianness. + +`", stringify!($name), "` implements [`FromBytes`], [`IntoBytes`], and [`Unaligned`], +making it useful for parsing and serialization. See the module documentation for an +example of how it can be used for parsing UDP packets. + +[`new`]: crate::byteorder::", stringify!($name), "::new +[`get`]: crate::byteorder::", stringify!($name), "::get +[`set`]: crate::byteorder::", stringify!($name), "::set +[`FromBytes`]: crate::FromBytes +[`IntoBytes`]: crate::IntoBytes +[`Unaligned`]: crate::Unaligned"), + #[derive(Copy, Clone, Eq, PartialEq, Hash)] + #[cfg_attr(any(feature = "derive", test), derive(KnownLayout, Immutable, FromBytes, IntoBytes, Unaligned))] + #[repr(transparent)] + pub struct $name([u8; $bytes], PhantomData); + } + + #[cfg(not(any(feature = "derive", test)))] + impl_known_layout!(O => $name); + + #[allow(unused_unsafe)] // Unused when `feature = "derive"`. 
+ // SAFETY: `$name` is `repr(transparent)`, and so it has the same + // layout as its only non-zero field, which is a `u8` array. `u8` arrays + // are `Immutable`, `TryFromBytes`, `FromZeros`, `FromBytes`, + // `IntoBytes`, and `Unaligned`. + #[allow(clippy::multiple_unsafe_ops_per_block)] + const _: () = unsafe { + impl_or_verify!(O => Immutable for $name); + impl_or_verify!(O => TryFromBytes for $name); + impl_or_verify!(O => FromZeros for $name); + impl_or_verify!(O => FromBytes for $name); + impl_or_verify!(O => IntoBytes for $name); + impl_or_verify!(O => Unaligned for $name); + }; + + impl Default for $name { + #[inline(always)] + fn default() -> $name { + $name::ZERO + } + } + + impl $name { + /// The value zero. + /// + /// This constant should be preferred to constructing a new value + /// using `new`, as `new` may perform an endianness swap depending + /// on the endianness and platform. + pub const ZERO: $name = $name([0u8; $bytes], PhantomData); + + define_max_value_constant!($name, $bytes, $number_kind); + + /// Constructs a new value from bytes which are already in `O` byte + /// order. + #[must_use = "has no side effects"] + #[inline(always)] + pub const fn from_bytes(bytes: [u8; $bytes]) -> $name { + $name(bytes, PhantomData) + } + + /// Extracts the bytes of `self` without swapping the byte order. + /// + /// The returned bytes will be in `O` byte order. + #[must_use = "has no side effects"] + #[inline(always)] + pub const fn to_bytes(self) -> [u8; $bytes] { + self.0 + } + } + + impl $name { + maybe_const_trait_bounded_fn! { + /// Constructs a new value, possibly performing an endianness + /// swap to guarantee that the returned value has endianness + /// `O`. + #[must_use = "has no side effects"] + #[inline(always)] + pub const fn new(n: $native) -> $name { + let bytes = match O::ORDER { + Order::BigEndian => $to_be_fn(n), + Order::LittleEndian => $to_le_fn(n), + }; + + $name(bytes, PhantomData) + } + } + + maybe_const_trait_bounded_fn! 
{ + /// Returns the value as a primitive type, possibly performing + /// an endianness swap to guarantee that the return value has + /// the endianness of the native platform. + #[must_use = "has no side effects"] + #[inline(always)] + pub const fn get(self) -> $native { + match O::ORDER { + Order::BigEndian => $from_be_fn(self.0), + Order::LittleEndian => $from_le_fn(self.0), + } + } + } + + /// Updates the value in place as a primitive type, possibly + /// performing an endianness swap to guarantee that the stored value + /// has the endianness `O`. + #[inline(always)] + pub fn set(&mut self, n: $native) { + *self = Self::new(n); + } + } + + // The reasoning behind which traits to implement here is to only + // implement traits which won't cause inference issues. Notably, + // comparison traits like PartialEq and PartialOrd tend to cause + // inference issues. + + impl From<$name> for [u8; $bytes] { + #[inline(always)] + fn from(x: $name) -> [u8; $bytes] { + x.0 + } + } + + impl From<[u8; $bytes]> for $name { + #[inline(always)] + fn from(bytes: [u8; $bytes]) -> $name { + $name(bytes, PhantomData) + } + } + + impl From<$name> for $native { + #[inline(always)] + fn from(x: $name) -> $native { + x.get() + } + } + + impl From<$native> for $name { + #[inline(always)] + fn from(x: $native) -> $name { + $name::new(x) + } + } + + $( + impl From<$name> for $larger_native { + #[inline(always)] + fn from(x: $name) -> $larger_native { + x.get().into() + } + } + )* + + $( + impl TryFrom<$larger_native_try> for $name { + type Error = TryFromIntError; + #[inline(always)] + fn try_from(x: $larger_native_try) -> Result<$name, TryFromIntError> { + $native::try_from(x).map($name::new) + } + } + )* + + $( + impl From<$name> for $larger_byteorder

{ + #[inline(always)] + fn from(x: $name) -> $larger_byteorder

{ + $larger_byteorder::new(x.get().into()) + } + } + )* + + $( + impl TryFrom<$larger_byteorder_try

> for $name { + type Error = TryFromIntError; + #[inline(always)] + fn try_from(x: $larger_byteorder_try

) -> Result<$name, TryFromIntError> { + x.get().try_into().map($name::new) + } + } + )* + + impl AsRef<[u8; $bytes]> for $name { + #[inline(always)] + fn as_ref(&self) -> &[u8; $bytes] { + &self.0 + } + } + + impl AsMut<[u8; $bytes]> for $name { + #[inline(always)] + fn as_mut(&mut self) -> &mut [u8; $bytes] { + &mut self.0 + } + } + + impl PartialEq<$name> for [u8; $bytes] { + #[inline(always)] + fn eq(&self, other: &$name) -> bool { + self.eq(&other.0) + } + } + + impl PartialEq<[u8; $bytes]> for $name { + #[inline(always)] + fn eq(&self, other: &[u8; $bytes]) -> bool { + self.0.eq(other) + } + } + + impl PartialEq<$native> for $name { + #[inline(always)] + fn eq(&self, other: &$native) -> bool { + self.get().eq(other) + } + } + + impl_fmt_traits!($name, $native, $number_kind); + impl_ops_traits!($name, $native, $number_kind); + + impl Debug for $name { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + // This results in a format like "U16(42)". + f.debug_tuple(stringify!($name)).field(&self.get()).finish() + } + } + }; +} + +define_type!( + A, + "A 16-bit unsigned integer", + U16, + u16, + 16, + 2, + u16::from_be_bytes, + u16::to_be_bytes, + u16::from_le_bytes, + u16::to_le_bytes, + "unsigned integer", + [u32, u64, u128, usize], + [u32, u64, u128, usize], + [U32, U64, U128, Usize], + [U32, U64, U128, Usize] +); +define_type!( + A, + "A 32-bit unsigned integer", + U32, + u32, + 32, + 4, + u32::from_be_bytes, + u32::to_be_bytes, + u32::from_le_bytes, + u32::to_le_bytes, + "unsigned integer", + [u64, u128], + [u64, u128], + [U64, U128], + [U64, U128] +); +define_type!( + A, + "A 64-bit unsigned integer", + U64, + u64, + 64, + 8, + u64::from_be_bytes, + u64::to_be_bytes, + u64::from_le_bytes, + u64::to_le_bytes, + "unsigned integer", + [u128], + [u128], + [U128], + [U128] +); +define_type!( + A, + "A 128-bit unsigned integer", + U128, + u128, + 128, + 16, + u128::from_be_bytes, + u128::to_be_bytes, + u128::from_le_bytes, + u128::to_le_bytes, + 
"unsigned integer", + [], + [], + [], + [] +); +define_type!( + A, + "A word-sized unsigned integer", + Usize, + usize, + mem::size_of::() * 8, + mem::size_of::(), + usize::from_be_bytes, + usize::to_be_bytes, + usize::from_le_bytes, + usize::to_le_bytes, + "unsigned integer", + [], + [], + [], + [] +); +define_type!( + An, + "A 16-bit signed integer", + I16, + i16, + 16, + 2, + i16::from_be_bytes, + i16::to_be_bytes, + i16::from_le_bytes, + i16::to_le_bytes, + "signed integer", + [i32, i64, i128, isize], + [i32, i64, i128, isize], + [I32, I64, I128, Isize], + [I32, I64, I128, Isize] +); +define_type!( + An, + "A 32-bit signed integer", + I32, + i32, + 32, + 4, + i32::from_be_bytes, + i32::to_be_bytes, + i32::from_le_bytes, + i32::to_le_bytes, + "signed integer", + [i64, i128], + [i64, i128], + [I64, I128], + [I64, I128] +); +define_type!( + An, + "A 64-bit signed integer", + I64, + i64, + 64, + 8, + i64::from_be_bytes, + i64::to_be_bytes, + i64::from_le_bytes, + i64::to_le_bytes, + "signed integer", + [i128], + [i128], + [I128], + [I128] +); +define_type!( + An, + "A 128-bit signed integer", + I128, + i128, + 128, + 16, + i128::from_be_bytes, + i128::to_be_bytes, + i128::from_le_bytes, + i128::to_le_bytes, + "signed integer", + [], + [], + [], + [] +); +define_type!( + An, + "A word-sized signed integer", + Isize, + isize, + mem::size_of::() * 8, + mem::size_of::(), + isize::from_be_bytes, + isize::to_be_bytes, + isize::from_le_bytes, + isize::to_le_bytes, + "signed integer", + [], + [], + [], + [] +); + +// FIXME(https://github.com/rust-lang/rust/issues/72447): Use the endianness +// conversion methods directly once those are const-stable. +macro_rules! 
define_float_conversion { + ($ty:ty, $bits:ident, $bytes:expr, $mod:ident) => { + mod $mod { + use super::*; + + define_float_conversion!($ty, $bits, $bytes, from_be_bytes, to_be_bytes); + define_float_conversion!($ty, $bits, $bytes, from_le_bytes, to_le_bytes); + } + }; + ($ty:ty, $bits:ident, $bytes:expr, $from:ident, $to:ident) => { + // Clippy: The suggestion of using `from_bits()` instead doesn't work + // because `from_bits` is not const-stable on our MSRV. + #[allow(clippy::unnecessary_transmutes)] + pub(crate) const fn $from(bytes: [u8; $bytes]) -> $ty { + transmute!($bits::$from(bytes)) + } + + pub(crate) const fn $to(f: $ty) -> [u8; $bytes] { + // Clippy: The suggestion of using `f.to_bits()` instead doesn't + // work because `to_bits` is not const-stable on our MSRV. + #[allow(clippy::unnecessary_transmutes)] + let bits: $bits = transmute!(f); + bits.$to() + } + }; +} + +define_float_conversion!(f32, u32, 4, f32_ext); +define_float_conversion!(f64, u64, 8, f64_ext); + +define_type!( + An, + "A 32-bit floating point number", + F32, + f32, + 32, + 4, + f32_ext::from_be_bytes, + f32_ext::to_be_bytes, + f32_ext::from_le_bytes, + f32_ext::to_le_bytes, + "floating point number", + [f64], + [], + [F64], + [] +); +define_type!( + An, + "A 64-bit floating point number", + F64, + f64, + 64, + 8, + f64_ext::from_be_bytes, + f64_ext::to_be_bytes, + f64_ext::from_le_bytes, + f64_ext::to_le_bytes, + "floating point number", + [], + [], + [], + [] +); + +macro_rules! module { + ($name:ident, $trait:ident, $endianness_str:expr) => { + /// Numeric primitives stored in + #[doc = $endianness_str] + /// byte order. 
+ pub mod $name { + use super::$trait; + + module!(@ty U16, $trait, "16-bit unsigned integer", $endianness_str); + module!(@ty U32, $trait, "32-bit unsigned integer", $endianness_str); + module!(@ty U64, $trait, "64-bit unsigned integer", $endianness_str); + module!(@ty U128, $trait, "128-bit unsigned integer", $endianness_str); + module!(@ty I16, $trait, "16-bit signed integer", $endianness_str); + module!(@ty I32, $trait, "32-bit signed integer", $endianness_str); + module!(@ty I64, $trait, "64-bit signed integer", $endianness_str); + module!(@ty I128, $trait, "128-bit signed integer", $endianness_str); + module!(@ty F32, $trait, "32-bit floating point number", $endianness_str); + module!(@ty F64, $trait, "64-bit floating point number", $endianness_str); + } + }; + (@ty $ty:ident, $trait:ident, $desc_str:expr, $endianness_str:expr) => { + /// A + #[doc = $desc_str] + /// stored in + #[doc = $endianness_str] + /// byte order. + pub type $ty = crate::byteorder::$ty<$trait>; + }; +} + +module!(big_endian, BigEndian, "big-endian"); +module!(little_endian, LittleEndian, "little-endian"); +module!(network_endian, NetworkEndian, "network-endian"); +module!(native_endian, NativeEndian, "native-endian"); + +#[cfg(any(test, kani))] +mod tests { + use super::*; + + #[cfg(not(kani))] + mod compatibility { + pub(super) use rand::{ + distributions::{Distribution, Standard}, + rngs::SmallRng, + Rng, SeedableRng, + }; + + pub(crate) trait Arbitrary {} + + impl Arbitrary for T {} + } + + #[cfg(kani)] + mod compatibility { + pub(crate) use kani::Arbitrary; + + pub(crate) struct SmallRng; + + impl SmallRng { + pub(crate) fn seed_from_u64(_state: u64) -> Self { + Self + } + } + + pub(crate) trait Rng { + fn sample>(&mut self, _distr: D) -> T + where + T: Arbitrary, + { + kani::any() + } + } + + impl Rng for SmallRng {} + + pub(crate) trait Distribution {} + impl Distribution for U {} + + pub(crate) struct Standard; + } + + use compatibility::*; + + // A native integer type (u16, 
i32, etc). + trait Native: Arbitrary + FromBytes + IntoBytes + Immutable + Copy + PartialEq + Debug { + const ZERO: Self; + const MAX_VALUE: Self; + + type Distribution: Distribution; + const DIST: Self::Distribution; + + fn rand(rng: &mut R) -> Self { + rng.sample(Self::DIST) + } + + #[cfg_attr(kani, allow(unused))] + fn checked_add(self, rhs: Self) -> Option; + + #[cfg_attr(kani, allow(unused))] + fn checked_div(self, rhs: Self) -> Option; + + #[cfg_attr(kani, allow(unused))] + fn checked_mul(self, rhs: Self) -> Option; + + #[cfg_attr(kani, allow(unused))] + fn checked_rem(self, rhs: Self) -> Option; + + #[cfg_attr(kani, allow(unused))] + fn checked_sub(self, rhs: Self) -> Option; + + #[cfg_attr(kani, allow(unused))] + fn checked_shl(self, rhs: Self) -> Option; + + #[cfg_attr(kani, allow(unused))] + fn checked_shr(self, rhs: Self) -> Option; + + fn is_nan(self) -> bool; + + /// For `f32` and `f64`, NaN values are not considered equal to + /// themselves. This method is like `assert_eq!`, but it treats NaN + /// values as equal. + fn assert_eq_or_nan(self, other: Self) { + let slf = (!self.is_nan()).then(|| self); + let other = (!other.is_nan()).then(|| other); + assert_eq!(slf, other); + } + } + + trait ByteArray: + FromBytes + IntoBytes + Immutable + Copy + AsRef<[u8]> + AsMut<[u8]> + Debug + Default + Eq + { + /// Invert the order of the bytes in the array. + fn invert(self) -> Self; + } + + trait ByteOrderType: + FromBytes + IntoBytes + Unaligned + Copy + Eq + Debug + Hash + From + { + type Native: Native; + type ByteArray: ByteArray; + + const ZERO: Self; + + fn new(native: Self::Native) -> Self; + fn get(self) -> Self::Native; + fn set(&mut self, native: Self::Native); + fn from_bytes(bytes: Self::ByteArray) -> Self; + fn into_bytes(self) -> Self::ByteArray; + + /// For `f32` and `f64`, NaN values are not considered equal to + /// themselves. This method is like `assert_eq!`, but it treats NaN + /// values as equal. 
+ fn assert_eq_or_nan(self, other: Self) { + let slf = (!self.get().is_nan()).then(|| self); + let other = (!other.get().is_nan()).then(|| other); + assert_eq!(slf, other); + } + } + + trait ByteOrderTypeUnsigned: ByteOrderType { + const MAX_VALUE: Self; + } + + macro_rules! impl_byte_array { + ($bytes:expr) => { + impl ByteArray for [u8; $bytes] { + fn invert(mut self) -> [u8; $bytes] { + self.reverse(); + self + } + } + }; + } + + impl_byte_array!(2); + impl_byte_array!(4); + impl_byte_array!(8); + impl_byte_array!(16); + + macro_rules! impl_byte_order_type_unsigned { + ($name:ident, unsigned) => { + impl ByteOrderTypeUnsigned for $name { + const MAX_VALUE: $name = $name::MAX_VALUE; + } + }; + ($name:ident, signed) => {}; + } + + macro_rules! impl_traits { + ($name:ident, $native:ident, $sign:ident $(, @$float:ident)?) => { + impl Native for $native { + // For some types, `0 as $native` is required (for example, when + // `$native` is a floating-point type; `0` is an integer), but + // for other types, it's a trivial cast. In all cases, Clippy + // thinks it's dangerous. 
+ #[allow(trivial_numeric_casts, clippy::as_conversions)] + const ZERO: $native = 0 as $native; + const MAX_VALUE: $native = $native::MAX; + + type Distribution = Standard; + const DIST: Standard = Standard; + + impl_traits!(@float_dependent_methods $(@$float)?); + } + + impl ByteOrderType for $name { + type Native = $native; + type ByteArray = [u8; mem::size_of::<$native>()]; + + const ZERO: $name = $name::ZERO; + + fn new(native: $native) -> $name { + $name::new(native) + } + + fn get(self) -> $native { + $name::get(self) + } + + fn set(&mut self, native: $native) { + $name::set(self, native) + } + + fn from_bytes(bytes: [u8; mem::size_of::<$native>()]) -> $name { + $name::from(bytes) + } + + fn into_bytes(self) -> [u8; mem::size_of::<$native>()] { + <[u8; mem::size_of::<$native>()]>::from(self) + } + } + + impl_byte_order_type_unsigned!($name, $sign); + }; + (@float_dependent_methods) => { + fn checked_add(self, rhs: Self) -> Option { self.checked_add(rhs) } + fn checked_div(self, rhs: Self) -> Option { self.checked_div(rhs) } + fn checked_mul(self, rhs: Self) -> Option { self.checked_mul(rhs) } + fn checked_rem(self, rhs: Self) -> Option { self.checked_rem(rhs) } + fn checked_sub(self, rhs: Self) -> Option { self.checked_sub(rhs) } + fn checked_shl(self, rhs: Self) -> Option { self.checked_shl(rhs.try_into().unwrap_or(u32::MAX)) } + fn checked_shr(self, rhs: Self) -> Option { self.checked_shr(rhs.try_into().unwrap_or(u32::MAX)) } + fn is_nan(self) -> bool { false } + }; + (@float_dependent_methods @float) => { + fn checked_add(self, rhs: Self) -> Option { Some(self + rhs) } + fn checked_div(self, rhs: Self) -> Option { Some(self / rhs) } + fn checked_mul(self, rhs: Self) -> Option { Some(self * rhs) } + fn checked_rem(self, rhs: Self) -> Option { Some(self % rhs) } + fn checked_sub(self, rhs: Self) -> Option { Some(self - rhs) } + fn checked_shl(self, _rhs: Self) -> Option { unimplemented!() } + fn checked_shr(self, _rhs: Self) -> Option { unimplemented!() } + 
fn is_nan(self) -> bool { self.is_nan() } + }; + } + + impl_traits!(U16, u16, unsigned); + impl_traits!(U32, u32, unsigned); + impl_traits!(U64, u64, unsigned); + impl_traits!(U128, u128, unsigned); + impl_traits!(Usize, usize, unsigned); + impl_traits!(I16, i16, signed); + impl_traits!(I32, i32, signed); + impl_traits!(I64, i64, signed); + impl_traits!(I128, i128, signed); + impl_traits!(Isize, isize, unsigned); + impl_traits!(F32, f32, signed, @float); + impl_traits!(F64, f64, signed, @float); + + macro_rules! call_for_unsigned_types { + ($fn:ident, $byteorder:ident) => { + $fn::>(); + $fn::>(); + $fn::>(); + $fn::>(); + $fn::>(); + }; + } + + macro_rules! call_for_signed_types { + ($fn:ident, $byteorder:ident) => { + $fn::>(); + $fn::>(); + $fn::>(); + $fn::>(); + $fn::>(); + }; + } + + macro_rules! call_for_float_types { + ($fn:ident, $byteorder:ident) => { + $fn::>(); + $fn::>(); + }; + } + + macro_rules! call_for_all_types { + ($fn:ident, $byteorder:ident) => { + call_for_unsigned_types!($fn, $byteorder); + call_for_signed_types!($fn, $byteorder); + call_for_float_types!($fn, $byteorder); + }; + } + + #[cfg(target_endian = "big")] + type NonNativeEndian = LittleEndian; + #[cfg(target_endian = "little")] + type NonNativeEndian = BigEndian; + + // We use a `u64` seed so that we can use `SeedableRng::seed_from_u64`. + // `SmallRng`'s `SeedableRng::Seed` differs by platform, so if we wanted to + // call `SeedableRng::from_seed`, which takes a `Seed`, we would need + // conditional compilation by `target_pointer_width`. + const RNG_SEED: u64 = 0x7A03CAE2F32B5B8F; + + const RAND_ITERS: usize = if cfg!(any(miri, kani)) { + // The tests below which use this constant used to take a very long time + // on Miri, which slows down local development and CI jobs. 
We're not + // using Miri to check for the correctness of our code, but rather its + // soundness, and at least in the context of these particular tests, a + // single loop iteration is just as good for surfacing UB as multiple + // iterations are. + // + // As of the writing of this comment, here's one set of measurements: + // + // $ # RAND_ITERS == 1 + // $ cargo miri test -- -Z unstable-options --report-time endian + // test byteorder::tests::test_native_endian ... ok <0.049s> + // test byteorder::tests::test_non_native_endian ... ok <0.061s> + // + // $ # RAND_ITERS == 1024 + // $ cargo miri test -- -Z unstable-options --report-time endian + // test byteorder::tests::test_native_endian ... ok <25.716s> + // test byteorder::tests::test_non_native_endian ... ok <38.127s> + 1 + } else { + 1024 + }; + + #[test] + fn test_const_methods() { + use big_endian::*; + + #[rustversion::since(1.61.0)] + const _U: U16 = U16::new(0); + #[rustversion::since(1.61.0)] + const _NATIVE: u16 = _U.get(); + const _FROM_BYTES: U16 = U16::from_bytes([0, 1]); + const _BYTES: [u8; 2] = _FROM_BYTES.to_bytes(); + } + + #[cfg_attr(test, test)] + #[cfg_attr(kani, kani::proof)] + fn test_zero() { + fn test_zero() { + assert_eq!(T::ZERO.get(), T::Native::ZERO); + } + + call_for_all_types!(test_zero, NativeEndian); + call_for_all_types!(test_zero, NonNativeEndian); + } + + #[cfg_attr(test, test)] + #[cfg_attr(kani, kani::proof)] + fn test_max_value() { + fn test_max_value() { + assert_eq!(T::MAX_VALUE.get(), T::Native::MAX_VALUE); + } + + call_for_unsigned_types!(test_max_value, NativeEndian); + call_for_unsigned_types!(test_max_value, NonNativeEndian); + } + + #[cfg_attr(test, test)] + #[cfg_attr(kani, kani::proof)] + fn test_endian() { + fn test(invert: bool) { + let mut r = SmallRng::seed_from_u64(RNG_SEED); + for _ in 0..RAND_ITERS { + let native = T::Native::rand(&mut r); + let mut bytes = T::ByteArray::default(); + bytes.as_mut_bytes().copy_from_slice(native.as_bytes()); + if invert { + 
bytes = bytes.invert(); + } + let mut from_native = T::new(native); + let from_bytes = T::from_bytes(bytes); + + from_native.assert_eq_or_nan(from_bytes); + from_native.get().assert_eq_or_nan(native); + from_bytes.get().assert_eq_or_nan(native); + + assert_eq!(from_native.into_bytes(), bytes); + assert_eq!(from_bytes.into_bytes(), bytes); + + let updated = T::Native::rand(&mut r); + from_native.set(updated); + from_native.get().assert_eq_or_nan(updated); + } + } + + fn test_native() { + test::(false); + } + + fn test_non_native() { + test::(true); + } + + call_for_all_types!(test_native, NativeEndian); + call_for_all_types!(test_non_native, NonNativeEndian); + } + + #[test] + fn test_ops_impls() { + // Test implementations of traits in `core::ops`. Some of these are + // fairly banal, but some are optimized to perform the operation without + // swapping byte order (namely, bit-wise operations which are identical + // regardless of byte order). These are important to test, and while + // we're testing those anyway, it's trivial to test all of the impls. + + fn test( + op_t_t: FTT, + op_t_n: FTN, + op_n_t: FNT, + op_n_n: FNN, + op_n_n_checked: Option, + op_assign: Option<(FATT, FATN, FANT)>, + ) where + T: ByteOrderType, + FTT: Fn(T, T) -> T, + FTN: Fn(T, T::Native) -> T, + FNT: Fn(T::Native, T) -> T, + FNN: Fn(T::Native, T::Native) -> T::Native, + FNNChecked: Fn(T::Native, T::Native) -> Option, + FATT: Fn(&mut T, T), + FATN: Fn(&mut T, T::Native), + FANT: Fn(&mut T::Native, T), + { + let mut r = SmallRng::seed_from_u64(RNG_SEED); + for _ in 0..RAND_ITERS { + let n0 = T::Native::rand(&mut r); + let n1 = T::Native::rand(&mut r); + let t0 = T::new(n0); + let t1 = T::new(n1); + + // If this operation would overflow/underflow, skip it rather + // than attempt to catch and recover from panics. 
+ if matches!(&op_n_n_checked, Some(checked) if checked(n0, n1).is_none()) { + continue; + } + + let t_t_res = op_t_t(t0, t1); + let t_n_res = op_t_n(t0, n1); + let n_t_res = op_n_t(n0, t1); + let n_n_res = op_n_n(n0, n1); + + // For `f32` and `f64`, NaN values are not considered equal to + // themselves. We store `Option`/`Option` and store + // NaN as `None` so they can still be compared. + let val_or_none = |t: T| (!T::Native::is_nan(t.get())).then(|| t.get()); + let t_t_res = val_or_none(t_t_res); + let t_n_res = val_or_none(t_n_res); + let n_t_res = val_or_none(n_t_res); + let n_n_res = (!T::Native::is_nan(n_n_res)).then(|| n_n_res); + assert_eq!(t_t_res, n_n_res); + assert_eq!(t_n_res, n_n_res); + assert_eq!(n_t_res, n_n_res); + + if let Some((op_assign_t_t, op_assign_t_n, op_assign_n_t)) = &op_assign { + let mut t_t_res = t0; + op_assign_t_t(&mut t_t_res, t1); + let mut t_n_res = t0; + op_assign_t_n(&mut t_n_res, n1); + let mut n_t_res = n0; + op_assign_n_t(&mut n_t_res, t1); + + // For `f32` and `f64`, NaN values are not considered equal to + // themselves. We store `Option`/`Option` and store + // NaN as `None` so they can still be compared. + let t_t_res = val_or_none(t_t_res); + let t_n_res = val_or_none(t_n_res); + let n_t_res = (!T::Native::is_nan(n_t_res)).then(|| n_t_res); + assert_eq!(t_t_res, n_n_res); + assert_eq!(t_n_res, n_n_res); + assert_eq!(n_t_res, n_n_res); + } + } + } + + macro_rules! 
test { + ( + @binary + $trait:ident, + $method:ident $([$checked_method:ident])?, + $trait_assign:ident, + $method_assign:ident, + $($call_for_macros:ident),* + ) => {{ + fn t() + where + T: ByteOrderType, + T: core::ops::$trait, + T: core::ops::$trait, + T::Native: core::ops::$trait, + T::Native: core::ops::$trait, + + T: core::ops::$trait_assign, + T: core::ops::$trait_assign, + T::Native: core::ops::$trait_assign, + T::Native: core::ops::$trait_assign, + { + test::( + core::ops::$trait::$method, + core::ops::$trait::$method, + core::ops::$trait::$method, + core::ops::$trait::$method, + { + #[allow(unused_mut, unused_assignments)] + let mut op_native_checked = None:: Option>; + $( + op_native_checked = Some(T::Native::$checked_method); + )? + op_native_checked + }, + Some(( + >::$method_assign, + >::$method_assign, + >::$method_assign + )), + ); + } + + $( + $call_for_macros!(t, NativeEndian); + $call_for_macros!(t, NonNativeEndian); + )* + }}; + ( + @unary + $trait:ident, + $method:ident, + $($call_for_macros:ident),* + ) => {{ + fn t() + where + T: ByteOrderType, + T: core::ops::$trait, + T::Native: core::ops::$trait, + { + test::( + |slf, _rhs| core::ops::$trait::$method(slf), + |slf, _rhs| core::ops::$trait::$method(slf), + |slf, _rhs| core::ops::$trait::$method(slf).into(), + |slf, _rhs| core::ops::$trait::$method(slf), + None:: Option>, + None::<(fn(&mut T, T), fn(&mut T, T::Native), fn(&mut T::Native, T))>, + ); + } + + $( + $call_for_macros!(t, NativeEndian); + $call_for_macros!(t, NonNativeEndian); + )* + }}; + } + + test!(@binary Add, add[checked_add], AddAssign, add_assign, call_for_all_types); + test!(@binary Div, div[checked_div], DivAssign, div_assign, call_for_all_types); + test!(@binary Mul, mul[checked_mul], MulAssign, mul_assign, call_for_all_types); + test!(@binary Rem, rem[checked_rem], RemAssign, rem_assign, call_for_all_types); + test!(@binary Sub, sub[checked_sub], SubAssign, sub_assign, call_for_all_types); + + test!(@binary BitAnd, 
bitand, BitAndAssign, bitand_assign, call_for_unsigned_types, call_for_signed_types); + test!(@binary BitOr, bitor, BitOrAssign, bitor_assign, call_for_unsigned_types, call_for_signed_types); + test!(@binary BitXor, bitxor, BitXorAssign, bitxor_assign, call_for_unsigned_types, call_for_signed_types); + test!(@binary Shl, shl[checked_shl], ShlAssign, shl_assign, call_for_unsigned_types, call_for_signed_types); + test!(@binary Shr, shr[checked_shr], ShrAssign, shr_assign, call_for_unsigned_types, call_for_signed_types); + + test!(@unary Not, not, call_for_signed_types, call_for_unsigned_types); + test!(@unary Neg, neg, call_for_signed_types, call_for_float_types); + } + + #[test] + fn test_debug_impl() { + // Ensure that Debug applies format options to the inner value. + let val = U16::::new(10); + assert_eq!(format!("{:?}", val), "U16(10)"); + assert_eq!(format!("{:03?}", val), "U16(010)"); + assert_eq!(format!("{:x?}", val), "U16(a)"); + } + + #[test] + fn test_byteorder_traits_coverage() { + let val_be = U16::::from_bytes([0, 1]); + let val_le = U16::::from_bytes([1, 0]); + + assert_eq!(val_be.get(), 1); + assert_eq!(val_le.get(), 1); + + // Debug + assert_eq!(format!("{:?}", val_be), "U16(1)"); + assert_eq!(format!("{:?}", val_le), "U16(1)"); + + // PartialOrd, Ord with same type + assert!(val_be >= val_be); + assert!(val_be <= val_be); + assert_eq!(val_be.cmp(&val_be), core::cmp::Ordering::Equal); + + // PartialOrd with native + assert!(val_be == 1u16); + assert!(val_be >= 1u16); + + // Default + let default_be: U16 = Default::default(); + assert_eq!(default_be.get(), 0); + + // I16 + let val_be_i16 = I16::::from_bytes([0, 1]); + assert_eq!(val_be_i16.get(), 1); + assert_eq!(format!("{:?}", val_be_i16), "I16(1)"); + assert_eq!(val_be_i16.cmp(&val_be_i16), core::cmp::Ordering::Equal); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/deprecated.rs 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/deprecated.rs new file mode 100644 index 0000000000000000000000000000000000000000..61b29aa5f485e3b4a03cfd37f4623300b75e9c4b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/deprecated.rs @@ -0,0 +1,279 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +//! Deprecated items. These are kept separate so that they don't clutter up +//! other modules. + +use super::*; + +impl Ref +where + B: ByteSlice, + T: KnownLayout + Immutable + ?Sized, +{ + #[deprecated(since = "0.8.0", note = "renamed to `Ref::from_bytes`")] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + pub fn new(bytes: B) -> Option> { + Self::from_bytes(bytes).ok() + } +} + +impl Ref +where + B: SplitByteSlice, + T: KnownLayout + Immutable + ?Sized, +{ + #[deprecated(since = "0.8.0", note = "renamed to `Ref::from_prefix`")] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + pub fn new_from_prefix(bytes: B) -> Option<(Ref, B)> { + Self::from_prefix(bytes).ok() + } +} + +impl Ref +where + B: SplitByteSlice, + T: KnownLayout + Immutable + ?Sized, +{ + #[deprecated(since = "0.8.0", note = "renamed to `Ref::from_suffix`")] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + pub fn new_from_suffix(bytes: B) -> Option<(B, Ref)> { + Self::from_suffix(bytes).ok() + } +} + +impl Ref +where + B: ByteSlice, + T: Unaligned + KnownLayout + Immutable + ?Sized, +{ + #[deprecated( + since = "0.8.0", + note = "use `Ref::from_bytes`; for `T: Unaligned`, the returned `CastError` implements `Into`" + )] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + pub fn new_unaligned(bytes: B) -> Option> { + 
Self::from_bytes(bytes).ok() + } +} + +impl Ref +where + B: SplitByteSlice, + T: Unaligned + KnownLayout + Immutable + ?Sized, +{ + #[deprecated( + since = "0.8.0", + note = "use `Ref::from_prefix`; for `T: Unaligned`, the returned `CastError` implements `Into`" + )] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + pub fn new_unaligned_from_prefix(bytes: B) -> Option<(Ref, B)> { + Self::from_prefix(bytes).ok() + } +} + +impl Ref +where + B: SplitByteSlice, + T: Unaligned + KnownLayout + Immutable + ?Sized, +{ + #[deprecated( + since = "0.8.0", + note = "use `Ref::from_suffix`; for `T: Unaligned`, the returned `CastError` implements `Into`" + )] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, Ref)> { + Self::from_suffix(bytes).ok() + } +} + +impl Ref +where + B: ByteSlice, + T: Immutable, +{ + #[deprecated(since = "0.8.0", note = "`Ref::from_bytes` now supports slices")] + #[doc(hidden)] + #[inline(always)] + pub fn new_slice(bytes: B) -> Option> { + Self::from_bytes(bytes).ok() + } +} + +impl Ref +where + B: ByteSlice, + T: Unaligned + Immutable, +{ + #[deprecated( + since = "0.8.0", + note = "`Ref::from_bytes` now supports slices; for `T: Unaligned`, the returned `CastError` implements `Into`" + )] + #[doc(hidden)] + #[inline(always)] + pub fn new_slice_unaligned(bytes: B) -> Option> { + Ref::from_bytes(bytes).ok() + } +} + +impl<'a, B, T> Ref +where + B: 'a + IntoByteSlice<'a>, + T: FromBytes + Immutable, +{ + #[deprecated(since = "0.8.0", note = "`Ref::into_ref` now supports slices")] + #[doc(hidden)] + #[inline(always)] + pub fn into_slice(self) -> &'a [T] { + Ref::into_ref(self) + } +} + +impl<'a, B, T> Ref +where + B: 'a + IntoByteSliceMut<'a>, + T: FromBytes + IntoBytes + Immutable, +{ + #[deprecated(since = "0.8.0", note = "`Ref::into_mut` now supports slices")] + #[doc(hidden)] + #[inline(always)] + pub fn into_mut_slice(self) -> &'a mut [T] { 
+ Ref::into_mut(self) + } +} + +impl Ref +where + B: SplitByteSlice, + T: Immutable, +{ + #[deprecated(since = "0.8.0", note = "replaced by `Ref::from_prefix_with_elems`")] + #[must_use = "has no side effects"] + #[doc(hidden)] + #[inline(always)] + pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(Ref, B)> { + Ref::from_prefix_with_elems(bytes, count).ok() + } + + #[deprecated(since = "0.8.0", note = "replaced by `Ref::from_suffix_with_elems`")] + #[must_use = "has no side effects"] + #[doc(hidden)] + #[inline(always)] + pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, Ref)> { + Ref::from_suffix_with_elems(bytes, count).ok() + } +} + +impl Ref +where + B: SplitByteSlice, + T: Unaligned + Immutable, +{ + #[deprecated( + since = "0.8.0", + note = "use `Ref::from_prefix_with_elems`; for `T: Unaligned`, the returned `CastError` implements `Into`" + )] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + pub fn new_slice_unaligned_from_prefix(bytes: B, count: usize) -> Option<(Ref, B)> { + Ref::from_prefix_with_elems(bytes, count).ok() + } + + #[deprecated( + since = "0.8.0", + note = "use `Ref::from_suffix_with_elems`; for `T: Unaligned`, the returned `CastError` implements `Into`" + )] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + pub fn new_slice_unaligned_from_suffix(bytes: B, count: usize) -> Option<(B, Ref)> { + Ref::from_suffix_with_elems(bytes, count).ok() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[allow(deprecated)] + fn test_deprecated_ref_methods() { + let bytes = &[0u8; 1][..]; + let bytes_slice = &[0u8; 4][..]; + + let r: Option> = Ref::new(bytes); + assert!(r.is_some()); + + let r: Option<(Ref<&[u8], u8>, &[u8])> = Ref::new_from_prefix(bytes); + assert!(r.is_some()); + + let r: Option<(&[u8], Ref<&[u8], u8>)> = Ref::new_from_suffix(bytes); + assert!(r.is_some()); + + let r: Option> = Ref::new_unaligned(bytes); + assert!(r.is_some()); + + let r: 
Option<(Ref<&[u8], u8>, &[u8])> = Ref::new_unaligned_from_prefix(bytes); + assert!(r.is_some()); + + let r: Option<(&[u8], Ref<&[u8], u8>)> = Ref::new_unaligned_from_suffix(bytes); + assert!(r.is_some()); + + let r: Option> = Ref::new_slice(bytes_slice); + assert!(r.is_some()); + + let r: Option> = Ref::new_slice_unaligned(bytes_slice); + assert!(r.is_some()); + + let r: Option<(Ref<&[u8], [u8]>, &[u8])> = Ref::new_slice_from_prefix(bytes_slice, 1); + assert!(r.is_some()); + + let r: Option<(&[u8], Ref<&[u8], [u8]>)> = Ref::new_slice_from_suffix(bytes_slice, 1); + assert!(r.is_some()); + + let r: Option<(Ref<&[u8], [u8]>, &[u8])> = + Ref::new_slice_unaligned_from_prefix(bytes_slice, 1); + assert!(r.is_some()); + + let r: Option<(&[u8], Ref<&[u8], [u8]>)> = + Ref::new_slice_unaligned_from_suffix(bytes_slice, 1); + assert!(r.is_some()); + } + + #[test] + #[allow(deprecated)] + fn test_deprecated_into_slice() { + let bytes = &[0u8; 4][..]; + let r: Ref<&[u8], [u8]> = Ref::from_bytes(bytes).unwrap(); + let slice: &[u8] = r.into_slice(); + assert_eq!(slice.len(), 4); + } + + #[test] + #[allow(deprecated)] + fn test_deprecated_into_mut_slice() { + let mut bytes = [0u8; 4]; + let r: Ref<&mut [u8], [u8]> = Ref::from_bytes(&mut bytes[..]).unwrap(); + let slice: &mut [u8] = r.into_mut_slice(); + assert_eq!(slice.len(), 4); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/doctests.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/doctests.rs new file mode 100644 index 0000000000000000000000000000000000000000..989f1be194bd36cf4d41ff19005d0bcbf0011f40 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/doctests.rs @@ -0,0 +1,168 @@ +// Copyright 2025 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +#![cfg(feature = "derive")] // Required for derives on `SliceDst` +#![allow(dead_code, missing_docs, missing_debug_implementations, missing_copy_implementations)] + +//! Our UI test framework, built on the `trybuild` crate, does not support +//! testing for post-monomorphization errors. Instead, we use doctests, which +//! are able to test for post-monomorphization errors. + +use crate::*; + +#[derive(KnownLayout, FromBytes, IntoBytes, Immutable)] +#[repr(C)] +pub struct SliceDst { + pub t: T, + pub u: [U], +} + +#[allow(clippy::must_use_candidate, clippy::missing_inline_in_public_items, clippy::todo)] +impl SliceDst { + pub fn new() -> &'static SliceDst { + todo!() + } + + pub fn new_mut() -> &'static mut SliceDst { + todo!() + } +} + +/// We require that the alignment of the destination type is not larger than the +/// alignment of the source type. +/// +/// ```compile_fail,E0080 +/// let increase_alignment: &u16 = zerocopy::transmute_ref!(&[0u8; 2]); +/// ``` +/// +/// ```compile_fail,E0080 +/// let mut src = [0u8; 2]; +/// let increase_alignment: &mut u16 = zerocopy::transmute_mut!(&mut src); +/// ``` +/// +/// ```compile_fail,E0080 +/// let increase_alignment: &u16 = zerocopy::try_transmute_ref!(&[0u8; 2]).unwrap(); +/// ``` +/// +/// ```compile_fail,E0080 +/// let mut src = [0u8; 2]; +/// let increase_alignment: &mut u16 = zerocopy::try_transmute_mut!(&mut src).unwrap(); +/// ``` +enum TransmuteRefMutAlignmentIncrease {} + +/// We require that the size of the destination type is not larger than the size +/// of the source type. 
+/// +/// ```compile_fail,E0080 +/// let increase_size: &[u8; 2] = zerocopy::transmute_ref!(&0u8); +/// ``` +/// +/// ```compile_fail,E0080 +/// let mut src = 0u8; +/// let increase_size: &mut [u8; 2] = zerocopy::transmute_mut!(&mut src); +/// ``` +/// +/// ```compile_fail,E0080 +/// let increase_size: &[u8; 2] = zerocopy::try_transmute_ref!(&0u8).unwrap(); +/// ``` +/// +/// ```compile_fail,E0080 +/// let mut src = 0u8; +/// let increase_size: &mut [u8; 2] = zerocopy::try_transmute_mut!(&mut src).unwrap(); +/// ``` +enum TransmuteRefMutSizeIncrease {} + +/// We require that the size of the destination type is not smaller than the +/// size of the source type. +/// +/// ```compile_fail,E0080 +/// let decrease_size: &u8 = zerocopy::transmute_ref!(&[0u8; 2]); +/// ``` +/// +/// ```compile_fail,E0080 +/// let mut src = [0u8; 2]; +/// let decrease_size: &mut u8 = zerocopy::transmute_mut!(&mut src); +/// ``` +/// +/// ```compile_fail,E0080 +/// let decrease_size: &u8 = zerocopy::try_transmute_ref!(&[0u8; 2]).unwrap(); +/// ``` +/// +/// ```compile_fail,E0080 +/// let mut src = [0u8; 2]; +/// let decrease_size: &mut u8 = zerocopy::try_transmute_mut!(&mut src).unwrap(); +/// ``` +enum TransmuteRefMutSizeDecrease {} + +/// It's not possible in the general case to increase the trailing slice offset +/// during a reference transmutation - some pointer metadata values would not be +/// supportable, and so such a transmutation would be fallible. 
+/// +/// ```compile_fail,E0080 +/// use zerocopy::doctests::SliceDst; +/// let src: &SliceDst = SliceDst::new(); +/// let increase_offset: &SliceDst<[u8; 2], u8> = zerocopy::transmute_ref!(src); +/// ``` +/// +/// ```compile_fail,E0080 +/// use zerocopy::doctests::SliceDst; +/// let src: &mut SliceDst = SliceDst::new_mut(); +/// let increase_offset: &mut SliceDst<[u8; 2], u8> = zerocopy::transmute_mut!(src); +/// ``` +enum TransmuteRefMutDstOffsetIncrease {} + +/// Reference transmutes are not possible when the difference between the source +/// and destination types' trailing slice offsets is not a multiple of the +/// destination type's trailing slice element size. +/// +/// ```compile_fail,E0080 +/// use zerocopy::doctests::SliceDst; +/// let src: &SliceDst<[u8; 3], [u8; 2]> = SliceDst::new(); +/// let _: &SliceDst<[u8; 2], [u8; 2]> = zerocopy::transmute_ref!(src); +/// ``` +/// +/// ```compile_fail,E0080 +/// use zerocopy::doctests::SliceDst; +/// let src: &mut SliceDst<[u8; 3], [u8; 2]> = SliceDst::new_mut(); +/// let _: &mut SliceDst<[u8; 2], [u8; 2]> = zerocopy::transmute_mut!(src); +/// ``` +enum TransmuteRefMutDstOffsetNotMultiple {} + +/// Reference transmutes are not possible when the source's trailing slice +/// element size is not a multiple of the destination's. 
+/// +/// ```compile_fail,E0080 +/// use zerocopy::doctests::SliceDst; +/// let src: &SliceDst<(), [u8; 3]> = SliceDst::new(); +/// let _: &SliceDst<(), [u8; 2]> = zerocopy::transmute_ref!(src); +/// ``` +/// +/// ```compile_fail,E0080 +/// use zerocopy::doctests::SliceDst; +/// let src: &mut SliceDst<(), [u8; 3]> = SliceDst::new_mut(); +/// let _: &mut SliceDst<(), [u8; 2]> = zerocopy::transmute_mut!(src); +/// ``` +enum TransmuteRefMutDstElemSizeNotMultiple {} + +/// ```compile_fail,E0277 +/// use zerocopy::*; +/// +/// #[derive(FromBytes, IntoBytes, Unaligned)] +/// #[repr(transparent)] +/// struct Foo(T); +/// +/// const _: () = unsafe { +/// impl_or_verify!(T => TryFromBytes for Foo); +/// impl_or_verify!(T => FromZeros for Foo); +/// impl_or_verify!(T => FromBytes for Foo); +/// impl_or_verify!(T => IntoBytes for Foo); +/// impl_or_verify!(T => Unaligned for Foo); +/// }; +/// ``` +enum InvalidImplOrVerify {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/error.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..1719abf048c937be20259f9c7a36063706f8f837 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/error.rs @@ -0,0 +1,1346 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +//! Types related to error reporting. +//! +//! ## Single failure mode errors +//! +//! Generally speaking, zerocopy's conversions may fail for one of up to three +//! reasons: +//! - [`AlignmentError`]: the conversion source was improperly aligned +//! - [`SizeError`]: the conversion source was of incorrect size +//! - [`ValidityError`]: the conversion source contained invalid data +//! +//! 
Methods that only have one failure mode, like +//! [`FromBytes::read_from_bytes`], return that mode's corresponding error type +//! directly. +//! +//! ## Compound errors +//! +//! Conversion methods that have either two or three possible failure modes +//! return one of these error types: +//! - [`CastError`]: the error type of reference conversions +//! - [`TryCastError`]: the error type of fallible reference conversions +//! - [`TryReadError`]: the error type of fallible read conversions +//! +//! ## [`Unaligned`] destination types +//! +//! For [`Unaligned`] destination types, alignment errors are impossible. All +//! compound error types support infallibly discarding the alignment error via +//! [`From`] so long as `Dst: Unaligned`. For example, see [`>::from`][size-error-from]. +//! +//! [size-error-from]: struct.SizeError.html#method.from-1 +//! +//! ## Accessing the conversion source +//! +//! All error types provide an `into_src` method that converts the error into +//! the source value underlying the failed conversion. +//! +//! ## Display formatting +//! +//! All error types provide a `Display` implementation that produces a +//! human-readable error message. When `debug_assertions` are enabled, these +//! error messages are verbose and may include potentially sensitive +//! information, including: +//! +//! - the names of the involved types +//! - the sizes of the involved types +//! - the addresses of the involved types +//! - the contents of the involved types +//! +//! When `debug_assertions` are disabled (as is default for `release` builds), +//! such potentially sensitive information is excluded. +//! +//! In the future, we may support manually configuring this behavior. If you are +//! interested in this feature, [let us know on GitHub][issue-1457] so we know +//! to prioritize it. +//! +//! [issue-1457]: https://github.com/google/zerocopy/issues/1457 +//! +//! ## Validation order +//! +//! 
Our conversion methods typically check alignment, then size, then bit +//! validity. However, we do not guarantee that this is always the case, and +//! this behavior may change between releases. +//! +//! ## `Send`, `Sync`, and `'static` +//! +//! Our error types are `Send`, `Sync`, and `'static` when their `Src` parameter +//! is `Send`, `Sync`, or `'static`, respectively. This can cause issues when an +//! error is sent or synchronized across threads; e.g.: +//! +//! ```compile_fail,E0515 +//! use zerocopy::*; +//! +//! let result: SizeError<&[u8], u32> = std::thread::spawn(|| { +//! let source = &mut [0u8, 1, 2][..]; +//! // Try (and fail) to read a `u32` from `source`. +//! u32::read_from_bytes(source).unwrap_err() +//! }).join().unwrap(); +//! ``` +//! +//! To work around this, use [`map_src`][CastError::map_src] to convert the +//! source parameter to an unproblematic type; e.g.: +//! +//! ``` +//! use zerocopy::*; +//! +//! let result: SizeError<(), u32> = std::thread::spawn(|| { +//! let source = &mut [0u8, 1, 2][..]; +//! // Try (and fail) to read a `u32` from `source`. +//! u32::read_from_bytes(source).unwrap_err() +//! // Erase the error source. +//! .map_src(drop) +//! }).join().unwrap(); +//! ``` +//! +//! Alternatively, use `.to_string()` to eagerly convert the error into a +//! human-readable message; e.g.: +//! +//! ``` +//! use zerocopy::*; +//! +//! let result: Result = std::thread::spawn(|| { +//! let source = &mut [0u8, 1, 2][..]; +//! // Try (and fail) to read a `u32` from `source`. +//! u32::read_from_bytes(source) +//! // Eagerly render the error message. +//! .map_err(|err| err.to_string()) +//! }).join().unwrap(); +//! 
``` +#[cfg(not(no_zerocopy_core_error_1_81_0))] +use core::error::Error; +use core::{ + convert::Infallible, + fmt::{self, Debug, Write}, + ops::Deref, +}; +#[cfg(all(no_zerocopy_core_error_1_81_0, any(feature = "std", test)))] +use std::error::Error; + +use crate::{util::SendSyncPhantomData, KnownLayout, TryFromBytes, Unaligned}; +#[cfg(doc)] +use crate::{FromBytes, Ref}; + +/// Zerocopy's generic error type. +/// +/// Generally speaking, zerocopy's conversions may fail for one of up to three +/// reasons: +/// - [`AlignmentError`]: the conversion source was improperly aligned +/// - [`SizeError`]: the conversion source was of incorrect size +/// - [`ValidityError`]: the conversion source contained invalid data +/// +/// However, not all conversions produce all errors. For instance, +/// [`FromBytes::ref_from_bytes`] may fail due to alignment or size issues, but +/// not validity issues. This generic error type captures these +/// (im)possibilities via parameterization: `A` is parameterized with +/// [`AlignmentError`], `S` is parameterized with [`SizeError`], and `V` is +/// parameterized with [`Infallible`]. +/// +/// Zerocopy never uses this type directly in its API. Rather, we provide three +/// pre-parameterized aliases: +/// - [`CastError`]: the error type of reference conversions +/// - [`TryCastError`]: the error type of fallible reference conversions +/// - [`TryReadError`]: the error type of fallible read conversions +#[derive(PartialEq, Eq, Clone)] +pub enum ConvertError { + /// The conversion source was improperly aligned. + Alignment(A), + /// The conversion source was of incorrect size. + Size(S), + /// The conversion source contained invalid data. + Validity(V), +} + +impl From, S, V>> + for ConvertError +{ + /// Infallibly discards the alignment error from this `ConvertError` since + /// `Dst` is unaligned. + /// + /// Since [`Dst: Unaligned`], it is impossible to encounter an alignment + /// error. 
This method permits discarding that alignment error infallibly + /// and replacing it with [`Infallible`]. + /// + /// [`Dst: Unaligned`]: crate::Unaligned + /// + /// # Examples + /// + /// ``` + /// use core::convert::Infallible; + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, KnownLayout, Unaligned, Immutable)] + /// #[repr(C, packed)] + /// struct Bools { + /// one: bool, + /// two: bool, + /// many: [bool], + /// } + /// + /// impl Bools { + /// fn parse(bytes: &[u8]) -> Result<&Bools, AlignedTryCastError<&[u8], Bools>> { + /// // Since `Bools: Unaligned`, we can infallibly discard + /// // the alignment error. + /// Bools::try_ref_from_bytes(bytes).map_err(Into::into) + /// } + /// } + /// ``` + #[inline] + fn from(err: ConvertError, S, V>) -> ConvertError { + match err { + ConvertError::Alignment(e) => { + #[allow(unreachable_code)] + return ConvertError::Alignment(Infallible::from(e)); + } + ConvertError::Size(e) => ConvertError::Size(e), + ConvertError::Validity(e) => ConvertError::Validity(e), + } + } +} + +impl fmt::Debug for ConvertError { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Alignment(e) => f.debug_tuple("Alignment").field(e).finish(), + Self::Size(e) => f.debug_tuple("Size").field(e).finish(), + Self::Validity(e) => f.debug_tuple("Validity").field(e).finish(), + } + } +} + +/// Produces a human-readable error message. +/// +/// The message differs between debug and release builds. When +/// `debug_assertions` are enabled, this message is verbose and includes +/// potentially sensitive information. 
+impl fmt::Display for ConvertError { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Alignment(e) => e.fmt(f), + Self::Size(e) => e.fmt(f), + Self::Validity(e) => e.fmt(f), + } + } +} + +#[cfg(any(not(no_zerocopy_core_error_1_81_0), feature = "std", test))] +#[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.81.0", feature = "std"))))] +impl Error for ConvertError +where + A: fmt::Display + fmt::Debug, + S: fmt::Display + fmt::Debug, + V: fmt::Display + fmt::Debug, +{ +} + +/// The error emitted if the conversion source is improperly aligned. +pub struct AlignmentError { + /// The source value involved in the conversion. + src: Src, + /// The inner destination type involved in the conversion. + /// + /// INVARIANT: An `AlignmentError` may only be constructed if `Dst`'s + /// alignment requirement is greater than one. + _dst: SendSyncPhantomData, +} + +impl AlignmentError { + /// # Safety + /// + /// The caller must ensure that `Dst`'s alignment requirement is greater + /// than one. + pub(crate) unsafe fn new_unchecked(src: Src) -> Self { + // INVARIANT: The caller guarantees that `Dst`'s alignment requirement + // is greater than one. + Self { src, _dst: SendSyncPhantomData::default() } + } + + /// Produces the source underlying the failed conversion. + #[inline] + pub fn into_src(self) -> Src { + self.src + } + + pub(crate) fn with_src(self, new_src: NewSrc) -> AlignmentError { + // INVARIANT: `with_src` doesn't change the type of `Dst`, so the + // invariant that `Dst`'s alignment requirement is greater than one is + // preserved. + AlignmentError { src: new_src, _dst: SendSyncPhantomData::default() } + } + + /// Maps the source value associated with the conversion error. + /// + /// This can help mitigate [issues with `Send`, `Sync` and `'static` + /// bounds][self#send-sync-and-static]. 
+ /// + /// # Examples + /// + /// ``` + /// use zerocopy::*; + /// + /// let unaligned = Unalign::new(0u16); + /// + /// // Attempt to deref `unaligned`. This might fail with an alignment error. + /// let maybe_n: Result<&u16, AlignmentError<&Unalign, u16>> = unaligned.try_deref(); + /// + /// // Map the error's source to its address as a usize. + /// let maybe_n: Result<&u16, AlignmentError> = maybe_n.map_err(|err| { + /// err.map_src(|src| src as *const _ as usize) + /// }); + /// ``` + #[inline] + pub fn map_src(self, f: impl FnOnce(Src) -> NewSrc) -> AlignmentError { + AlignmentError { src: f(self.src), _dst: SendSyncPhantomData::default() } + } + + pub(crate) fn into(self) -> ConvertError { + ConvertError::Alignment(self) + } + + /// Format extra details for a verbose, human-readable error message. + /// + /// This formatting may include potentially sensitive information. + fn display_verbose_extras(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + where + Src: Deref, + Dst: KnownLayout, + { + #[allow(clippy::as_conversions)] + let addr = self.src.deref() as *const _ as *const (); + let addr_align = 2usize.pow((crate::util::AsAddress::addr(addr)).trailing_zeros()); + + f.write_str("\n\nSource type: ")?; + f.write_str(core::any::type_name::())?; + + f.write_str("\nSource address: ")?; + addr.fmt(f)?; + f.write_str(" (a multiple of ")?; + addr_align.fmt(f)?; + f.write_str(")")?; + + f.write_str("\nDestination type: ")?; + f.write_str(core::any::type_name::())?; + + f.write_str("\nDestination alignment: ")?; + ::LAYOUT.align.get().fmt(f)?; + + Ok(()) + } +} + +impl Clone for AlignmentError { + #[inline] + fn clone(&self) -> Self { + Self { src: self.src.clone(), _dst: SendSyncPhantomData::default() } + } +} + +impl PartialEq for AlignmentError { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.src == other.src + } +} + +impl Eq for AlignmentError {} + +impl From> for Infallible { + #[inline(always)] + fn from(_: AlignmentError) -> Infallible { + // 
SAFETY: `AlignmentError`s can only be constructed when `Dst`'s + // alignment requirement is greater than one. In this block, `Dst: + // Unaligned`, which means that its alignment requirement is equal to + // one. Thus, it's not possible to reach here at runtime. + unsafe { core::hint::unreachable_unchecked() } + } +} + +#[cfg(test)] +impl AlignmentError { + // A convenience constructor so that test code doesn't need to write + // `unsafe`. + fn new_checked(src: Src) -> AlignmentError { + assert_ne!(core::mem::align_of::(), 1); + // SAFETY: The preceding assertion guarantees that `Dst`'s alignment + // requirement is greater than one. + unsafe { AlignmentError::new_unchecked(src) } + } +} + +impl fmt::Debug for AlignmentError { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("AlignmentError").finish() + } +} + +/// Produces a human-readable error message. +/// +/// The message differs between debug and release builds. When +/// `debug_assertions` are enabled, this message is verbose and includes +/// potentially sensitive information. +impl fmt::Display for AlignmentError +where + Src: Deref, + Dst: KnownLayout, +{ + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("The conversion failed because the address of the source is not a multiple of the alignment of the destination type.")?; + + if cfg!(debug_assertions) { + self.display_verbose_extras(f) + } else { + Ok(()) + } + } +} + +#[cfg(any(not(no_zerocopy_core_error_1_81_0), feature = "std", test))] +#[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.81.0", feature = "std"))))] +impl Error for AlignmentError +where + Src: Deref, + Dst: KnownLayout, +{ +} + +impl From> + for ConvertError, S, V> +{ + #[inline(always)] + fn from(err: AlignmentError) -> Self { + Self::Alignment(err) + } +} + +/// The error emitted if the conversion source is of incorrect size. +pub struct SizeError { + /// The source value involved in the conversion. 
+ src: Src, + /// The inner destination type involved in the conversion. + _dst: SendSyncPhantomData, +} + +impl SizeError { + pub(crate) fn new(src: Src) -> Self { + Self { src, _dst: SendSyncPhantomData::default() } + } + + /// Produces the source underlying the failed conversion. + #[inline] + pub fn into_src(self) -> Src { + self.src + } + + /// Sets the source value associated with the conversion error. + pub(crate) fn with_src(self, new_src: NewSrc) -> SizeError { + SizeError { src: new_src, _dst: SendSyncPhantomData::default() } + } + + /// Maps the source value associated with the conversion error. + /// + /// This can help mitigate [issues with `Send`, `Sync` and `'static` + /// bounds][self#send-sync-and-static]. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::*; + /// + /// let source: [u8; 3] = [0, 1, 2]; + /// + /// // Try to read a `u32` from `source`. This will fail because there are insufficient + /// // bytes in `source`. + /// let maybe_u32: Result> = u32::read_from_bytes(&source[..]); + /// + /// // Map the error's source to its size. + /// let maybe_u32: Result> = maybe_u32.map_err(|err| { + /// err.map_src(|src| src.len()) + /// }); + /// ``` + #[inline] + pub fn map_src(self, f: impl FnOnce(Src) -> NewSrc) -> SizeError { + SizeError { src: f(self.src), _dst: SendSyncPhantomData::default() } + } + + /// Sets the destination type associated with the conversion error. + pub(crate) fn with_dst(self) -> SizeError { + SizeError { src: self.src, _dst: SendSyncPhantomData::default() } + } + + /// Converts the error into a general [`ConvertError`]. + pub(crate) fn into(self) -> ConvertError { + ConvertError::Size(self) + } + + /// Format extra details for a verbose, human-readable error message. + /// + /// This formatting may include potentially sensitive information. 
+ fn display_verbose_extras(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + where + Src: Deref, + Dst: KnownLayout, + { + // include the source type + f.write_str("\nSource type: ")?; + f.write_str(core::any::type_name::())?; + + // include the source.deref() size + let src_size = core::mem::size_of_val(&*self.src); + f.write_str("\nSource size: ")?; + src_size.fmt(f)?; + f.write_str(" byte")?; + if src_size != 1 { + f.write_char('s')?; + } + + // if `Dst` is `Sized`, include the `Dst` size + if let crate::SizeInfo::Sized { size } = Dst::LAYOUT.size_info { + f.write_str("\nDestination size: ")?; + size.fmt(f)?; + f.write_str(" byte")?; + if size != 1 { + f.write_char('s')?; + } + } + + // include the destination type + f.write_str("\nDestination type: ")?; + f.write_str(core::any::type_name::())?; + + Ok(()) + } +} + +impl Clone for SizeError { + #[inline] + fn clone(&self) -> Self { + Self { src: self.src.clone(), _dst: SendSyncPhantomData::default() } + } +} + +impl PartialEq for SizeError { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.src == other.src + } +} + +impl Eq for SizeError {} + +impl fmt::Debug for SizeError { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SizeError").finish() + } +} + +/// Produces a human-readable error message. +/// +/// The message differs between debug and release builds. When +/// `debug_assertions` are enabled, this message is verbose and includes +/// potentially sensitive information. 
+impl fmt::Display for SizeError +where + Src: Deref, + Dst: KnownLayout, +{ + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("The conversion failed because the source was incorrectly sized to complete the conversion into the destination type.")?; + if cfg!(debug_assertions) { + f.write_str("\n")?; + self.display_verbose_extras(f)?; + } + Ok(()) + } +} + +#[cfg(any(not(no_zerocopy_core_error_1_81_0), feature = "std", test))] +#[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.81.0", feature = "std"))))] +impl Error for SizeError +where + Src: Deref, + Dst: KnownLayout, +{ +} + +impl From> for ConvertError, V> { + #[inline(always)] + fn from(err: SizeError) -> Self { + Self::Size(err) + } +} + +/// The error emitted if the conversion source contains invalid data. +pub struct ValidityError { + /// The source value involved in the conversion. + pub(crate) src: Src, + /// The inner destination type involved in the conversion. + _dst: SendSyncPhantomData, +} + +impl ValidityError { + pub(crate) fn new(src: Src) -> Self { + Self { src, _dst: SendSyncPhantomData::default() } + } + + /// Produces the source underlying the failed conversion. + #[inline] + pub fn into_src(self) -> Src { + self.src + } + + /// Maps the source value associated with the conversion error. + /// + /// This can help mitigate [issues with `Send`, `Sync` and `'static` + /// bounds][self#send-sync-and-static]. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::*; + /// + /// let source: u8 = 42; + /// + /// // Try to transmute the `source` to a `bool`. This will fail. + /// let maybe_bool: Result> = try_transmute!(source); + /// + /// // Drop the error's source. 
+ /// let maybe_bool: Result> = maybe_bool.map_err(|err| { + /// err.map_src(drop) + /// }); + /// ``` + #[inline] + pub fn map_src(self, f: impl FnOnce(Src) -> NewSrc) -> ValidityError { + ValidityError { src: f(self.src), _dst: SendSyncPhantomData::default() } + } + + /// Converts the error into a general [`ConvertError`]. + pub(crate) fn into(self) -> ConvertError { + ConvertError::Validity(self) + } + + /// Format extra details for a verbose, human-readable error message. + /// + /// This formatting may include potentially sensitive information. + fn display_verbose_extras(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + where + Dst: KnownLayout, + { + f.write_str("Destination type: ")?; + f.write_str(core::any::type_name::())?; + Ok(()) + } +} + +impl Clone for ValidityError { + #[inline] + fn clone(&self) -> Self { + Self { src: self.src.clone(), _dst: SendSyncPhantomData::default() } + } +} + +// SAFETY: `ValidityError` contains a single `Self::Inner = Src`, and no other +// non-ZST fields. `map` passes ownership of `self`'s sole `Self::Inner` to `f`. +unsafe impl crate::pointer::TryWithError + for crate::ValidityError +where + Dst: TryFromBytes + ?Sized, +{ + type Inner = Src; + type Mapped = crate::ValidityError; + fn map NewSrc>(self, f: F) -> Self::Mapped { + self.map_src(f) + } +} + +impl PartialEq for ValidityError { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.src == other.src + } +} + +impl Eq for ValidityError {} + +impl fmt::Debug for ValidityError { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ValidityError").finish() + } +} + +/// Produces a human-readable error message. +/// +/// The message differs between debug and release builds. When +/// `debug_assertions` are enabled, this message is verbose and includes +/// potentially sensitive information. 
+impl fmt::Display for ValidityError +where + Dst: KnownLayout + TryFromBytes, +{ + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("The conversion failed because the source bytes are not a valid value of the destination type.")?; + if cfg!(debug_assertions) { + f.write_str("\n\n")?; + self.display_verbose_extras(f)?; + } + Ok(()) + } +} + +#[cfg(any(not(no_zerocopy_core_error_1_81_0), feature = "std", test))] +#[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.81.0", feature = "std"))))] +impl Error for ValidityError where Dst: KnownLayout + TryFromBytes {} + +impl From> + for ConvertError> +{ + #[inline(always)] + fn from(err: ValidityError) -> Self { + Self::Validity(err) + } +} + +/// The error type of reference conversions. +/// +/// Reference conversions, like [`FromBytes::ref_from_bytes`] may emit +/// [alignment](AlignmentError) and [size](SizeError) errors. +// Bounds on generic parameters are not enforced in type aliases, but they do +// appear in rustdoc. +#[allow(type_alias_bounds)] +pub type CastError = + ConvertError, SizeError, Infallible>; + +impl CastError { + /// Produces the source underlying the failed conversion. + #[inline] + pub fn into_src(self) -> Src { + match self { + Self::Alignment(e) => e.src, + Self::Size(e) => e.src, + Self::Validity(i) => match i {}, + } + } + + /// Sets the source value associated with the conversion error. + pub(crate) fn with_src(self, new_src: NewSrc) -> CastError { + match self { + Self::Alignment(e) => CastError::Alignment(e.with_src(new_src)), + Self::Size(e) => CastError::Size(e.with_src(new_src)), + Self::Validity(i) => match i {}, + } + } + + /// Maps the source value associated with the conversion error. + /// + /// This can help mitigate [issues with `Send`, `Sync` and `'static` + /// bounds][self#send-sync-and-static]. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::*; + /// + /// let source: [u8; 3] = [0, 1, 2]; + /// + /// // Try to read a `u32` from `source`. 
This will fail because there are insufficient + /// // bytes in `source`. + /// let maybe_u32: Result<&u32, CastError<&[u8], u32>> = u32::ref_from_bytes(&source[..]); + /// + /// // Map the error's source to its size and address. + /// let maybe_u32: Result<&u32, CastError<(usize, usize), u32>> = maybe_u32.map_err(|err| { + /// err.map_src(|src| (src.len(), src.as_ptr() as usize)) + /// }); + /// ``` + #[inline] + pub fn map_src(self, f: impl FnOnce(Src) -> NewSrc) -> CastError { + match self { + Self::Alignment(e) => CastError::Alignment(e.map_src(f)), + Self::Size(e) => CastError::Size(e.map_src(f)), + Self::Validity(i) => match i {}, + } + } + + /// Converts the error into a general [`ConvertError`]. + pub(crate) fn into(self) -> TryCastError + where + Dst: TryFromBytes, + { + match self { + Self::Alignment(e) => TryCastError::Alignment(e), + Self::Size(e) => TryCastError::Size(e), + Self::Validity(i) => match i {}, + } + } +} + +// SAFETY: `CastError` is either a single `AlignmentError` or a single +// `SizeError`. In either case, it contains a single `Self::Inner = Src`, and no +// other non-ZST fields. `map` passes ownership of `self`'s sole `Self::Inner` +// to `f`. +unsafe impl crate::pointer::TryWithError for crate::CastError +where + Dst: ?Sized, +{ + type Inner = Src; + type Mapped = crate::CastError; + + fn map NewSrc>(self, f: F) -> Self::Mapped { + self.map_src(f) + } +} + +impl From> for SizeError { + /// Infallibly extracts the [`SizeError`] from this `CastError` since `Dst` + /// is unaligned. + /// + /// Since [`Dst: Unaligned`], it is impossible to encounter an alignment + /// error, and so the only error that can be encountered at runtime is a + /// [`SizeError`]. This method permits extracting that `SizeError` + /// infallibly. 
+ /// + /// [`Dst: Unaligned`]: crate::Unaligned + /// + /// # Examples + /// + /// ```rust + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable, Unaligned)] + /// #[repr(C)] + /// struct UdpHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable, Unaligned)] + /// #[repr(C, packed)] + /// struct UdpPacket { + /// header: UdpHeader, + /// body: [u8], + /// } + /// + /// impl UdpPacket { + /// pub fn parse(bytes: &[u8]) -> Result<&UdpPacket, SizeError<&[u8], UdpPacket>> { + /// // Since `UdpPacket: Unaligned`, we can map the `CastError` to a `SizeError`. + /// UdpPacket::ref_from_bytes(bytes).map_err(Into::into) + /// } + /// } + /// ``` + #[inline(always)] + fn from(err: CastError) -> SizeError { + match err { + #[allow(unreachable_code)] + CastError::Alignment(e) => match Infallible::from(e) {}, + CastError::Size(e) => e, + CastError::Validity(i) => match i {}, + } + } +} + +/// The error type of fallible reference conversions. +/// +/// Fallible reference conversions, like [`TryFromBytes::try_ref_from_bytes`] +/// may emit [alignment](AlignmentError), [size](SizeError), and +/// [validity](ValidityError) errors. +// Bounds on generic parameters are not enforced in type aliases, but they do +// appear in rustdoc. +#[allow(type_alias_bounds)] +pub type TryCastError = + ConvertError, SizeError, ValidityError>; + +// FIXME(#1139): Remove the `TryFromBytes` here and in other downstream +// locations (all the way to `ValidityError`) if we determine it's not necessary +// for rich validity errors. +impl TryCastError { + /// Produces the source underlying the failed conversion. 
+ #[inline] + pub fn into_src(self) -> Src { + match self { + Self::Alignment(e) => e.src, + Self::Size(e) => e.src, + Self::Validity(e) => e.src, + } + } + + /// Maps the source value associated with the conversion error. + /// + /// This can help mitigate [issues with `Send`, `Sync` and `'static` + /// bounds][self#send-sync-and-static]. + /// + /// # Examples + /// + /// ``` + /// use core::num::NonZeroU32; + /// use zerocopy::*; + /// + /// let source: [u8; 3] = [0, 0, 0]; + /// + /// // Try to read a `NonZeroU32` from `source`. + /// let maybe_u32: Result<&NonZeroU32, TryCastError<&[u8], NonZeroU32>> + /// = NonZeroU32::try_ref_from_bytes(&source[..]); + /// + /// // Map the error's source to its size and address. + /// let maybe_u32: Result<&NonZeroU32, TryCastError<(usize, usize), NonZeroU32>> = + /// maybe_u32.map_err(|err| { + /// err.map_src(|src| (src.len(), src.as_ptr() as usize)) + /// }); + /// ``` + #[inline] + pub fn map_src(self, f: impl FnOnce(Src) -> NewSrc) -> TryCastError { + match self { + Self::Alignment(e) => TryCastError::Alignment(e.map_src(f)), + Self::Size(e) => TryCastError::Size(e.map_src(f)), + Self::Validity(e) => TryCastError::Validity(e.map_src(f)), + } + } +} + +impl From> for TryCastError { + #[inline] + fn from(value: CastError) -> Self { + match value { + CastError::Alignment(e) => Self::Alignment(e), + CastError::Size(e) => Self::Size(e), + CastError::Validity(i) => match i {}, + } + } +} + +/// The error type of fallible read-conversions. +/// +/// Fallible read-conversions, like [`TryFromBytes::try_read_from_bytes`] may +/// emit [size](SizeError) and [validity](ValidityError) errors, but not +/// alignment errors. +// Bounds on generic parameters are not enforced in type aliases, but they do +// appear in rustdoc. +#[allow(type_alias_bounds)] +pub type TryReadError = + ConvertError, ValidityError>; + +impl TryReadError { + /// Produces the source underlying the failed conversion. 
+ #[inline] + pub fn into_src(self) -> Src { + match self { + Self::Alignment(i) => match i {}, + Self::Size(e) => e.src, + Self::Validity(e) => e.src, + } + } + + /// Maps the source value associated with the conversion error. + /// + /// This can help mitigate [issues with `Send`, `Sync` and `'static` + /// bounds][self#send-sync-and-static]. + /// + /// # Examples + /// + /// ``` + /// use core::num::NonZeroU32; + /// use zerocopy::*; + /// + /// let source: [u8; 3] = [0, 0, 0]; + /// + /// // Try to read a `NonZeroU32` from `source`. + /// let maybe_u32: Result> + /// = NonZeroU32::try_read_from_bytes(&source[..]); + /// + /// // Map the error's source to its size. + /// let maybe_u32: Result> = + /// maybe_u32.map_err(|err| { + /// err.map_src(|src| src.len()) + /// }); + /// ``` + #[inline] + pub fn map_src(self, f: impl FnOnce(Src) -> NewSrc) -> TryReadError { + match self { + Self::Alignment(i) => match i {}, + Self::Size(e) => TryReadError::Size(e.map_src(f)), + Self::Validity(e) => TryReadError::Validity(e.map_src(f)), + } + } +} + +/// The error type of well-aligned, fallible casts. +/// +/// This is like [`TryCastError`], but for casts that are always well-aligned. +/// It is identical to `TryCastError`, except that its alignment error is +/// [`Infallible`]. +/// +/// As of this writing, none of zerocopy's API produces this error directly. +/// However, it is useful since it permits users to infallibly discard alignment +/// errors when they can prove statically that alignment errors are impossible. 
+/// +/// # Examples +/// +/// ``` +/// use core::convert::Infallible; +/// use zerocopy::*; +/// # use zerocopy_derive::*; +/// +/// #[derive(TryFromBytes, KnownLayout, Unaligned, Immutable)] +/// #[repr(C, packed)] +/// struct Bools { +/// one: bool, +/// two: bool, +/// many: [bool], +/// } +/// +/// impl Bools { +/// fn parse(bytes: &[u8]) -> Result<&Bools, AlignedTryCastError<&[u8], Bools>> { +/// // Since `Bools: Unaligned`, we can infallibly discard +/// // the alignment error. +/// Bools::try_ref_from_bytes(bytes).map_err(Into::into) +/// } +/// } +/// ``` +#[allow(type_alias_bounds)] +pub type AlignedTryCastError = + ConvertError, ValidityError>; + +/// The error type of a failed allocation. +/// +/// This type is intended to be deprecated in favor of the standard library's +/// [`AllocError`] type once it is stabilized. When that happens, this type will +/// be replaced by a type alias to the standard library type. We do not intend +/// to treat this as a breaking change; users who wish to avoid breakage should +/// avoid writing code which assumes that this is *not* such an alias. For +/// example, implementing the same trait for both types will result in an impl +/// conflict once this type is an alias. +/// +/// [`AllocError`]: https://doc.rust-lang.org/alloc/alloc/struct.AllocError.html +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct AllocError; + +#[cfg(test)] +mod tests { + use core::convert::Infallible; + + use super::*; + + #[test] + fn test_send_sync() { + // Test that all error types are `Send + Sync` even if `Dst: !Send + + // !Sync`. 
+ + #[allow(dead_code)] + fn is_send_sync(_t: T) {} + + #[allow(dead_code)] + fn alignment_err_is_send_sync(err: AlignmentError) { + is_send_sync(err) + } + + #[allow(dead_code)] + fn size_err_is_send_sync(err: SizeError) { + is_send_sync(err) + } + + #[allow(dead_code)] + fn validity_err_is_send_sync( + err: ValidityError, + ) { + is_send_sync(err) + } + + #[allow(dead_code)] + fn convert_error_is_send_sync( + err: ConvertError< + AlignmentError, + SizeError, + ValidityError, + >, + ) { + is_send_sync(err) + } + } + + #[test] + fn test_eq_partial_eq_clone() { + // Test that all error types implement `Eq`, `PartialEq` + // and `Clone` if src does + // even if `Dst: !Eq`, `!PartialEq`, `!Clone`. + + #[allow(dead_code)] + fn is_eq_partial_eq_clone(_t: T) {} + + #[allow(dead_code)] + fn alignment_err_is_eq_partial_eq_clone( + err: AlignmentError, + ) { + is_eq_partial_eq_clone(err) + } + + #[allow(dead_code)] + fn size_err_is_eq_partial_eq_clone( + err: SizeError, + ) { + is_eq_partial_eq_clone(err) + } + + #[allow(dead_code)] + fn validity_err_is_eq_partial_eq_clone( + err: ValidityError, + ) { + is_eq_partial_eq_clone(err) + } + + #[allow(dead_code)] + fn convert_error_is_eq_partial_eq_clone( + err: ConvertError< + AlignmentError, + SizeError, + ValidityError, + >, + ) { + is_eq_partial_eq_clone(err) + } + } + + #[test] + fn alignment_display() { + #[repr(C, align(128))] + struct Aligned { + bytes: [u8; 128], + } + + impl_known_layout!(elain::Align::<8>); + + let aligned = Aligned { bytes: [0; 128] }; + + let bytes = &aligned.bytes[1..]; + let addr = crate::util::AsAddress::addr(bytes); + assert_eq!( + AlignmentError::<_, elain::Align::<8>>::new_checked(bytes).to_string(), + format!("The conversion failed because the address of the source is not a multiple of the alignment of the destination type.\n\ + \nSource type: &[u8]\ + \nSource address: 0x{:x} (a multiple of 1)\ + \nDestination type: elain::Align<8>\ + \nDestination alignment: 8", addr) + ); + + let bytes = 
&aligned.bytes[2..]; + let addr = crate::util::AsAddress::addr(bytes); + assert_eq!( + AlignmentError::<_, elain::Align::<8>>::new_checked(bytes).to_string(), + format!("The conversion failed because the address of the source is not a multiple of the alignment of the destination type.\n\ + \nSource type: &[u8]\ + \nSource address: 0x{:x} (a multiple of 2)\ + \nDestination type: elain::Align<8>\ + \nDestination alignment: 8", addr) + ); + + let bytes = &aligned.bytes[3..]; + let addr = crate::util::AsAddress::addr(bytes); + assert_eq!( + AlignmentError::<_, elain::Align::<8>>::new_checked(bytes).to_string(), + format!("The conversion failed because the address of the source is not a multiple of the alignment of the destination type.\n\ + \nSource type: &[u8]\ + \nSource address: 0x{:x} (a multiple of 1)\ + \nDestination type: elain::Align<8>\ + \nDestination alignment: 8", addr) + ); + + let bytes = &aligned.bytes[4..]; + let addr = crate::util::AsAddress::addr(bytes); + assert_eq!( + AlignmentError::<_, elain::Align::<8>>::new_checked(bytes).to_string(), + format!("The conversion failed because the address of the source is not a multiple of the alignment of the destination type.\n\ + \nSource type: &[u8]\ + \nSource address: 0x{:x} (a multiple of 4)\ + \nDestination type: elain::Align<8>\ + \nDestination alignment: 8", addr) + ); + } + + #[test] + fn size_display() { + assert_eq!( + SizeError::<_, [u8]>::new(&[0u8; 2][..]).to_string(), + "The conversion failed because the source was incorrectly sized to complete the conversion into the destination type.\n\ + \nSource type: &[u8]\ + \nSource size: 2 bytes\ + \nDestination type: [u8]" + ); + + assert_eq!( + SizeError::<_, [u8; 2]>::new(&[0u8; 1][..]).to_string(), + "The conversion failed because the source was incorrectly sized to complete the conversion into the destination type.\n\ + \nSource type: &[u8]\ + \nSource size: 1 byte\ + \nDestination size: 2 bytes\ + \nDestination type: [u8; 2]" + ); + } + + #[test] + 
fn validity_display() { + assert_eq!( + ValidityError::<_, bool>::new(&[2u8; 1][..]).to_string(), + "The conversion failed because the source bytes are not a valid value of the destination type.\n\ + \n\ + Destination type: bool" + ); + } + + #[test] + fn test_convert_error_debug() { + let err: ConvertError< + AlignmentError<&[u8], u16>, + SizeError<&[u8], u16>, + ValidityError<&[u8], bool>, + > = ConvertError::Alignment(AlignmentError::new_checked(&[0u8])); + assert_eq!(format!("{:?}", err), "Alignment(AlignmentError)"); + + let err: ConvertError< + AlignmentError<&[u8], u16>, + SizeError<&[u8], u16>, + ValidityError<&[u8], bool>, + > = ConvertError::Size(SizeError::new(&[0u8])); + assert_eq!(format!("{:?}", err), "Size(SizeError)"); + + let err: ConvertError< + AlignmentError<&[u8], u16>, + SizeError<&[u8], u16>, + ValidityError<&[u8], bool>, + > = ConvertError::Validity(ValidityError::new(&[0u8])); + assert_eq!(format!("{:?}", err), "Validity(ValidityError)"); + } + + #[test] + fn test_convert_error_from_unaligned() { + // u8 is Unaligned + let err: ConvertError< + AlignmentError<&[u8], u8>, + SizeError<&[u8], u8>, + ValidityError<&[u8], bool>, + > = ConvertError::Size(SizeError::new(&[0u8])); + let converted: ConvertError, ValidityError<&[u8], bool>> = + ConvertError::from(err); + match converted { + ConvertError::Size(_) => {} + _ => panic!("Expected Size error"), + } + } + + #[test] + fn test_alignment_error_display_debug() { + let err: AlignmentError<&[u8], u16> = AlignmentError::new_checked(&[0u8]); + assert!(format!("{:?}", err).contains("AlignmentError")); + assert!(format!("{}", err).contains("address of the source is not a multiple")); + } + + #[test] + fn test_size_error_display_debug() { + let err: SizeError<&[u8], u16> = SizeError::new(&[0u8]); + assert!(format!("{:?}", err).contains("SizeError")); + assert!(format!("{}", err).contains("source was incorrectly sized")); + } + + #[test] + fn test_validity_error_display_debug() { + let err: 
ValidityError<&[u8], bool> = ValidityError::new(&[0u8]); + assert!(format!("{:?}", err).contains("ValidityError")); + assert!(format!("{}", err).contains("source bytes are not a valid value")); + } + + #[test] + fn test_convert_error_display_debug_more() { + let err: ConvertError< + AlignmentError<&[u8], u16>, + SizeError<&[u8], u16>, + ValidityError<&[u8], bool>, + > = ConvertError::Alignment(AlignmentError::new_checked(&[0u8])); + assert!(format!("{}", err).contains("address of the source is not a multiple")); + + let err: ConvertError< + AlignmentError<&[u8], u16>, + SizeError<&[u8], u16>, + ValidityError<&[u8], bool>, + > = ConvertError::Size(SizeError::new(&[0u8])); + assert!(format!("{}", err).contains("source was incorrectly sized")); + + let err: ConvertError< + AlignmentError<&[u8], u16>, + SizeError<&[u8], u16>, + ValidityError<&[u8], bool>, + > = ConvertError::Validity(ValidityError::new(&[0u8])); + assert!(format!("{}", err).contains("source bytes are not a valid value")); + } + + #[test] + fn test_alignment_error_methods() { + let err: AlignmentError<&[u8], u16> = AlignmentError::new_checked(&[0u8]); + + // into_src + let src = err.clone().into_src(); + assert_eq!(src, &[0u8]); + + // into + let converted: ConvertError< + AlignmentError<&[u8], u16>, + SizeError<&[u8], u16>, + ValidityError<&[u8], bool>, + > = err.clone().into(); + match converted { + ConvertError::Alignment(_) => {} + _ => panic!("Expected Alignment error"), + } + + // clone + let cloned = err.clone(); + assert_eq!(err, cloned); + + // eq + assert_eq!(err, cloned); + let err2: AlignmentError<&[u8], u16> = AlignmentError::new_checked(&[1u8]); + assert_ne!(err, err2); + } + + #[test] + fn test_convert_error_from_unaligned_variants() { + // u8 is Unaligned + let err: ConvertError< + AlignmentError<&[u8], u8>, + SizeError<&[u8], u8>, + ValidityError<&[u8], bool>, + > = ConvertError::Validity(ValidityError::new(&[0u8])); + let converted: ConvertError, ValidityError<&[u8], bool>> = + 
ConvertError::from(err); + match converted { + ConvertError::Validity(_) => {} + _ => panic!("Expected Validity error"), + } + + let err: ConvertError< + AlignmentError<&[u8], u8>, + SizeError<&[u8], u8>, + ValidityError<&[u8], bool>, + > = ConvertError::Size(SizeError::new(&[0u8])); + let converted: ConvertError, ValidityError<&[u8], bool>> = + ConvertError::from(err); + match converted { + ConvertError::Size(_) => {} + _ => panic!("Expected Size error"), + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/impls.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..02153b86c4cc8495bfe068e72a42456f579ffd25 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/impls.rs @@ -0,0 +1,2373 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use core::{ + cell::{Cell, UnsafeCell}, + mem::MaybeUninit as CoreMaybeUninit, + ptr::NonNull, +}; + +use super::*; +use crate::pointer::cast::{CastSizedExact, CastUnsized}; + +// SAFETY: Per the reference [1], "the unit tuple (`()`) ... is guaranteed as a +// zero-sized type to have a size of 0 and an alignment of 1." +// - `Immutable`: `()` self-evidently does not contain any `UnsafeCell`s. +// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: There is only +// one possible sequence of 0 bytes, and `()` is inhabited. +// - `IntoBytes`: Since `()` has size 0, it contains no padding bytes. +// - `Unaligned`: `()` has alignment 1. 
+// +// [1] https://doc.rust-lang.org/1.81.0/reference/type-layout.html#tuple-layout +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + unsafe_impl!((): Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_unaligned!(()); +}; + +// SAFETY: +// - `Immutable`: These types self-evidently do not contain any `UnsafeCell`s. +// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: all bit +// patterns are valid for numeric types [1] +// - `IntoBytes`: numeric types have no padding bytes [1] +// - `Unaligned` (`u8` and `i8` only): The reference [2] specifies the size of +// `u8` and `i8` as 1 byte. We also know that: +// - Alignment is >= 1 [3] +// - Size is an integer multiple of alignment [4] +// - The only value >= 1 for which 1 is an integer multiple is 1 Therefore, +// the only possible alignment for `u8` and `i8` is 1. +// +// [1] Per https://doc.rust-lang.org/1.81.0/reference/types/numeric.html#bit-validity: +// +// For every numeric type, `T`, the bit validity of `T` is equivalent to +// the bit validity of `[u8; size_of::()]`. An uninitialized byte is +// not a valid `u8`. +// +// [2] https://doc.rust-lang.org/1.81.0/reference/type-layout.html#primitive-data-layout +// +// [3] Per https://doc.rust-lang.org/1.81.0/reference/type-layout.html#size-and-alignment: +// +// Alignment is measured in bytes, and must be at least 1. +// +// [4] Per https://doc.rust-lang.org/1.81.0/reference/type-layout.html#size-and-alignment: +// +// The size of a value is always a multiple of its alignment. +// +// FIXME(#278): Once we've updated the trait docs to refer to `u8`s rather than +// bits or bytes, update this comment, especially the reference to [1]. 
+#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + unsafe_impl!(u8: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + unsafe_impl!(i8: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_unaligned!(u8, i8); + unsafe_impl!(u16: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(i16: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(u32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(i32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(u64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(i64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(u128: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(i128: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(usize: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(isize: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(f32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(f64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + #[cfg(feature = "float-nightly")] + unsafe_impl!(#[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))] f16: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + #[cfg(feature = "float-nightly")] + unsafe_impl!(#[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))] f128: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); +}; + +// SAFETY: +// - `Immutable`: `bool` self-evidently does not contain any `UnsafeCell`s. +// - `FromZeros`: Valid since "[t]he value false has the bit pattern 0x00" [1]. +// - `IntoBytes`: Since "the boolean type has a size and alignment of 1 each" +// and "The value false has the bit pattern 0x00 and the value true has the +// bit pattern 0x01" [1]. Thus, the only byte of the bool is always +// initialized. 
+// - `Unaligned`: Per the reference [1], "[a]n object with the boolean type has +// a size and alignment of 1 each." +// +// [1] https://doc.rust-lang.org/1.81.0/reference/types/boolean.html +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { unsafe_impl!(bool: Immutable, FromZeros, IntoBytes, Unaligned) }; +assert_unaligned!(bool); + +// SAFETY: The impl must only return `true` for its argument if the original +// `Maybe` refers to a valid `bool`. We only return true if the `u8` value +// is 0 or 1, and both of these are valid values for `bool` [1]. +// +// [1] Per https://doc.rust-lang.org/1.81.0/reference/types/boolean.html: +// +// The value false has the bit pattern 0x00 and the value true has the bit +// pattern 0x01. +const _: () = unsafe { + unsafe_impl!(=> TryFromBytes for bool; |byte| { + let byte = byte.transmute_with::(); + *byte.unaligned_as_ref() < 2 + }) +}; + +// SAFETY: +// - `Immutable`: `char` self-evidently does not contain any `UnsafeCell`s. +// - `FromZeros`: Per reference [1], "[a] value of type char is a Unicode scalar +// value (i.e. a code point that is not a surrogate), represented as a 32-bit +// unsigned word in the 0x0000 to 0xD7FF or 0xE000 to 0x10FFFF range" which +// contains 0x0000. +// - `IntoBytes`: `char` is per reference [1] "represented as a 32-bit unsigned +// word" (`u32`) which is `IntoBytes`. Note that unlike `u32`, not all bit +// patterns are valid for `char`. +// +// [1] https://doc.rust-lang.org/1.81.0/reference/types/textual.html +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { unsafe_impl!(char: Immutable, FromZeros, IntoBytes) }; + +// SAFETY: The impl must only return `true` for its argument if the original +// `Maybe` refers to a valid `char`. `char::from_u32` guarantees that it +// returns `None` if its input is not a valid `char` [1]. 
+// +// [1] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32: +// +// `from_u32()` will return `None` if the input is not a valid value for a +// `char`. +const _: () = unsafe { + unsafe_impl!(=> TryFromBytes for char; |c| { + let c = c.transmute_with::, invariant::Valid, CastSizedExact, BecauseImmutable>(); + let c = c.read().into_inner(); + char::from_u32(c).is_some() + }); +}; + +// SAFETY: Per the Reference [1], `str` has the same layout as `[u8]`. +// - `Immutable`: `[u8]` does not contain any `UnsafeCell`s. +// - `FromZeros`, `IntoBytes`, `Unaligned`: `[u8]` is `FromZeros`, `IntoBytes`, +// and `Unaligned`. +// +// Note that we don't `assert_unaligned!(str)` because `assert_unaligned!` uses +// `align_of`, which only works for `Sized` types. +// +// FIXME(#429): Improve safety proof for `FromZeros` and `IntoBytes`; having the same +// layout as `[u8]` isn't sufficient. +// +// [1] Per https://doc.rust-lang.org/1.81.0/reference/type-layout.html#str-layout: +// +// String slices are a UTF-8 representation of characters that have the same +// layout as slices of type `[u8]`. +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { unsafe_impl!(str: Immutable, FromZeros, IntoBytes, Unaligned) }; + +// SAFETY: The impl must only return `true` for its argument if the original +// `Maybe` refers to a valid `str`. `str::from_utf8` guarantees that it +// returns `Err` if its input is not a valid `str` [1]. +// +// [1] Per https://doc.rust-lang.org/core/str/fn.from_utf8.html#errors: +// +// Returns `Err` if the slice is not UTF-8. +const _: () = unsafe { + unsafe_impl!(=> TryFromBytes for str; |c| { + let c = c.transmute_with::<[u8], invariant::Valid, CastUnsized, BecauseImmutable>(); + let c = c.unaligned_as_ref(); + core::str::from_utf8(c).is_ok() + }) +}; + +macro_rules! 
unsafe_impl_try_from_bytes_for_nonzero { + ($($nonzero:ident[$prim:ty]),*) => { + $( + unsafe_impl!(=> TryFromBytes for $nonzero; |n| { + let n = n.transmute_with::, invariant::Valid, CastSizedExact, BecauseImmutable>(); + $nonzero::new(n.read().into_inner()).is_some() + }); + )* + } +} + +// `NonZeroXxx` is `IntoBytes`, but not `FromZeros` or `FromBytes`. +// +// SAFETY: +// - `IntoBytes`: `NonZeroXxx` has the same layout as its associated primitive. +// Since it is the same size, this guarantees it has no padding - integers +// have no padding, and there's no room for padding if it can represent all +// of the same values except 0. +// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that `Option` +// and `Option` both have size 1. [1] [2] This is worded in a way +// that makes it unclear whether it's meant as a guarantee, but given the +// purpose of those types, it's virtually unthinkable that that would ever +// change. `Option` cannot be smaller than its contained type, which implies +// that, and `NonZeroX8` are of size 1 or 0. `NonZeroX8` can represent +// multiple states, so they cannot be 0 bytes, which means that they must be 1 +// byte. The only valid alignment for a 1-byte type is 1. +// +// FIXME(#429): +// - Add quotes from documentation. +// - Add safety comment for `Immutable`. How can we prove that `NonZeroXxx` +// doesn't contain any `UnsafeCell`s? It's obviously true, but it's not clear +// how we'd prove it short of adding text to the stdlib docs that says so +// explicitly, which likely wouldn't be accepted. +// +// [1] Per https://doc.rust-lang.org/1.81.0/std/num/type.NonZeroU8.html: +// +// `NonZeroU8` is guaranteed to have the same layout and bit validity as `u8` with +// the exception that 0 is not a valid instance. +// +// [2] Per https://doc.rust-lang.org/1.81.0/std/num/type.NonZeroI8.html: +// +// `NonZeroI8` is guaranteed to have the same layout and bit validity as `i8` with +// the exception that 0 is not a valid instance. 
+#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + unsafe_impl!(NonZeroU8: Immutable, IntoBytes, Unaligned); + unsafe_impl!(NonZeroI8: Immutable, IntoBytes, Unaligned); + assert_unaligned!(NonZeroU8, NonZeroI8); + unsafe_impl!(NonZeroU16: Immutable, IntoBytes); + unsafe_impl!(NonZeroI16: Immutable, IntoBytes); + unsafe_impl!(NonZeroU32: Immutable, IntoBytes); + unsafe_impl!(NonZeroI32: Immutable, IntoBytes); + unsafe_impl!(NonZeroU64: Immutable, IntoBytes); + unsafe_impl!(NonZeroI64: Immutable, IntoBytes); + unsafe_impl!(NonZeroU128: Immutable, IntoBytes); + unsafe_impl!(NonZeroI128: Immutable, IntoBytes); + unsafe_impl!(NonZeroUsize: Immutable, IntoBytes); + unsafe_impl!(NonZeroIsize: Immutable, IntoBytes); + unsafe_impl_try_from_bytes_for_nonzero!( + NonZeroU8[u8], + NonZeroI8[i8], + NonZeroU16[u16], + NonZeroI16[i16], + NonZeroU32[u32], + NonZeroI32[i32], + NonZeroU64[u64], + NonZeroI64[i64], + NonZeroU128[u128], + NonZeroI128[i128], + NonZeroUsize[usize], + NonZeroIsize[isize] + ); +}; + +// SAFETY: +// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`, `IntoBytes`: +// The Rust compiler reuses `0` value to represent `None`, so +// `size_of::>() == size_of::()`; see `NonZeroXxx` +// documentation. +// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that `Option` +// and `Option` both have size 1. [1] [2] This is worded in a way +// that makes it unclear whether it's meant as a guarantee, but given the +// purpose of those types, it's virtually unthinkable that that would ever +// change. The only valid alignment for a 1-byte type is 1. +// +// [1] Per https://doc.rust-lang.org/1.81.0/std/num/type.NonZeroU8.html: +// +// `Option` is guaranteed to be compatible with `u8`, including in FFI. 
+// +// Thanks to the null pointer optimization, `NonZeroU8` and `Option` +// are guaranteed to have the same size and alignment: +// +// [2] Per https://doc.rust-lang.org/1.81.0/std/num/type.NonZeroI8.html: +// +// `Option` is guaranteed to be compatible with `i8`, including in FFI. +// +// Thanks to the null pointer optimization, `NonZeroI8` and `Option` +// are guaranteed to have the same size and alignment: +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_unaligned!(Option, Option); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); +}; + +// SAFETY: While it's not fully documented, the consensus is that `Box` does +// not contain any `UnsafeCell`s for `T: Sized` [1]. This is not a complete +// proof, but we are accepting this as a known risk per #1358. +// +// [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/492 +#[cfg(feature = "alloc")] +const _: () = unsafe { + unsafe_impl!( + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + T: Sized => Immutable for Box + ) +}; + +// SAFETY: The following types can be transmuted from `[0u8; size_of::()]`. 
[1] +// +// [1] Per https://doc.rust-lang.org/1.89.0/core/option/index.html#representation: +// +// Rust guarantees to optimize the following types `T` such that [`Option`] +// has the same size and alignment as `T`. In some of these cases, Rust +// further guarantees that `transmute::<_, Option>([0u8; size_of::()])` +// is sound and produces `Option::::None`. These cases are identified by +// the second column: +// +// | `T` | `transmute::<_, Option>([0u8; size_of::()])` sound? | +// |-----------------------------------|-----------------------------------------------------------| +// | [`Box`] | when `U: Sized` | +// | `&U` | when `U: Sized` | +// | `&mut U` | when `U: Sized` | +// | [`ptr::NonNull`] | when `U: Sized` | +// | `fn`, `extern "C" fn`[^extern_fn] | always | +// +// [^extern_fn]: this remains true for `unsafe` variants, any argument/return +// types, and any other ABI: `[unsafe] extern "abi" fn` (_e.g._, `extern +// "system" fn`) +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + #[cfg(feature = "alloc")] + unsafe_impl!( + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + T => TryFromBytes for Option>; |c| pointer::is_zeroed(c) + ); + #[cfg(feature = "alloc")] + unsafe_impl!( + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + T => FromZeros for Option> + ); + unsafe_impl!( + T => TryFromBytes for Option<&'_ T>; |c| pointer::is_zeroed(c) + ); + unsafe_impl!(T => FromZeros for Option<&'_ T>); + unsafe_impl!( + T => TryFromBytes for Option<&'_ mut T>; |c| pointer::is_zeroed(c) + ); + unsafe_impl!(T => FromZeros for Option<&'_ mut T>); + unsafe_impl!( + T => TryFromBytes for Option>; |c| pointer::is_zeroed(c) + ); + unsafe_impl!(T => FromZeros for Option>); + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_fn!(...)); + unsafe_impl_for_power_set!( + A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_fn!(...); + |c| pointer::is_zeroed(c) + ); + unsafe_impl_for_power_set!(A, B, 
C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_unsafe_fn!(...)); + unsafe_impl_for_power_set!( + A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_unsafe_fn!(...); + |c| pointer::is_zeroed(c) + ); + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_extern_c_fn!(...)); + unsafe_impl_for_power_set!( + A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_extern_c_fn!(...); + |c| pointer::is_zeroed(c) + ); + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_unsafe_extern_c_fn!(...)); + unsafe_impl_for_power_set!( + A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_unsafe_extern_c_fn!(...); + |c| pointer::is_zeroed(c) + ); +}; + +// SAFETY: `[unsafe] [extern "C"] fn()` self-evidently do not contain +// `UnsafeCell`s. This is not a proof, but we are accepting this as a known risk +// per #1358. +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_fn!(...)); + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_unsafe_fn!(...)); + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_extern_c_fn!(...)); + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_unsafe_extern_c_fn!(...)); +}; + +#[cfg(all( + not(no_zerocopy_target_has_atomics_1_60_0), + any( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + ) +))] +#[cfg_attr(doc_cfg, doc(cfg(rust = "1.60.0")))] +mod atomics { + use super::*; + + macro_rules! impl_traits_for_atomics { + ($($atomics:tt [$primitives:ty]),* $(,)?) 
=> { + $( + impl_known_layout!($atomics); + impl_for_transmute_from!(=> FromZeros for $atomics [$primitives]); + impl_for_transmute_from!(=> FromBytes for $atomics [$primitives]); + impl_for_transmute_from!(=> TryFromBytes for $atomics [$primitives]); + impl_for_transmute_from!(=> IntoBytes for $atomics [$primitives]); + )* + }; + } + + /// Implements `TransmuteFrom` for `$atomic`, `$prim`, and + /// `UnsafeCell<$prim>`. + /// + /// # Safety + /// + /// `$atomic` must have the same size and bit validity as `$prim`. + macro_rules! unsafe_impl_transmute_from_for_atomic { + ($($($tyvar:ident)? => $atomic:ty [$prim:ty]),*) => {{ + crate::util::macros::__unsafe(); + + use crate::pointer::{SizeEq, TransmuteFrom, invariant::Valid}; + + $( + // SAFETY: The caller promised that `$atomic` and `$prim` have + // the same size and bit validity. + unsafe impl<$($tyvar)?> TransmuteFrom<$atomic, Valid, Valid> for $prim {} + // SAFETY: The caller promised that `$atomic` and `$prim` have + // the same size and bit validity. + unsafe impl<$($tyvar)?> TransmuteFrom<$prim, Valid, Valid> for $atomic {} + + impl<$($tyvar)?> SizeEq> for ReadOnly<$prim> { + type CastFrom = $crate::pointer::cast::CastSizedExact; + } + + // SAFETY: The caller promised that `$atomic` and `$prim` have + // the same bit validity. `UnsafeCell` has the same bit + // validity as `T` [1]. + // + // [1] Per https://doc.rust-lang.org/1.85.0/std/cell/struct.UnsafeCell.html#memory-layout: + // + // `UnsafeCell` has the same in-memory representation as + // its inner type `T`. A consequence of this guarantee is that + // it is possible to convert between `T` and `UnsafeCell`. + unsafe impl<$($tyvar)?> TransmuteFrom<$atomic, Valid, Valid> for core::cell::UnsafeCell<$prim> {} + // SAFETY: See previous safety comment. 
+ unsafe impl<$($tyvar)?> TransmuteFrom, Valid, Valid> for $atomic {} + )* + }}; + } + + #[cfg(target_has_atomic = "8")] + #[cfg_attr(doc_cfg, doc(cfg(target_has_atomic = "8")))] + mod atomic_8 { + use core::sync::atomic::{AtomicBool, AtomicI8, AtomicU8}; + + use super::*; + + impl_traits_for_atomics!(AtomicU8[u8], AtomicI8[i8]); + + impl_known_layout!(AtomicBool); + impl_for_transmute_from!(=> FromZeros for AtomicBool [bool]); + impl_for_transmute_from!(=> TryFromBytes for AtomicBool [bool]); + impl_for_transmute_from!(=> IntoBytes for AtomicBool [bool]); + + // SAFETY: Per [1], `AtomicBool`, `AtomicU8`, and `AtomicI8` have the + // same size as `bool`, `u8`, and `i8` respectively. Since a type's + // alignment cannot be smaller than 1 [2], and since its alignment + // cannot be greater than its size [3], the only possible value for the + // alignment is 1. Thus, it is sound to implement `Unaligned`. + // + // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU8.html: + // + // This type has the same size, alignment, and bit validity as the + // underlying integer type + // + // [2] Per https://doc.rust-lang.org/1.81.0/reference/type-layout.html#size-and-alignment: + // + // Alignment is measured in bytes, and must be at least 1. + // + // [3] Per https://doc.rust-lang.org/1.81.0/reference/type-layout.html#size-and-alignment: + // + // The size of a value is always a multiple of its alignment. + #[allow(clippy::multiple_unsafe_ops_per_block)] + const _: () = unsafe { + unsafe_impl!(AtomicBool: Unaligned); + unsafe_impl!(AtomicU8: Unaligned); + unsafe_impl!(AtomicI8: Unaligned); + assert_unaligned!(AtomicBool, AtomicU8, AtomicI8); + }; + + // SAFETY: `AtomicU8`, `AtomicI8`, and `AtomicBool` have the same size + // and bit validity as `u8`, `i8`, and `bool` respectively [1][2][3]. 
+ // + // [1] Per https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicU8.html: + // + // This type has the same size, alignment, and bit validity as the + // underlying integer type, `u8`. + // + // [2] Per https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicI8.html: + // + // This type has the same size, alignment, and bit validity as the + // underlying integer type, `i8`. + // + // [3] Per https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicBool.html: + // + // This type has the same size, alignment, and bit validity a `bool`. + #[allow(clippy::multiple_unsafe_ops_per_block)] + const _: () = unsafe { + unsafe_impl_transmute_from_for_atomic!( + => AtomicU8 [u8], + => AtomicI8 [i8], + => AtomicBool [bool] + ) + }; + } + + #[cfg(target_has_atomic = "16")] + #[cfg_attr(doc_cfg, doc(cfg(target_has_atomic = "16")))] + mod atomic_16 { + use core::sync::atomic::{AtomicI16, AtomicU16}; + + use super::*; + + impl_traits_for_atomics!(AtomicU16[u16], AtomicI16[i16]); + + // SAFETY: `AtomicU16` and `AtomicI16` have the same size and bit + // validity as `u16` and `i16` respectively [1][2]. + // + // [1] Per https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicU16.html: + // + // This type has the same size and bit validity as the underlying + // integer type, `u16`. + // + // [2] Per https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicI16.html: + // + // This type has the same size and bit validity as the underlying + // integer type, `i16`. 
+ #[allow(clippy::multiple_unsafe_ops_per_block)] + const _: () = unsafe { + unsafe_impl_transmute_from_for_atomic!(=> AtomicU16 [u16], => AtomicI16 [i16]) + }; + } + + #[cfg(target_has_atomic = "32")] + #[cfg_attr(doc_cfg, doc(cfg(target_has_atomic = "32")))] + mod atomic_32 { + use core::sync::atomic::{AtomicI32, AtomicU32}; + + use super::*; + + impl_traits_for_atomics!(AtomicU32[u32], AtomicI32[i32]); + + // SAFETY: `AtomicU32` and `AtomicI32` have the same size and bit + // validity as `u32` and `i32` respectively [1][2]. + // + // [1] Per https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicU32.html: + // + // This type has the same size and bit validity as the underlying + // integer type, `u32`. + // + // [2] Per https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicI32.html: + // + // This type has the same size and bit validity as the underlying + // integer type, `i32`. + #[allow(clippy::multiple_unsafe_ops_per_block)] + const _: () = unsafe { + unsafe_impl_transmute_from_for_atomic!(=> AtomicU32 [u32], => AtomicI32 [i32]) + }; + } + + #[cfg(target_has_atomic = "64")] + #[cfg_attr(doc_cfg, doc(cfg(target_has_atomic = "64")))] + mod atomic_64 { + use core::sync::atomic::{AtomicI64, AtomicU64}; + + use super::*; + + impl_traits_for_atomics!(AtomicU64[u64], AtomicI64[i64]); + + // SAFETY: `AtomicU64` and `AtomicI64` have the same size and bit + // validity as `u64` and `i64` respectively [1][2]. + // + // [1] Per https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicU64.html: + // + // This type has the same size and bit validity as the underlying + // integer type, `u64`. + // + // [2] Per https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicI64.html: + // + // This type has the same size and bit validity as the underlying + // integer type, `i64`. 
+ #[allow(clippy::multiple_unsafe_ops_per_block)] + const _: () = unsafe { + unsafe_impl_transmute_from_for_atomic!(=> AtomicU64 [u64], => AtomicI64 [i64]) + }; + } + + #[cfg(target_has_atomic = "ptr")] + #[cfg_attr(doc_cfg, doc(cfg(target_has_atomic = "ptr")))] + mod atomic_ptr { + use core::sync::atomic::{AtomicIsize, AtomicPtr, AtomicUsize}; + + use super::*; + + impl_traits_for_atomics!(AtomicUsize[usize], AtomicIsize[isize]); + + // FIXME(#170): Implement `FromBytes` and `IntoBytes` once we implement + // those traits for `*mut T`. + impl_known_layout!(T => AtomicPtr); + impl_for_transmute_from!(T => TryFromBytes for AtomicPtr [*mut T]); + impl_for_transmute_from!(T => FromZeros for AtomicPtr [*mut T]); + + // SAFETY: `AtomicUsize` and `AtomicIsize` have the same size and bit + // validity as `usize` and `isize` respectively [1][2]. + // + // [1] Per https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicUsize.html: + // + // This type has the same size and bit validity as the underlying + // integer type, `usize`. + // + // [2] Per https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicIsize.html: + // + // This type has the same size and bit validity as the underlying + // integer type, `isize`. + #[allow(clippy::multiple_unsafe_ops_per_block)] + const _: () = unsafe { + unsafe_impl_transmute_from_for_atomic!(=> AtomicUsize [usize], => AtomicIsize [isize]) + }; + + // SAFETY: Per + // https://doc.rust-lang.org/1.85.0/std/sync/atomic/struct.AtomicPtr.html: + // + // This type has the same size and bit validity as a `*mut T`. + #[allow(clippy::multiple_unsafe_ops_per_block)] + const _: () = unsafe { unsafe_impl_transmute_from_for_atomic!(T => AtomicPtr [*mut T]) }; + } +} + +// SAFETY: Per reference [1]: "For all T, the following are guaranteed: +// size_of::>() == 0 align_of::>() == 1". This +// gives: +// - `Immutable`: `PhantomData` has no fields. 
+// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: There is only +// one possible sequence of 0 bytes, and `PhantomData` is inhabited. +// - `IntoBytes`: Since `PhantomData` has size 0, it contains no padding bytes. +// - `Unaligned`: Per the preceding reference, `PhantomData` has alignment 1. +// +// [1] https://doc.rust-lang.org/1.81.0/std/marker/struct.PhantomData.html#layout-1 +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + unsafe_impl!(T: ?Sized => Immutable for PhantomData); + unsafe_impl!(T: ?Sized => TryFromBytes for PhantomData); + unsafe_impl!(T: ?Sized => FromZeros for PhantomData); + unsafe_impl!(T: ?Sized => FromBytes for PhantomData); + unsafe_impl!(T: ?Sized => IntoBytes for PhantomData); + unsafe_impl!(T: ?Sized => Unaligned for PhantomData); + assert_unaligned!(PhantomData<()>, PhantomData, PhantomData); +}; + +impl_for_transmute_from!(T: TryFromBytes => TryFromBytes for Wrapping[T]); +impl_for_transmute_from!(T: FromZeros => FromZeros for Wrapping[T]); +impl_for_transmute_from!(T: FromBytes => FromBytes for Wrapping[T]); +impl_for_transmute_from!(T: IntoBytes => IntoBytes for Wrapping[T]); +assert_unaligned!(Wrapping<()>, Wrapping); + +// SAFETY: Per [1], `Wrapping` has the same layout as `T`. Since its single +// field (of type `T`) is public, it would be a breaking change to add or remove +// fields. Thus, we know that `Wrapping` contains a `T` (as opposed to just +// having the same size and alignment as `T`) with no pre- or post-padding. +// Thus, `Wrapping` must have `UnsafeCell`s covering the same byte ranges as +// `Inner = T`. +// +// [1] Per https://doc.rust-lang.org/1.81.0/std/num/struct.Wrapping.html#layout-1: +// +// `Wrapping` is guaranteed to have the same layout and ABI as `T` +const _: () = unsafe { unsafe_impl!(T: Immutable => Immutable for Wrapping) }; + +// SAFETY: Per [1] in the preceding safety comment, `Wrapping` has the same +// alignment as `T`. 
+const _: () = unsafe { unsafe_impl!(T: Unaligned => Unaligned for Wrapping) }; + +// SAFETY: `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: +// `MaybeUninit` has no restrictions on its contents. +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + unsafe_impl!(T => TryFromBytes for CoreMaybeUninit); + unsafe_impl!(T => FromZeros for CoreMaybeUninit); + unsafe_impl!(T => FromBytes for CoreMaybeUninit); +}; + +// SAFETY: `MaybeUninit` has `UnsafeCell`s covering the same byte ranges as +// `Inner = T`. This is not explicitly documented, but it can be inferred. Per +// [1], `MaybeUninit` has the same size as `T`. Further, note the signature +// of `MaybeUninit::assume_init_ref` [2]: +// +// pub unsafe fn assume_init_ref(&self) -> &T +// +// If the argument `&MaybeUninit` and the returned `&T` had `UnsafeCell`s at +// different offsets, this would be unsound. Its existence is proof that this is +// not the case. +// +// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1: +// +// `MaybeUninit` is guaranteed to have the same size, alignment, and ABI as +// `T`. +// +// [2] https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#method.assume_init_ref +const _: () = unsafe { unsafe_impl!(T: Immutable => Immutable for CoreMaybeUninit) }; + +// SAFETY: Per [1] in the preceding safety comment, `MaybeUninit` has the +// same alignment as `T`. +const _: () = unsafe { unsafe_impl!(T: Unaligned => Unaligned for CoreMaybeUninit) }; +assert_unaligned!(CoreMaybeUninit<()>, CoreMaybeUninit); + +// SAFETY: `ManuallyDrop` has the same layout as `T` [1]. This strongly +// implies, but does not guarantee, that it contains `UnsafeCell`s covering the +// same byte ranges as in `T`. However, it also implements `Defer` +// [2], which provides the ability to convert `&ManuallyDrop -> &T`. 
This, +// combined with having the same size as `T`, implies that `ManuallyDrop` +// exactly contains a `T` with the same fields and `UnsafeCell`s covering the +// same byte ranges, or else the `Deref` impl would permit safe code to obtain +// different shared references to the same region of memory with different +// `UnsafeCell` coverage, which would in turn permit interior mutation that +// would violate the invariants of a shared reference. +// +// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html: +// +// `ManuallyDrop` is guaranteed to have the same layout and bit validity as +// `T` +// +// [2] https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html#impl-Deref-for-ManuallyDrop%3CT%3E +const _: () = unsafe { unsafe_impl!(T: ?Sized + Immutable => Immutable for ManuallyDrop) }; + +impl_for_transmute_from!(T: ?Sized + TryFromBytes => TryFromBytes for ManuallyDrop[T]); +impl_for_transmute_from!(T: ?Sized + FromZeros => FromZeros for ManuallyDrop[T]); +impl_for_transmute_from!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop[T]); +impl_for_transmute_from!(T: ?Sized + IntoBytes => IntoBytes for ManuallyDrop[T]); +// SAFETY: `ManuallyDrop` has the same layout as `T` [1], and thus has the +// same alignment as `T`. +// +// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html: +// +// `ManuallyDrop` is guaranteed to have the same layout and bit validity as +// `T` +const _: () = unsafe { unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop) }; +assert_unaligned!(ManuallyDrop<()>, ManuallyDrop); + +const _: () = { + #[allow( + non_camel_case_types, + missing_copy_implementations, + missing_debug_implementations, + missing_docs + )] + pub enum value {} + + // SAFETY: See safety comment on `ProjectToTag`. 
+ unsafe impl HasTag for ManuallyDrop { + #[inline] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized, + { + } + + type Tag = (); + + // SAFETY: It is trivially sound to project any pointer to a pointer to + // a type of size zero and alignment 1 (which `()` is [1]). Such a + // pointer will trivially satisfy its aliasing and validity requirements + // (since it has a zero-sized referent), and its alignment requirement + // (since it is aligned to 1). + // + // [1] Per https://doc.rust-lang.org/1.92.0/reference/type-layout.html#r-layout.tuple.unit: + // + // [T]he unit tuple (`()`)... is guaranteed as a zero-sized type to + // have a size of 0 and an alignment of 1. + type ProjectToTag = crate::pointer::cast::CastToUnit; + } + + // SAFETY: `ManuallyDrop` has a field of type `T` at offset `0` without + // any safety invariants beyond those of `T`. Its existence is not + // explicitly documented, but it can be inferred; per [1] `ManuallyDrop` + // has the same size and bit validity as `T`. This field is not literally + // public, but is effectively so; the field can be transparently: + // + // - initialized via `ManuallyDrop::new` + // - moved via `ManuallyDrop::into_inner` + // - referenced via `ManuallyDrop::deref` + // - exclusively referenced via `ManuallyDrop::deref_mut` + // + // We call this field `value`, both because that is both the name of this + // private field, and because it is the name it is referred to in the public + // documentation of `ManuallyDrop::new`, `ManuallyDrop::into_inner`, + // `ManuallyDrop::take` and `ManuallyDrop::drop`. + unsafe impl + HasField + for ManuallyDrop + { + #[inline] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized, + { + } + + type Type = T; + + #[inline(always)] + fn project(slf: PtrInner<'_, Self>) -> *mut T { + // SAFETY: `ManuallyDrop` has the same layout and bit validity as + // `T` [1]. 
+ // + // [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html: + // + // `ManuallyDrop` is guaranteed to have the same layout and bit + // validity as `T` + #[allow(clippy::as_conversions)] + return slf.as_ptr() as *mut T; + } + } +}; + +impl_for_transmute_from!(T: ?Sized + TryFromBytes => TryFromBytes for Cell[T]); +impl_for_transmute_from!(T: ?Sized + FromZeros => FromZeros for Cell[T]); +impl_for_transmute_from!(T: ?Sized + FromBytes => FromBytes for Cell[T]); +impl_for_transmute_from!(T: ?Sized + IntoBytes => IntoBytes for Cell[T]); +// SAFETY: `Cell` has the same in-memory representation as `T` [1], and thus +// has the same alignment as `T`. +// +// [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.Cell.html#memory-layout: +// +// `Cell` has the same in-memory representation as its inner type `T`. +const _: () = unsafe { unsafe_impl!(T: ?Sized + Unaligned => Unaligned for Cell) }; + +impl_for_transmute_from!(T: ?Sized + FromZeros => FromZeros for UnsafeCell[T]); +impl_for_transmute_from!(T: ?Sized + FromBytes => FromBytes for UnsafeCell[T]); +impl_for_transmute_from!(T: ?Sized + IntoBytes => IntoBytes for UnsafeCell[T]); +// SAFETY: `UnsafeCell` has the same in-memory representation as `T` [1], and +// thus has the same alignment as `T`. +// +// [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout: +// +// `UnsafeCell` has the same in-memory representation as its inner type +// `T`. +const _: () = unsafe { unsafe_impl!(T: ?Sized + Unaligned => Unaligned for UnsafeCell) }; +assert_unaligned!(UnsafeCell<()>, UnsafeCell); + +// SAFETY: See safety comment in `is_bit_valid` impl. 
+unsafe impl TryFromBytes for UnsafeCell { + #[allow(clippy::missing_inline_in_public_items)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized, + { + } + + #[inline(always)] + fn is_bit_valid(candidate: Maybe<'_, Self, A>) -> bool + where + A: invariant::Alignment, + { + T::is_bit_valid(candidate.transmute::<_, _, BecauseImmutable>()) + } +} + +// SAFETY: Per the reference [1]: +// +// An array of `[T; N]` has a size of `size_of::() * N` and the same +// alignment of `T`. Arrays are laid out so that the zero-based `nth` element +// of the array is offset from the start of the array by `n * size_of::()` +// bytes. +// +// ... +// +// Slices have the same layout as the section of the array they slice. +// +// In other words, the layout of a `[T]` or `[T; N]` is a sequence of `T`s laid +// out back-to-back with no bytes in between. Therefore, `[T]` or `[T; N]` are +// `Immutable`, `TryFromBytes`, `FromZeros`, `FromBytes`, and `IntoBytes` if `T` +// is (respectively). Furthermore, since an array/slice has "the same alignment +// of `T`", `[T]` and `[T; N]` are `Unaligned` if `T` is. +// +// Note that we don't `assert_unaligned!` for slice types because +// `assert_unaligned!` uses `align_of`, which only works for `Sized` types. +// +// [1] https://doc.rust-lang.org/1.81.0/reference/type-layout.html#array-layout +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + unsafe_impl!(const N: usize, T: Immutable => Immutable for [T; N]); + unsafe_impl!(const N: usize, T: TryFromBytes => TryFromBytes for [T; N]; |c| { + let c: Ptr<'_, [ReadOnly; N], _> = c.cast::<_, crate::pointer::cast::CastSized, _>(); + let c: Ptr<'_, [ReadOnly], _> = c.as_slice(); + let c: Ptr<'_, ReadOnly<[T]>, _> = c.cast::<_, crate::pointer::cast::CastUnsized, _>(); + + // Note that this call may panic, but it would still be sound even if it + // did. 
`is_bit_valid` does not promise that it will not panic (in fact, + // it explicitly warns that it's a possibility), and we have not + // violated any safety invariants that we must fix before returning. + <[T] as TryFromBytes>::is_bit_valid(c) + }); + unsafe_impl!(const N: usize, T: FromZeros => FromZeros for [T; N]); + unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]); + unsafe_impl!(const N: usize, T: IntoBytes => IntoBytes for [T; N]); + unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]); + assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]); + unsafe_impl!(T: Immutable => Immutable for [T]); + unsafe_impl!(T: TryFromBytes => TryFromBytes for [T]; |c| { + let c: Ptr<'_, [ReadOnly], _> = c.cast::<_, crate::pointer::cast::CastUnsized, _>(); + + // SAFETY: Per the reference [1]: + // + // An array of `[T; N]` has a size of `size_of::() * N` and the + // same alignment of `T`. Arrays are laid out so that the zero-based + // `nth` element of the array is offset from the start of the array by + // `n * size_of::()` bytes. + // + // ... + // + // Slices have the same layout as the section of the array they slice. + // + // In other words, the layout of a `[T] is a sequence of `T`s laid out + // back-to-back with no bytes in between. If all elements in `candidate` + // are `is_bit_valid`, so too is `candidate`. + // + // Note that any of the below calls may panic, but it would still be + // sound even if it did. `is_bit_valid` does not promise that it will + // not panic (in fact, it explicitly warns that it's a possibility), and + // we have not violated any safety invariants that we must fix before + // returning. + c.iter().all(::is_bit_valid) + }); + unsafe_impl!(T: FromZeros => FromZeros for [T]); + unsafe_impl!(T: FromBytes => FromBytes for [T]); + unsafe_impl!(T: IntoBytes => IntoBytes for [T]); + unsafe_impl!(T: Unaligned => Unaligned for [T]); +}; + +// SAFETY: +// - `Immutable`: Raw pointers do not contain any `UnsafeCell`s. 
+// - `FromZeros`: For thin pointers (note that `T: Sized`), the zero pointer is +// considered "null". [1] No operations which require provenance are legal on +// null pointers, so this is not a footgun. +// - `TryFromBytes`: By the same reasoning as for `FromZeroes`, we can implement +// `TryFromBytes` for thin pointers provided that +// [`TryFromByte::is_bit_valid`] only produces `true` for zeroed bytes. +// +// NOTE(#170): Implementing `FromBytes` and `IntoBytes` for raw pointers would +// be sound, but carries provenance footguns. We want to support `FromBytes` and +// `IntoBytes` for raw pointers eventually, but we are holding off until we can +// figure out how to address those footguns. +// +// [1] Per https://doc.rust-lang.org/1.81.0/std/ptr/fn.null.html: +// +// Creates a null raw pointer. +// +// This function is equivalent to zero-initializing the pointer: +// `MaybeUninit::<*const T>::zeroed().assume_init()`. +// +// The resulting pointer has the address 0. +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + unsafe_impl!(T: ?Sized => Immutable for *const T); + unsafe_impl!(T: ?Sized => Immutable for *mut T); + unsafe_impl!(T => TryFromBytes for *const T; |c| pointer::is_zeroed(c)); + unsafe_impl!(T => FromZeros for *const T); + unsafe_impl!(T => TryFromBytes for *mut T; |c| pointer::is_zeroed(c)); + unsafe_impl!(T => FromZeros for *mut T); +}; + +// SAFETY: `NonNull` self-evidently does not contain `UnsafeCell`s. This is +// not a proof, but we are accepting this as a known risk per #1358. +const _: () = unsafe { unsafe_impl!(T: ?Sized => Immutable for NonNull) }; + +// SAFETY: Reference types do not contain any `UnsafeCell`s. 
+#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + unsafe_impl!(T: ?Sized => Immutable for &'_ T); + unsafe_impl!(T: ?Sized => Immutable for &'_ mut T); +}; + +// SAFETY: `Option` is not `#[non_exhaustive]` [1], which means that the types +// in its variants cannot change, and no new variants can be added. `Option` +// does not contain any `UnsafeCell`s outside of `T`. [1] +// +// [1] https://doc.rust-lang.org/core/option/enum.Option.html +const _: () = unsafe { unsafe_impl!(T: Immutable => Immutable for Option) }; + +mod tuples { + use super::*; + + /// Generates various trait implementations for tuples. + /// + /// # Safety + /// + /// `impl_tuple!` should be provided name-number pairs, where each number is + /// the ordinal of the preceding type name. + macro_rules! impl_tuple { + // Entry point. + ($($T:ident $I:tt),+ $(,)?) => { + crate::util::macros::__unsafe(); + impl_tuple!(@all [] [$($T $I)+]); + }; + + // Build up the set of tuple types (i.e., `(A,)`, `(A, B)`, `(A, B, C)`, + // etc.) Trait implementations that do not depend on field index may be + // added to this branch. + (@all [$($head_T:ident $head_I:tt)*] [$next_T:ident $next_I:tt $($tail:tt)*]) => { + // SAFETY: If all fields of the tuple `Self` are `Immutable`, so too is `Self`. + unsafe_impl!($($head_T: Immutable,)* $next_T: Immutable => Immutable for ($($head_T,)* $next_T,)); + + // SAFETY: If all fields in `c` are `is_bit_valid`, so too is `c`. + unsafe_impl!($($head_T: TryFromBytes,)* $next_T: TryFromBytes => TryFromBytes for ($($head_T,)* $next_T,); |c| { + let mut c = c; + $(TryFromBytes::is_bit_valid(into_inner!(c.reborrow().project::<_, { crate::STRUCT_VARIANT_ID }, { crate::ident_id!($head_I) }>())) &&)* + TryFromBytes::is_bit_valid(into_inner!(c.reborrow().project::<_, { crate::STRUCT_VARIANT_ID }, { crate::ident_id!($next_I) }>())) + }); + + // SAFETY: If all fields in `Self` are `FromZeros`, so too is `Self`. 
+ unsafe_impl!($($head_T: FromZeros,)* $next_T: FromZeros => FromZeros for ($($head_T,)* $next_T,)); + + // SAFETY: If all fields in `Self` are `FromBytes`, so too is `Self`. + unsafe_impl!($($head_T: FromBytes,)* $next_T: FromBytes => FromBytes for ($($head_T,)* $next_T,)); + + // SAFETY: See safety comment on `ProjectToTag`. + unsafe impl<$($head_T,)* $next_T> crate::HasTag for ($($head_T,)* $next_T,) { + #[inline] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized + {} + + type Tag = (); + + // SAFETY: It is trivially sound to project any pointer to a + // pointer to a type of size zero and alignment 1 (which `()` is + // [1]). Such a pointer will trivially satisfy its aliasing and + // validity requirements (since it has a zero-sized referent), + // and its alignment requirement (since it is aligned to 1). + // + // [1] Per https://doc.rust-lang.org/1.92.0/reference/type-layout.html#r-layout.tuple.unit: + // + // [T]he unit tuple (`()`)... is guaranteed as a zero-sized + // type to have a size of 0 and an alignment of 1. + type ProjectToTag = crate::pointer::cast::CastToUnit; + } + + // Generate impls that depend on tuple index. + impl_tuple!(@variants + [$($head_T $head_I)* $next_T $next_I] + [] + [$($head_T $head_I)* $next_T $next_I] + ); + + // Recurse to next tuple size + impl_tuple!(@all [$($head_T $head_I)* $next_T $next_I] [$($tail)*]); + }; + (@all [$($head_T:ident $head_I:tt)*] []) => {}; + + // Emit trait implementations that depend on field index. + (@variants + // The full tuple definition in type–index pairs. + [$($AllT:ident $AllI:tt)+] + // Types before the current index. + [$($BeforeT:ident)*] + // The types and indices at and after the current index. + [$CurrT:ident $CurrI:tt $($AfterT:ident $AfterI:tt)*] + ) => { + // SAFETY: + // - `Self` is a struct (albeit anonymous), so `VARIANT_ID` is + // `STRUCT_VARIANT_ID`. 
+ // - `$CurrI` is the field at index `$CurrI`, so `FIELD_ID` is + // `zerocopy::ident_id!($CurrI)` + // - `()` has the same visibility as the `.$CurrI` field (ie, `.0`, + // `.1`, etc) + // - `Type` has the same type as `$CurrI`; i.e., `$CurrT`. + unsafe impl<$($AllT),+> crate::HasField< + (), + { crate::STRUCT_VARIANT_ID }, + { crate::ident_id!($CurrI)} + > for ($($AllT,)+) { + #[inline] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized + {} + + type Type = $CurrT; + + #[inline(always)] + fn project(slf: crate::PtrInner<'_, Self>) -> *mut Self::Type { + let slf = slf.as_non_null().as_ptr(); + // SAFETY: `PtrInner` promises it references either a zero-sized + // byte range, or else will reference a byte range that is + // entirely contained within an allocated object. In either + // case, this guarantees that `(*slf).$CurrI` is in-bounds of + // `slf`. + unsafe { core::ptr::addr_of_mut!((*slf).$CurrI) } + } + } + + // SAFETY: See comments on items. + unsafe impl crate::ProjectField< + (), + (Aliasing, Alignment, crate::invariant::Uninit), + { crate::STRUCT_VARIANT_ID }, + { crate::ident_id!($CurrI)} + > for ($($AllT,)+) + where + Aliasing: crate::invariant::Aliasing, + Alignment: crate::invariant::Alignment, + { + #[inline] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized + {} + + // SAFETY: Tuples are product types whose fields are + // well-aligned, so projection preserves both the alignment and + // validity invariants of the outer pointer. + type Invariants = (Aliasing, Alignment, crate::invariant::Uninit); + + // SAFETY: Tuples are product types and so projection is infallible; + type Error = core::convert::Infallible; + } + + // SAFETY: See comments on items. 
+ unsafe impl crate::ProjectField< + (), + (Aliasing, Alignment, crate::invariant::Initialized), + { crate::STRUCT_VARIANT_ID }, + { crate::ident_id!($CurrI)} + > for ($($AllT,)+) + where + Aliasing: crate::invariant::Aliasing, + Alignment: crate::invariant::Alignment, + { + #[inline] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized + {} + + // SAFETY: Tuples are product types whose fields are + // well-aligned, so projection preserves both the alignment and + // validity invariants of the outer pointer. + type Invariants = (Aliasing, Alignment, crate::invariant::Initialized); + + // SAFETY: Tuples are product types and so projection is infallible; + type Error = core::convert::Infallible; + } + + // SAFETY: See comments on items. + unsafe impl crate::ProjectField< + (), + (Aliasing, Alignment, crate::invariant::Valid), + { crate::STRUCT_VARIANT_ID }, + { crate::ident_id!($CurrI)} + > for ($($AllT,)+) + where + Aliasing: crate::invariant::Aliasing, + Alignment: crate::invariant::Alignment, + { + #[inline] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized + {} + + // SAFETY: Tuples are product types whose fields are + // well-aligned, so projection preserves both the alignment and + // validity invariants of the outer pointer. + type Invariants = (Aliasing, Alignment, crate::invariant::Valid); + + // SAFETY: Tuples are product types and so projection is infallible; + type Error = core::convert::Infallible; + } + + // Recurse to the next index. + impl_tuple!(@variants [$($AllT $AllI)+] [$($BeforeT)* $CurrT] [$($AfterT $AfterI)*]); + }; + (@variants [$($AllT:ident $AllI:tt)+] [$($BeforeT:ident)*] []) => {}; + } + + // SAFETY: `impl_tuple` is provided name-number pairs, where number is the + // ordinal of the name. + #[allow(clippy::multiple_unsafe_ops_per_block)] + const _: () = unsafe { + impl_tuple! 
{ + A 0, + B 1, + C 2, + D 3, + E 4, + F 5, + G 6, + H 7, + I 8, + J 9, + K 10, + L 11, + M 12, + N 13, + O 14, + P 15, + Q 16, + R 17, + S 18, + T 19, + U 20, + V 21, + W 22, + X 23, + Y 24, + Z 25, + }; + }; +} + +// SIMD support +// +// Per the Unsafe Code Guidelines Reference [1]: +// +// Packed SIMD vector types are `repr(simd)` homogeneous tuple-structs +// containing `N` elements of type `T` where `N` is a power-of-two and the +// size and alignment requirements of `T` are equal: +// +// ```rust +// #[repr(simd)] +// struct Vector(T_0, ..., T_(N - 1)); +// ``` +// +// ... +// +// The size of `Vector` is `N * size_of::()` and its alignment is an +// implementation-defined function of `T` and `N` greater than or equal to +// `align_of::()`. +// +// ... +// +// Vector elements are laid out in source field order, enabling random access +// to vector elements by reinterpreting the vector as an array: +// +// ```rust +// union U { +// vec: Vector, +// arr: [T; N] +// } +// +// assert_eq!(size_of::>(), size_of::<[T; N]>()); +// assert!(align_of::>() >= align_of::<[T; N]>()); +// +// unsafe { +// let u = U { vec: Vector(t_0, ..., t_(N - 1)) }; +// +// assert_eq!(u.vec.0, u.arr[0]); +// // ... +// assert_eq!(u.vec.(N - 1), u.arr[N - 1]); +// } +// ``` +// +// Given this background, we can observe that: +// - The size and bit pattern requirements of a SIMD type are equivalent to the +// equivalent array type. Thus, for any SIMD type whose primitive `T` is +// `Immutable`, `TryFromBytes`, `FromZeros`, `FromBytes`, or `IntoBytes`, that +// SIMD type is also `Immutable`, `TryFromBytes`, `FromZeros`, `FromBytes`, or +// `IntoBytes` respectively. +// - Since no upper bound is placed on the alignment, no SIMD type can be +// guaranteed to be `Unaligned`. +// +// Also per [1]: +// +// This chapter represents the consensus from issue #38. The statements in +// here are not (yet) "guaranteed" not to change until an RFC ratifies them. +// +// See issue #38 [2]. 
While this behavior is not technically guaranteed, the +// likelihood that the behavior will change such that SIMD types are no longer +// `TryFromBytes`, `FromZeros`, `FromBytes`, or `IntoBytes` is next to zero, as +// that would defeat the entire purpose of SIMD types. Nonetheless, we put this +// behavior behind the `simd` Cargo feature, which requires consumers to opt +// into this stability hazard. +// +// [1] https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html +// [2] https://github.com/rust-lang/unsafe-code-guidelines/issues/38 +#[cfg(feature = "simd")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "simd")))] +mod simd { + /// Defines a module which implements `TryFromBytes`, `FromZeros`, + /// `FromBytes`, and `IntoBytes` for a set of types from a module in + /// `core::arch`. + /// + /// `$arch` is both the name of the defined module and the name of the + /// module in `core::arch`, and `$typ` is the list of items from that module + /// to implement `FromZeros`, `FromBytes`, and `IntoBytes` for. + #[allow(unused_macros)] // `allow(unused_macros)` is needed because some + // target/feature combinations don't emit any impls + // and thus don't use this macro. + macro_rules! simd_arch_mod { + ($(#[cfg $cfg:tt])* $(#[cfg_attr $cfg_attr:tt])? $arch:ident, $mod:ident, $($typ:ident),*) => { + $(#[cfg $cfg])* + #[cfg_attr(doc_cfg, doc(cfg $($cfg)*))] + $(#[cfg_attr $cfg_attr])? + mod $mod { + use core::arch::$arch::{$($typ),*}; + + use crate::*; + impl_known_layout!($($typ),*); + // SAFETY: See comment on module definition for justification. 
+ #[allow(clippy::multiple_unsafe_ops_per_block)] + const _: () = unsafe { + $( unsafe_impl!($typ: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); )* + }; + } + }; + } + + #[rustfmt::skip] + const _: () = { + simd_arch_mod!( + #[cfg(target_arch = "x86")] + x86, x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i + ); + #[cfg(not(no_zerocopy_simd_x86_avx12_1_89_0))] + simd_arch_mod!( + #[cfg(target_arch = "x86")] + #[cfg_attr(doc_cfg, doc(cfg(rust = "1.89.0")))] + x86, x86_nightly, __m512bh, __m512, __m512d, __m512i + ); + simd_arch_mod!( + #[cfg(target_arch = "x86_64")] + x86_64, x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i + ); + #[cfg(not(no_zerocopy_simd_x86_avx12_1_89_0))] + simd_arch_mod!( + #[cfg(target_arch = "x86_64")] + #[cfg_attr(doc_cfg, doc(cfg(rust = "1.89.0")))] + x86_64, x86_64_nightly, __m512bh, __m512, __m512d, __m512i + ); + simd_arch_mod!( + #[cfg(target_arch = "wasm32")] + wasm32, wasm32, v128 + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))] + powerpc, powerpc, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))] + powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long + ); + #[cfg(not(no_zerocopy_aarch64_simd_1_59_0))] + simd_arch_mod!( + // NOTE(https://github.com/rust-lang/stdarch/issues/1484): NEON intrinsics are currently + // broken on big-endian platforms. 
+ #[cfg(all(target_arch = "aarch64", target_endian = "little"))] + #[cfg_attr(doc_cfg, doc(cfg(rust = "1.59.0")))] + aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t, + int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t, + int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, + poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t, + poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t, + uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x4x2_t, uint16x4x3_t, + uint16x4x4_t, uint16x8_t, uint32x2_t, uint32x4_t, uint64x1_t, uint64x2_t + ); + }; +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_impls() { + // A type that can supply test cases for testing + // `TryFromBytes::is_bit_valid`. All types passed to `assert_impls!` + // must implement this trait; that macro uses it to generate runtime + // tests for `TryFromBytes` impls. + // + // All `T: FromBytes` types are provided with a blanket impl. Other + // types must implement `TryFromBytesTestable` directly (ie using + // `impl_try_from_bytes_testable!`). + trait TryFromBytesTestable { + fn with_passing_test_cases>)>(f: F); + fn with_failing_test_cases(f: F); + } + + impl TryFromBytesTestable for T { + fn with_passing_test_cases>)>(f: F) { + // Test with a zeroed value. + f(ReadOnly::::new_box_zeroed().unwrap()); + + let ffs = { + let mut t = ReadOnly::new(Self::new_zeroed()); + let ptr: *mut T = ReadOnly::as_mut(&mut t); + // SAFETY: `T: FromBytes` + unsafe { ptr::write_bytes(ptr.cast::(), 0xFF, mem::size_of::()) }; + t + }; + + // Test with a value initialized with 0xFF. + f(Box::new(ffs)); + } + + fn with_failing_test_cases(_f: F) {} + } + + macro_rules! 
impl_try_from_bytes_testable_for_null_pointer_optimization { + ($($tys:ty),*) => { + $( + impl TryFromBytesTestable for Option<$tys> { + fn with_passing_test_cases>)>(f: F) { + // Test with a zeroed value. + f(Box::new(ReadOnly::new(None))); + } + + fn with_failing_test_cases(f: F) { + for pos in 0..mem::size_of::() { + let mut bytes = [0u8; mem::size_of::()]; + bytes[pos] = 0x01; + f(&mut bytes[..]); + } + } + } + )* + }; + } + + // Implements `TryFromBytesTestable`. + macro_rules! impl_try_from_bytes_testable { + // Base case for recursion (when the list of types has run out). + (=> @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {}; + // Implements for type(s) with no type parameters. + ($ty:ty $(,$tys:ty)* => @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => { + impl TryFromBytesTestable for $ty { + impl_try_from_bytes_testable!( + @methods @success $($success_case),* + $(, @failure $($failure_case),*)? + ); + } + impl_try_from_bytes_testable!($($tys),* => @success $($success_case),* $(, @failure $($failure_case),*)?); + }; + // Implements for multiple types with no type parameters. + ($($($ty:ty),* => @success $($success_case:expr), * $(, @failure $($failure_case:expr),*)?;)*) => { + $( + impl_try_from_bytes_testable!($($ty),* => @success $($success_case),* $(, @failure $($failure_case),*)*); + )* + }; + // Implements only the methods; caller must invoke this from inside + // an impl block. + (@methods @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => { + fn with_passing_test_cases>)>(_f: F) { + $( + let bx = Box::::from($success_case); + let ro: Box> = { + let raw = Box::into_raw(bx); + // SAFETY: `ReadOnly` has the same layout and bit + // validity as `T`. + #[allow(clippy::as_conversions)] + unsafe { Box::from_raw(raw as *mut _) } + }; + _f(ro); + )* + } + + fn with_failing_test_cases(_f: F) { + $($( + let mut case = $failure_case; + _f(case.as_mut_bytes()); + )*)? 
+ } + }; + } + + impl_try_from_bytes_testable_for_null_pointer_optimization!( + Box>, + &'static UnsafeCell, + &'static mut UnsafeCell, + NonNull>, + fn(), + FnManyArgs, + extern "C" fn(), + ECFnManyArgs + ); + + macro_rules! bx { + ($e:expr) => { + Box::new($e) + }; + } + + // Note that these impls are only for types which are not `FromBytes`. + // `FromBytes` types are covered by a preceding blanket impl. + impl_try_from_bytes_testable!( + bool => @success true, false, + @failure 2u8, 3u8, 0xFFu8; + char => @success '\u{0}', '\u{D7FF}', '\u{E000}', '\u{10FFFF}', + @failure 0xD800u32, 0xDFFFu32, 0x110000u32; + str => @success "", "hello", "❤️🧡💛💚💙💜", + @failure [0, 159, 146, 150]; + [u8] => @success vec![].into_boxed_slice(), vec![0, 1, 2].into_boxed_slice(); + NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, + NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, + NonZeroUsize, NonZeroIsize + => @success Self::new(1).unwrap(), + // Doing this instead of `0` ensures that we always satisfy + // the size and alignment requirements of `Self` (whereas `0` + // may be any integer type with a different size or alignment + // than some `NonZeroXxx` types). 
+ @failure Option::::None; + [bool; 0] => @success []; + [bool; 1] + => @success [true], [false], + @failure [2u8], [3u8], [0xFFu8]; + [bool] + => @success vec![true, false].into_boxed_slice(), vec![false, true].into_boxed_slice(), + @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8]; + Unalign + => @success Unalign::new(false), Unalign::new(true), + @failure 2u8, 0xFFu8; + ManuallyDrop + => @success ManuallyDrop::new(false), ManuallyDrop::new(true), + @failure 2u8, 0xFFu8; + ManuallyDrop<[u8]> + => @success bx!(ManuallyDrop::new([])), bx!(ManuallyDrop::new([0u8])), bx!(ManuallyDrop::new([0u8, 1u8])); + ManuallyDrop<[bool]> + => @success bx!(ManuallyDrop::new([])), bx!(ManuallyDrop::new([false])), bx!(ManuallyDrop::new([false, true])), + @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8]; + ManuallyDrop<[UnsafeCell]> + => @success bx!(ManuallyDrop::new([UnsafeCell::new(0)])), bx!(ManuallyDrop::new([UnsafeCell::new(0), UnsafeCell::new(1)])); + ManuallyDrop<[UnsafeCell]> + => @success bx!(ManuallyDrop::new([UnsafeCell::new(false)])), bx!(ManuallyDrop::new([UnsafeCell::new(false), UnsafeCell::new(true)])), + @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8]; + Wrapping + => @success Wrapping(false), Wrapping(true), + @failure 2u8, 0xFFu8; + *const NotZerocopy + => @success ptr::null::(), + @failure [0x01; mem::size_of::<*const NotZerocopy>()]; + *mut NotZerocopy + => @success ptr::null_mut::(), + @failure [0x01; mem::size_of::<*mut NotZerocopy>()]; + ); + + // Use the trick described in [1] to allow us to call methods + // conditional on certain trait bounds. + // + // In all of these cases, methods return `Option`, where `R` is the + // return type of the method we're conditionally calling. The "real" + // implementations (the ones defined in traits using `&self`) return + // `Some`, and the default implementations (the ones defined as inherent + // methods using `&mut self`) return `None`. 
+ // + // [1] https://github.com/dtolnay/case-studies/blob/master/autoref-specialization/README.md + mod autoref_trick { + use super::*; + + pub(super) struct AutorefWrapper(pub(super) PhantomData); + + pub(super) trait TestIsBitValidShared { + #[allow(clippy::needless_lifetimes)] + fn test_is_bit_valid_shared<'ptr>(&self, candidate: Maybe<'ptr, T>) + -> Option; + } + + impl TestIsBitValidShared for AutorefWrapper { + #[allow(clippy::needless_lifetimes)] + fn test_is_bit_valid_shared<'ptr>( + &self, + candidate: Maybe<'ptr, T>, + ) -> Option { + Some(T::is_bit_valid(candidate)) + } + } + + pub(super) trait TestTryFromRef { + #[allow(clippy::needless_lifetimes)] + fn test_try_from_ref<'bytes>( + &self, + bytes: &'bytes [u8], + ) -> Option>; + } + + impl TestTryFromRef for AutorefWrapper { + #[allow(clippy::needless_lifetimes)] + fn test_try_from_ref<'bytes>( + &self, + bytes: &'bytes [u8], + ) -> Option> { + Some(T::try_ref_from_bytes(bytes).ok()) + } + } + + pub(super) trait TestTryFromMut { + #[allow(clippy::needless_lifetimes)] + fn test_try_from_mut<'bytes>( + &self, + bytes: &'bytes mut [u8], + ) -> Option>; + } + + impl TestTryFromMut for AutorefWrapper { + #[allow(clippy::needless_lifetimes)] + fn test_try_from_mut<'bytes>( + &self, + bytes: &'bytes mut [u8], + ) -> Option> { + Some(T::try_mut_from_bytes(bytes).ok()) + } + } + + pub(super) trait TestTryReadFrom { + fn test_try_read_from(&self, bytes: &[u8]) -> Option>; + } + + impl TestTryReadFrom for AutorefWrapper { + fn test_try_read_from(&self, bytes: &[u8]) -> Option> { + Some(T::try_read_from_bytes(bytes).ok()) + } + } + + pub(super) trait TestAsBytes { + #[allow(clippy::needless_lifetimes)] + fn test_as_bytes<'slf, 't>(&'slf self, t: &'t ReadOnly) -> Option<&'t [u8]>; + } + + impl TestAsBytes for AutorefWrapper { + #[allow(clippy::needless_lifetimes)] + fn test_as_bytes<'slf, 't>(&'slf self, t: &'t ReadOnly) -> Option<&'t [u8]> { + Some(t.as_bytes()) + } + } + } + + use autoref_trick::*; + + // Asserts 
that `$ty` is one of a list of types which are allowed to not + // provide a "real" implementation for `$fn_name`. Since the + // `autoref_trick` machinery fails silently, this allows us to ensure + // that the "default" impls are only being used for types which we + // expect. + // + // Note that, since this is a runtime test, it is possible to have an + // allowlist which is too restrictive if the function in question is + // never called for a particular type. For example, if `as_bytes` is not + // supported for a particular type, and so `test_as_bytes` returns + // `None`, methods such as `test_try_from_ref` may never be called for + // that type. As a result, it's possible that, for example, adding + // `as_bytes` support for a type would cause other allowlist assertions + // to fail. This means that allowlist assertion failures should not + // automatically be taken as a sign of a bug. + macro_rules! assert_on_allowlist { + ($fn_name:ident($ty:ty) $(: $($tys:ty),*)?) => {{ + use core::any::TypeId; + + let allowlist: &[TypeId] = &[ $($(TypeId::of::<$tys>()),*)? ]; + let allowlist_names: &[&str] = &[ $($(stringify!($tys)),*)? ]; + + let id = TypeId::of::<$ty>(); + assert!(allowlist.contains(&id), "{} is not on allowlist for {}: {:?}", stringify!($ty), stringify!($fn_name), allowlist_names); + }}; + } + + // Asserts that `$ty` implements any `$trait` and doesn't implement any + // `!$trait`. Note that all `$trait`s must come before any `!$trait`s. + // + // For `T: TryFromBytes`, uses `TryFromBytesTestable` to test success + // and failure cases. + macro_rules! assert_impls { + ($ty:ty: TryFromBytes) => { + // "Default" implementations that match the "real" + // implementations defined in the `autoref_trick` module above. 
+ #[allow(unused, non_local_definitions)] + impl AutorefWrapper<$ty> { + #[allow(clippy::needless_lifetimes)] + fn test_is_bit_valid_shared<'ptr>( + &mut self, + candidate: Maybe<'ptr, $ty>, + ) -> Option { + assert_on_allowlist!( + test_is_bit_valid_shared($ty): + ManuallyDrop>, + ManuallyDrop<[UnsafeCell]>, + ManuallyDrop<[UnsafeCell]>, + CoreMaybeUninit, + CoreMaybeUninit>, + Wrapping> + ); + + None + } + + #[allow(clippy::needless_lifetimes)] + fn test_try_from_ref<'bytes>(&mut self, _bytes: &'bytes [u8]) -> Option> { + assert_on_allowlist!( + test_try_from_ref($ty): + ManuallyDrop<[UnsafeCell]> + ); + + None + } + + #[allow(clippy::needless_lifetimes)] + fn test_try_from_mut<'bytes>(&mut self, _bytes: &'bytes mut [u8]) -> Option> { + assert_on_allowlist!( + test_try_from_mut($ty): + Option>>, + Option<&'static UnsafeCell>, + Option<&'static mut UnsafeCell>, + Option>>, + Option, + Option, + Option, + Option, + *const NotZerocopy, + *mut NotZerocopy + ); + + None + } + + fn test_try_read_from(&mut self, _bytes: &[u8]) -> Option> { + assert_on_allowlist!( + test_try_read_from($ty): + str, + ManuallyDrop<[u8]>, + ManuallyDrop<[bool]>, + ManuallyDrop<[UnsafeCell]>, + [u8], + [bool] + ); + + None + } + + fn test_as_bytes(&mut self, _t: &ReadOnly<$ty>) -> Option<&[u8]> { + assert_on_allowlist!( + test_as_bytes($ty): + Option<&'static UnsafeCell>, + Option<&'static mut UnsafeCell>, + Option>>, + Option>>, + Option, + Option, + Option, + Option, + CoreMaybeUninit, + CoreMaybeUninit, + CoreMaybeUninit>, + ManuallyDrop>, + ManuallyDrop<[UnsafeCell]>, + ManuallyDrop<[UnsafeCell]>, + Wrapping>, + *const NotZerocopy, + *mut NotZerocopy + ); + + None + } + } + + <$ty as TryFromBytesTestable>::with_passing_test_cases(|mut val| { + // FIXME(#494): These tests only get exercised for types + // which are `IntoBytes`. Once we implement #494, we should + // be able to support non-`IntoBytes` types by zeroing + // padding. 
+ + // We define `w` and `ww` since, in the case of the inherent + // methods, Rust thinks they're both borrowed mutably at the + // same time (given how we use them below). If we just + // defined a single `w` and used it for multiple operations, + // this would conflict. + // + // We `#[allow(unused_mut]` for the cases where the "real" + // impls are used, which take `&self`. + #[allow(unused_mut)] + let (mut w, mut ww) = (AutorefWrapper::<$ty>(PhantomData), AutorefWrapper::<$ty>(PhantomData)); + + let c = Ptr::from_ref(&*val); + let c = c.forget_aligned(); + // SAFETY: FIXME(#899): This is unsound. `$ty` is not + // necessarily `IntoBytes`, but that's the corner we've + // backed ourselves into by using `Ptr::from_ref`. + let c = unsafe { c.assume_initialized() }; + let res = w.test_is_bit_valid_shared(c); + if let Some(res) = res { + assert!(res, "{}::is_bit_valid (shared `Ptr`): got false, expected true", stringify!($ty)); + } + + let c = Ptr::from_mut(&mut *val); + let c = c.forget_aligned(); + // SAFETY: FIXME(#899): This is unsound. `$ty` is not + // necessarily `IntoBytes`, but that's the corner we've + // backed ourselves into by using `Ptr::from_ref`. + let mut c = unsafe { c.assume_initialized() }; + let res = <$ty as TryFromBytes>::is_bit_valid(c.reborrow_shared()); + assert!(res, "{}::is_bit_valid (exclusive `Ptr`): got false, expected true", stringify!($ty)); + + // `bytes` is `Some(val.as_bytes())` if `$ty: IntoBytes + + // Immutable` and `None` otherwise. + let bytes = w.test_as_bytes(&*val); + + // The inner closure returns + // `Some($ty::try_ref_from_bytes(bytes))` if `$ty: + // Immutable` and `None` otherwise. + let res = bytes.and_then(|bytes| ww.test_try_from_ref(bytes)); + if let Some(res) = res { + assert!(res.is_some(), "{}::try_ref_from_bytes: got `None`, expected `Some`", stringify!($ty)); + } + + if let Some(bytes) = bytes { + // We need to get a mutable byte slice, and so we clone + // into a `Vec`. 
However, we also need these bytes to + // satisfy `$ty`'s alignment requirement, which isn't + // guaranteed for `Vec`. In order to get around + // this, we create a `Vec` which is twice as long as we + // need. There is guaranteed to be an aligned byte range + // of size `size_of_val(val)` within that range. + let val = &*val; + let size = mem::size_of_val(val); + let align = mem::align_of_val(val); + + let mut vec = bytes.to_vec(); + vec.extend(bytes); + let slc = vec.as_slice(); + let offset = slc.as_ptr().align_offset(align); + let bytes_mut = &mut vec.as_mut_slice()[offset..offset+size]; + bytes_mut.copy_from_slice(bytes); + + let res = ww.test_try_from_mut(bytes_mut); + if let Some(res) = res { + assert!(res.is_some(), "{}::try_mut_from_bytes: got `None`, expected `Some`", stringify!($ty)); + } + } + + let res = bytes.and_then(|bytes| ww.test_try_read_from(bytes)); + if let Some(res) = res { + assert!(res.is_some(), "{}::try_read_from_bytes: got `None`, expected `Some`", stringify!($ty)); + } + }); + #[allow(clippy::as_conversions)] + <$ty as TryFromBytesTestable>::with_failing_test_cases(|c| { + #[allow(unused_mut)] // For cases where the "real" impls are used, which take `&self`. + let mut w = AutorefWrapper::<$ty>(PhantomData); + + // This is `Some($ty::try_ref_from_bytes(c))` if `$ty: + // Immutable` and `None` otherwise. 
+ let res = w.test_try_from_ref(c); + if let Some(res) = res { + assert!(res.is_none(), "{}::try_ref_from_bytes({:?}): got Some, expected None", stringify!($ty), c); + } + + let res = w.test_try_from_mut(c); + if let Some(res) = res { + assert!(res.is_none(), "{}::try_mut_from_bytes({:?}): got Some, expected None", stringify!($ty), c); + } + + + let res = w.test_try_read_from(c); + if let Some(res) = res { + assert!(res.is_none(), "{}::try_read_from_bytes({:?}): got Some, expected None", stringify!($ty), c); + } + }); + + #[allow(dead_code)] + const _: () = { static_assertions::assert_impl_all!($ty: TryFromBytes); }; + }; + ($ty:ty: $trait:ident) => { + #[allow(dead_code)] + const _: () = { static_assertions::assert_impl_all!($ty: $trait); }; + }; + ($ty:ty: !$trait:ident) => { + #[allow(dead_code)] + const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); }; + }; + ($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => { + $( + assert_impls!($ty: $trait); + )* + + $( + assert_impls!($ty: !$negative_trait); + )* + }; + } + + // NOTE: The negative impl assertions here are not necessarily + // prescriptive. They merely serve as change detectors to make sure + // we're aware of what trait impls are getting added with a given + // change. Of course, some impls would be invalid (e.g., `bool: + // FromBytes`), and so this change detection is very important. 
+ + assert_impls!( + (): KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned + ); + assert_impls!( + u8: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned + ); + assert_impls!( + i8: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned + ); + assert_impls!( + u16: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + i16: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + u32: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + i32: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + u64: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + i64: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + u128: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + i128: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + usize: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + isize: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + #[cfg(feature = "float-nightly")] + assert_impls!( + f16: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + f32: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + f64: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + 
!Unaligned + ); + #[cfg(feature = "float-nightly")] + assert_impls!( + f128: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + bool: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + IntoBytes, + Unaligned, + !FromBytes + ); + assert_impls!( + char: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + str: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + IntoBytes, + Unaligned, + !FromBytes + ); + + assert_impls!( + NonZeroU8: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + Unaligned, + !FromZeros, + !FromBytes + ); + assert_impls!( + NonZeroI8: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + Unaligned, + !FromZeros, + !FromBytes + ); + assert_impls!( + NonZeroU16: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroI16: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroU32: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroI32: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroU64: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroI64: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroU128: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroI128: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroUsize: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroIsize: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + + 
assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + + // Implements none of the ZC traits. + struct NotZerocopy; + + #[rustfmt::skip] + type FnManyArgs = fn( + NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, + ) -> (NotZerocopy, NotZerocopy); + + // Allowed, because we're not actually using this type for FFI. 
+ #[allow(improper_ctypes_definitions)] + #[rustfmt::skip] + type ECFnManyArgs = extern "C" fn( + NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, + ) -> (NotZerocopy, NotZerocopy); + + #[cfg(feature = "alloc")] + assert_impls!(Option>>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option]>>: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option<&'static UnsafeCell>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option<&'static [UnsafeCell]>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option<&'static mut UnsafeCell>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option<&'static mut [UnsafeCell]>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option>>: KnownLayout, TryFromBytes, FromZeros, Immutable, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option]>>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + + assert_impls!(PhantomData: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_impls!(PhantomData>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_impls!(PhantomData<[u8]>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); 
+ + assert_impls!(ManuallyDrop: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + // This test is important because it allows us to test our hand-rolled + // implementation of ` as TryFromBytes>::is_bit_valid`. + assert_impls!(ManuallyDrop: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); + assert_impls!(ManuallyDrop<[u8]>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + // This test is important because it allows us to test our hand-rolled + // implementation of ` as TryFromBytes>::is_bit_valid`. + assert_impls!(ManuallyDrop<[bool]>: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); + assert_impls!(ManuallyDrop: !Immutable, !TryFromBytes, !KnownLayout, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(ManuallyDrop<[NotZerocopy]>: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(ManuallyDrop>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable); + assert_impls!(ManuallyDrop<[UnsafeCell]>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable); + assert_impls!(ManuallyDrop<[UnsafeCell]>: KnownLayout, TryFromBytes, FromZeros, IntoBytes, Unaligned, !Immutable, !FromBytes); + + assert_impls!(CoreMaybeUninit: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, Unaligned, !IntoBytes); + assert_impls!(CoreMaybeUninit: KnownLayout, TryFromBytes, FromZeros, FromBytes, !Immutable, !IntoBytes, !Unaligned); + assert_impls!(CoreMaybeUninit>: KnownLayout, TryFromBytes, FromZeros, FromBytes, Unaligned, !Immutable, !IntoBytes); + + assert_impls!(Wrapping: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + // This test is important because it allows us to test our hand-rolled + // implementation of ` as TryFromBytes>::is_bit_valid`. 
+ assert_impls!(Wrapping: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); + assert_impls!(Wrapping: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Wrapping>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable); + + assert_impls!(Unalign: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + // This test is important because it allows us to test our hand-rolled + // implementation of ` as TryFromBytes>::is_bit_valid`. + assert_impls!(Unalign: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); + assert_impls!(Unalign: KnownLayout, Unaligned, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes); + + assert_impls!( + [u8]: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned + ); + assert_impls!( + [bool]: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + IntoBytes, + Unaligned, + !FromBytes + ); + assert_impls!([NotZerocopy]: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!( + [u8; 0]: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned, + ); + assert_impls!( + [NotZerocopy; 0]: KnownLayout, + !Immutable, + !TryFromBytes, + !FromZeros, + !FromBytes, + !IntoBytes, + !Unaligned + ); + assert_impls!( + [u8; 1]: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned, + ); + assert_impls!( + [NotZerocopy; 1]: KnownLayout, + !Immutable, + !TryFromBytes, + !FromZeros, + !FromBytes, + !IntoBytes, + !Unaligned + ); + + assert_impls!(*const NotZerocopy: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(*mut NotZerocopy: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(*const [NotZerocopy]: 
KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(*mut [NotZerocopy]: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(*const dyn Debug: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(*mut dyn Debug: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + + #[cfg(feature = "simd")] + { + #[allow(unused_macros)] + macro_rules! test_simd_arch_mod { + ($arch:ident, $($typ:ident),*) => { + { + use core::arch::$arch::{$($typ),*}; + use crate::*; + $( assert_impls!($typ: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); )* + } + }; + } + #[cfg(target_arch = "x86")] + test_simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i); + + #[cfg(all(not(no_zerocopy_simd_x86_avx12_1_89_0), target_arch = "x86"))] + test_simd_arch_mod!(x86, __m512bh, __m512, __m512d, __m512i); + + #[cfg(target_arch = "x86_64")] + test_simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i); + + #[cfg(all(not(no_zerocopy_simd_x86_avx12_1_89_0), target_arch = "x86_64"))] + test_simd_arch_mod!(x86_64, __m512bh, __m512, __m512d, __m512i); + + #[cfg(target_arch = "wasm32")] + test_simd_arch_mod!(wasm32, v128); + + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))] + test_simd_arch_mod!( + powerpc, + vector_bool_long, + vector_double, + vector_signed_long, + vector_unsigned_long + ); + + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))] + test_simd_arch_mod!( + powerpc64, + vector_bool_long, + vector_double, + vector_signed_long, + vector_unsigned_long + ); + #[cfg(all(target_arch = "aarch64", not(no_zerocopy_aarch64_simd_1_59_0)))] + #[rustfmt::skip] + test_simd_arch_mod!( + aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t, + int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, 
int8x16x3_t, int8x16x4_t, int16x4_t, + int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, + poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t, + poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t, + uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x4x2_t, uint16x4x3_t, + uint16x4x4_t, uint16x8_t, uint32x2_t, uint32x4_t, uint64x1_t, uint64x2_t + ); + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/layout.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/layout.rs new file mode 100644 index 0000000000000000000000000000000000000000..6c83676c80fe64af3ca3f9fa0da0ceecab4548cc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/layout.rs @@ -0,0 +1,2211 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use core::{mem, num::NonZeroUsize}; + +use crate::util; + +/// The target pointer width, counted in bits. +const POINTER_WIDTH_BITS: usize = mem::size_of::() * 8; + +/// The layout of a type which might be dynamically-sized. +/// +/// `DstLayout` describes the layout of sized types, slice types, and "slice +/// DSTs" - ie, those that are known by the type system to have a trailing slice +/// (as distinguished from `dyn Trait` types - such types *might* have a +/// trailing slice type, but the type system isn't aware of it). +/// +/// Note that `DstLayout` does not have any internal invariants, so no guarantee +/// is made that a `DstLayout` conforms to any of Rust's requirements regarding +/// the layout of real Rust types or instances of types. 
+#[doc(hidden)] +#[allow(missing_debug_implementations, missing_copy_implementations)] +#[cfg_attr(any(kani, test), derive(Debug, PartialEq, Eq))] +#[derive(Copy, Clone)] +pub struct DstLayout { + pub(crate) align: NonZeroUsize, + pub(crate) size_info: SizeInfo, + // Is it guaranteed statically (without knowing a value's runtime metadata) + // that the top-level type contains no padding? This does *not* apply + // recursively - for example, `[(u8, u16)]` has `statically_shallow_unpadded + // = true` even though this type likely has padding inside each `(u8, u16)`. + pub(crate) statically_shallow_unpadded: bool, +} + +#[cfg_attr(any(kani, test), derive(Debug, PartialEq, Eq))] +#[derive(Copy, Clone)] +pub(crate) enum SizeInfo { + Sized { size: usize }, + SliceDst(TrailingSliceLayout), +} + +#[cfg_attr(any(kani, test), derive(Debug, PartialEq, Eq))] +#[derive(Copy, Clone)] +pub(crate) struct TrailingSliceLayout { + // The offset of the first byte of the trailing slice field. Note that this + // is NOT the same as the minimum size of the type. For example, consider + // the following type: + // + // struct Foo { + // a: u16, + // b: u8, + // c: [u8], + // } + // + // In `Foo`, `c` is at byte offset 3. When `c.len() == 0`, `c` is followed + // by a padding byte. + pub(crate) offset: usize, + // The size of the element type of the trailing slice field. + pub(crate) elem_size: E, +} + +impl SizeInfo { + /// Attempts to create a `SizeInfo` from `Self` in which `elem_size` is a + /// `NonZeroUsize`. If `elem_size` is 0, returns `None`. 
+ #[allow(unused)] + const fn try_to_nonzero_elem_size(&self) -> Option> { + Some(match *self { + SizeInfo::Sized { size } => SizeInfo::Sized { size }, + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => { + if let Some(elem_size) = NonZeroUsize::new(elem_size) { + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) + } else { + return None; + } + } + }) + } +} + +#[doc(hidden)] +#[derive(Copy, Clone)] +#[cfg_attr(test, derive(Debug))] +#[allow(missing_debug_implementations)] +pub enum CastType { + Prefix, + Suffix, +} + +#[cfg_attr(test, derive(Debug))] +pub(crate) enum MetadataCastError { + Alignment, + Size, +} + +impl DstLayout { + /// The minimum possible alignment of a type. + const MIN_ALIGN: NonZeroUsize = match NonZeroUsize::new(1) { + Some(min_align) => min_align, + None => const_unreachable!(), + }; + + /// The maximum theoretic possible alignment of a type. + /// + /// For compatibility with future Rust versions, this is defined as the + /// maximum power-of-two that fits into a `usize`. See also + /// [`DstLayout::CURRENT_MAX_ALIGN`]. + pub(crate) const THEORETICAL_MAX_ALIGN: NonZeroUsize = + match NonZeroUsize::new(1 << (POINTER_WIDTH_BITS - 1)) { + Some(max_align) => max_align, + None => const_unreachable!(), + }; + + /// The current, documented max alignment of a type \[1\]. + /// + /// \[1\] Per : + /// + /// The alignment value must be a power of two from 1 up to + /// 229. + #[cfg(not(kani))] + #[cfg(not(target_pointer_width = "16"))] + pub(crate) const CURRENT_MAX_ALIGN: NonZeroUsize = match NonZeroUsize::new(1 << 28) { + Some(max_align) => max_align, + None => const_unreachable!(), + }; + + #[cfg(not(kani))] + #[cfg(target_pointer_width = "16")] + pub(crate) const CURRENT_MAX_ALIGN: NonZeroUsize = match NonZeroUsize::new(1 << 15) { + Some(max_align) => max_align, + None => const_unreachable!(), + }; + + /// Assumes that this layout lacks static shallow padding. 
+ /// + /// # Panics + /// + /// This method does not panic. + /// + /// # Safety + /// + /// If `self` describes the size and alignment of type that lacks static + /// shallow padding, unsafe code may assume that the result of this method + /// accurately reflects the size, alignment, and lack of static shallow + /// padding of that type. + const fn assume_shallow_unpadded(self) -> Self { + Self { statically_shallow_unpadded: true, ..self } + } + + /// Constructs a `DstLayout` for a zero-sized type with `repr_align` + /// alignment (or 1). If `repr_align` is provided, then it must be a power + /// of two. + /// + /// # Panics + /// + /// This function panics if the supplied `repr_align` is not a power of two. + /// + /// # Safety + /// + /// Unsafe code may assume that the contract of this function is satisfied. + #[doc(hidden)] + #[must_use] + #[inline] + pub const fn new_zst(repr_align: Option) -> DstLayout { + let align = match repr_align { + Some(align) => align, + None => Self::MIN_ALIGN, + }; + + const_assert!(align.get().is_power_of_two()); + + DstLayout { + align, + size_info: SizeInfo::Sized { size: 0 }, + statically_shallow_unpadded: true, + } + } + + /// Constructs a `DstLayout` which describes `T` and assumes `T` may contain + /// padding. + /// + /// # Safety + /// + /// Unsafe code may assume that `DstLayout` is the correct layout for `T`. + #[doc(hidden)] + #[must_use] + #[inline] + pub const fn for_type() -> DstLayout { + // SAFETY: `align` is correct by construction. `T: Sized`, and so it is + // sound to initialize `size_info` to `SizeInfo::Sized { size }`; the + // `size` field is also correct by construction. `unpadded` can safely + // default to `false`. 
+ DstLayout { + align: match NonZeroUsize::new(mem::align_of::()) { + Some(align) => align, + None => const_unreachable!(), + }, + size_info: SizeInfo::Sized { size: mem::size_of::() }, + statically_shallow_unpadded: false, + } + } + + /// Constructs a `DstLayout` which describes a `T` that does not contain + /// padding. + /// + /// # Safety + /// + /// Unsafe code may assume that `DstLayout` is the correct layout for `T`. + #[doc(hidden)] + #[must_use] + #[inline] + pub const fn for_unpadded_type() -> DstLayout { + Self::for_type::().assume_shallow_unpadded() + } + + /// Constructs a `DstLayout` which describes `[T]`. + /// + /// # Safety + /// + /// Unsafe code may assume that `DstLayout` is the correct layout for `[T]`. + pub(crate) const fn for_slice() -> DstLayout { + // SAFETY: The alignment of a slice is equal to the alignment of its + // element type, and so `align` is initialized correctly. + // + // Since this is just a slice type, there is no offset between the + // beginning of the type and the beginning of the slice, so it is + // correct to set `offset: 0`. The `elem_size` is correct by + // construction. Since `[T]` is a (degenerate case of a) slice DST, it + // is correct to initialize `size_info` to `SizeInfo::SliceDst`. + DstLayout { + align: match NonZeroUsize::new(mem::align_of::()) { + Some(align) => align, + None => const_unreachable!(), + }, + size_info: SizeInfo::SliceDst(TrailingSliceLayout { + offset: 0, + elem_size: mem::size_of::(), + }), + statically_shallow_unpadded: true, + } + } + + /// Constructs a complete `DstLayout` reflecting a `repr(C)` struct with the + /// given alignment modifiers and fields. + /// + /// This method cannot be used to match the layout of a record with the + /// default representation, as that representation is mostly unspecified. 
+ /// + /// # Safety + /// + /// For any definition of a `repr(C)` struct, if this method is invoked with + /// alignment modifiers and fields corresponding to that definition, the + /// resulting `DstLayout` will correctly encode the layout of that struct. + /// + /// We make no guarantees to the behavior of this method when it is invoked + /// with arguments that cannot correspond to a valid `repr(C)` struct. + #[must_use] + #[inline] + pub const fn for_repr_c_struct( + repr_align: Option, + repr_packed: Option, + fields: &[DstLayout], + ) -> DstLayout { + let mut layout = DstLayout::new_zst(repr_align); + + let mut i = 0; + #[allow(clippy::arithmetic_side_effects)] + while i < fields.len() { + #[allow(clippy::indexing_slicing)] + let field = fields[i]; + layout = layout.extend(field, repr_packed); + i += 1; + } + + layout = layout.pad_to_align(); + + // SAFETY: `layout` accurately describes the layout of a `repr(C)` + // struct with `repr_align` or `repr_packed` alignment modifications and + // the given `fields`. The `layout` is constructed using a sequence of + // invocations of `DstLayout::{new_zst,extend,pad_to_align}`. The + // documentation of these items vows that invocations in this manner + // will accurately describe a type, so long as: + // + // - that type is `repr(C)`, + // - its fields are enumerated in the order they appear, + // - the presence of `repr_align` and `repr_packed` are correctly accounted for. + // + // We respect all three of these preconditions above. + layout + } + + /// Like `Layout::extend`, this creates a layout that describes a record + /// whose layout consists of `self` followed by `next` that includes the + /// necessary inter-field padding, but not any trailing padding. + /// + /// In order to match the layout of a `#[repr(C)]` struct, this method + /// should be invoked for each field in declaration order. To add trailing + /// padding, call `DstLayout::pad_to_align` after extending the layout for + /// all fields. 
If `self` corresponds to a type marked with + /// `repr(packed(N))`, then `repr_packed` should be set to `Some(N)`, + /// otherwise `None`. + /// + /// This method cannot be used to match the layout of a record with the + /// default representation, as that representation is mostly unspecified. + /// + /// # Safety + /// + /// If a (potentially hypothetical) valid `repr(C)` Rust type begins with + /// fields whose layout are `self`, and those fields are immediately + /// followed by a field whose layout is `field`, then unsafe code may rely + /// on `self.extend(field, repr_packed)` producing a layout that correctly + /// encompasses those two components. + /// + /// We make no guarantees to the behavior of this method if these fragments + /// cannot appear in a valid Rust type (e.g., the concatenation of the + /// layouts would lead to a size larger than `isize::MAX`). + #[doc(hidden)] + #[must_use] + #[inline] + pub const fn extend(self, field: DstLayout, repr_packed: Option) -> Self { + use util::{max, min, padding_needed_for}; + + // If `repr_packed` is `None`, there are no alignment constraints, and + // the value can be defaulted to `THEORETICAL_MAX_ALIGN`. + let max_align = match repr_packed { + Some(max_align) => max_align, + None => Self::THEORETICAL_MAX_ALIGN, + }; + + const_assert!(max_align.get().is_power_of_two()); + + // We use Kani to prove that this method is robust to future increases + // in Rust's maximum allowed alignment. However, if such a change ever + // actually occurs, we'd like to be notified via assertion failures. + #[cfg(not(kani))] + { + const_debug_assert!(self.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get()); + const_debug_assert!(field.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get()); + if let Some(repr_packed) = repr_packed { + const_debug_assert!(repr_packed.get() <= DstLayout::CURRENT_MAX_ALIGN.get()); + } + } + + // The field's alignment is clamped by `repr_packed` (i.e., the + // `repr(packed(N))` attribute, if any) [1]. 
+ // + // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers: + // + // The alignments of each field, for the purpose of positioning + // fields, is the smaller of the specified alignment and the alignment + // of the field's type. + let field_align = min(field.align, max_align); + + // The struct's alignment is the maximum of its previous alignment and + // `field_align`. + let align = max(self.align, field_align); + + let (interfield_padding, size_info) = match self.size_info { + // If the layout is already a DST, we panic; DSTs cannot be extended + // with additional fields. + SizeInfo::SliceDst(..) => const_panic!("Cannot extend a DST with additional fields."), + + SizeInfo::Sized { size: preceding_size } => { + // Compute the minimum amount of inter-field padding needed to + // satisfy the field's alignment, and offset of the trailing + // field. [1] + // + // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers: + // + // Inter-field padding is guaranteed to be the minimum + // required in order to satisfy each field's (possibly + // altered) alignment. + let padding = padding_needed_for(preceding_size, field_align); + + // This will not panic (and is proven to not panic, with Kani) + // if the layout components can correspond to a leading layout + // fragment of a valid Rust type, but may panic otherwise (e.g., + // combining or aligning the components would create a size + // exceeding `isize::MAX`). + let offset = match preceding_size.checked_add(padding) { + Some(offset) => offset, + None => const_panic!("Adding padding to `self`'s size overflows `usize`."), + }; + + ( + padding, + match field.size_info { + SizeInfo::Sized { size: field_size } => { + // If the trailing field is sized, the resulting layout + // will be sized. Its size will be the sum of the + // preceding layout, the size of the new field, and the + // size of inter-field padding between the two. 
+ // + // This will not panic (and is proven with Kani to not + // panic) if the layout components can correspond to a + // leading layout fragment of a valid Rust type, but may + // panic otherwise (e.g., combining or aligning the + // components would create a size exceeding + // `usize::MAX`). + let size = match offset.checked_add(field_size) { + Some(size) => size, + None => const_panic!("`field` cannot be appended without the total size overflowing `usize`"), + }; + SizeInfo::Sized { size } + } + SizeInfo::SliceDst(TrailingSliceLayout { + offset: trailing_offset, + elem_size, + }) => { + // If the trailing field is dynamically sized, so too + // will the resulting layout. The offset of the trailing + // slice component is the sum of the offset of the + // trailing field and the trailing slice offset within + // that field. + // + // This will not panic (and is proven with Kani to not + // panic) if the layout components can correspond to a + // leading layout fragment of a valid Rust type, but may + // panic otherwise (e.g., combining or aligning the + // components would create a size exceeding + // `usize::MAX`). + let offset = match offset.checked_add(trailing_offset) { + Some(offset) => offset, + None => const_panic!("`field` cannot be appended without the total size overflowing `usize`"), + }; + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) + } + }, + ) + } + }; + + let statically_shallow_unpadded = self.statically_shallow_unpadded + && field.statically_shallow_unpadded + && interfield_padding == 0; + + DstLayout { align, size_info, statically_shallow_unpadded } + } + + /// Like `Layout::pad_to_align`, this routine rounds the size of this layout + /// up to the nearest multiple of this type's alignment or `repr_packed` + /// (whichever is less). This method leaves DST layouts unchanged, since the + /// trailing padding of DSTs is computed at runtime. 
+ /// + /// The accompanying boolean is `true` if the resulting composition of + /// fields necessitated static (as opposed to dynamic) padding; otherwise + /// `false`. + /// + /// In order to match the layout of a `#[repr(C)]` struct, this method + /// should be invoked after the invocations of [`DstLayout::extend`]. If + /// `self` corresponds to a type marked with `repr(packed(N))`, then + /// `repr_packed` should be set to `Some(N)`, otherwise `None`. + /// + /// This method cannot be used to match the layout of a record with the + /// default representation, as that representation is mostly unspecified. + /// + /// # Safety + /// + /// If a (potentially hypothetical) valid `repr(C)` type begins with fields + /// whose layout are `self` followed only by zero or more bytes of trailing + /// padding (not included in `self`), then unsafe code may rely on + /// `self.pad_to_align(repr_packed)` producing a layout that correctly + /// encapsulates the layout of that type. + /// + /// We make no guarantees to the behavior of this method if `self` cannot + /// appear in a valid Rust type (e.g., because the addition of trailing + /// padding would lead to a size larger than `isize::MAX`). + #[doc(hidden)] + #[must_use] + #[inline] + pub const fn pad_to_align(self) -> Self { + use util::padding_needed_for; + + let (static_padding, size_info) = match self.size_info { + // For sized layouts, we add the minimum amount of trailing padding + // needed to satisfy alignment. + SizeInfo::Sized { size: unpadded_size } => { + let padding = padding_needed_for(unpadded_size, self.align); + let size = match unpadded_size.checked_add(padding) { + Some(size) => size, + None => const_panic!("Adding padding caused size to overflow `usize`."), + }; + (padding, SizeInfo::Sized { size }) + } + // For DST layouts, trailing padding depends on the length of the + // trailing DST and is computed at runtime. 
This does not alter the + // offset or element size of the layout, so we leave `size_info` + // unchanged. + size_info @ SizeInfo::SliceDst(_) => (0, size_info), + }; + + let statically_shallow_unpadded = self.statically_shallow_unpadded && static_padding == 0; + + DstLayout { align: self.align, size_info, statically_shallow_unpadded } + } + + /// Produces `true` if `self` requires static padding; otherwise `false`. + #[must_use] + #[inline(always)] + pub const fn requires_static_padding(self) -> bool { + !self.statically_shallow_unpadded + } + + /// Produces `true` if there exists any metadata for which a type of layout + /// `self` would require dynamic trailing padding; otherwise `false`. + #[must_use] + #[inline(always)] + pub const fn requires_dynamic_padding(self) -> bool { + // A `% self.align.get()` cannot panic, since `align` is non-zero. + #[allow(clippy::arithmetic_side_effects)] + match self.size_info { + SizeInfo::Sized { .. } => false, + SizeInfo::SliceDst(trailing_slice_layout) => { + // SAFETY: This predicate is formally proved sound by + // `proofs::prove_requires_dynamic_padding`. + trailing_slice_layout.offset % self.align.get() != 0 + || trailing_slice_layout.elem_size % self.align.get() != 0 + } + } + } + + /// Validates that a cast is sound from a layout perspective. + /// + /// Validates that the size and alignment requirements of a type with the + /// layout described in `self` would not be violated by performing a + /// `cast_type` cast from a pointer with address `addr` which refers to a + /// memory region of size `bytes_len`. + /// + /// If the cast is valid, `validate_cast_and_convert_metadata` returns + /// `(elems, split_at)`. If `self` describes a dynamically-sized type, then + /// `elems` is the maximum number of trailing slice elements for which a + /// cast would be valid (for sized types, `elem` is meaningless and should + /// be ignored). 
`split_at` is the index at which to split the memory region + /// in order for the prefix (suffix) to contain the result of the cast, and + /// in order for the remaining suffix (prefix) to contain the leftover + /// bytes. + /// + /// There are three conditions under which a cast can fail: + /// - The smallest possible value for the type is larger than the provided + /// memory region + /// - A prefix cast is requested, and `addr` does not satisfy `self`'s + /// alignment requirement + /// - A suffix cast is requested, and `addr + bytes_len` does not satisfy + /// `self`'s alignment requirement (as a consequence, since all instances + /// of the type are a multiple of its alignment, no size for the type will + /// result in a starting address which is properly aligned) + /// + /// # Safety + /// + /// The caller may assume that this implementation is correct, and may rely + /// on that assumption for the soundness of their code. In particular, the + /// caller may assume that, if `validate_cast_and_convert_metadata` returns + /// `Some((elems, split_at))`, then: + /// - A pointer to the type (for dynamically sized types, this includes + /// `elems` as its pointer metadata) describes an object of size `size <= + /// bytes_len` + /// - If this is a prefix cast: + /// - `addr` satisfies `self`'s alignment + /// - `size == split_at` + /// - If this is a suffix cast: + /// - `split_at == bytes_len - size` + /// - `addr + split_at` satisfies `self`'s alignment + /// + /// Note that this method does *not* ensure that a pointer constructed from + /// its return values will be a valid pointer. In particular, this method + /// does not reason about `isize` overflow, which is a requirement of many + /// Rust pointer APIs, and may at some point be determined to be a validity + /// invariant of pointer types themselves. 
This should never be a problem so + /// long as the arguments to this method are derived from a known-valid + /// pointer (e.g., one derived from a safe Rust reference), but it is + /// nonetheless the caller's responsibility to justify that pointer + /// arithmetic will not overflow based on a safety argument *other than* the + /// mere fact that this method returned successfully. + /// + /// # Panics + /// + /// `validate_cast_and_convert_metadata` will panic if `self` describes a + /// DST whose trailing slice element is zero-sized. + /// + /// If `addr + bytes_len` overflows `usize`, + /// `validate_cast_and_convert_metadata` may panic, or it may return + /// incorrect results. No guarantees are made about when + /// `validate_cast_and_convert_metadata` will panic. The caller should not + /// rely on `validate_cast_and_convert_metadata` panicking in any particular + /// condition, even if `debug_assertions` are enabled. + #[allow(unused)] + #[inline(always)] + pub(crate) const fn validate_cast_and_convert_metadata( + &self, + addr: usize, + bytes_len: usize, + cast_type: CastType, + ) -> Result<(usize, usize), MetadataCastError> { + // `debug_assert!`, but with `#[allow(clippy::arithmetic_side_effects)]`. + macro_rules! __const_debug_assert { + ($e:expr $(, $msg:expr)?) => { + const_debug_assert!({ + #[allow(clippy::arithmetic_side_effects)] + let e = $e; + e + } $(, $msg)?); + }; + } + + // Note that, in practice, `self` is always a compile-time constant. We + // do this check earlier than needed to ensure that we always panic as a + // result of bugs in the program (such as calling this function on an + // invalid type) instead of allowing this panic to be hidden if the cast + // would have failed anyway for runtime reasons (such as a too-small + // memory region). 
+ // + // FIXME(#67): Once our MSRV is 1.65, use let-else: + // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements + let size_info = match self.size_info.try_to_nonzero_elem_size() { + Some(size_info) => size_info, + None => const_panic!("attempted to cast to slice type with zero-sized element"), + }; + + // Precondition + __const_debug_assert!( + addr.checked_add(bytes_len).is_some(), + "`addr` + `bytes_len` > usize::MAX" + ); + + // Alignment checks go in their own block to avoid introducing variables + // into the top-level scope. + { + // We check alignment for `addr` (for prefix casts) or `addr + + // bytes_len` (for suffix casts). For a prefix cast, the correctness + // of this check is trivial - `addr` is the address the object will + // live at. + // + // For a suffix cast, we know that all valid sizes for the type are + // a multiple of the alignment (and by safety precondition, we know + // `DstLayout` may only describe valid Rust types). Thus, a + // validly-sized instance which lives at a validly-aligned address + // must also end at a validly-aligned address. Thus, if the end + // address for a suffix cast (`addr + bytes_len`) is not aligned, + // then no valid start address will be aligned either. + let offset = match cast_type { + CastType::Prefix => 0, + CastType::Suffix => bytes_len, + }; + + // Addition is guaranteed not to overflow because `offset <= + // bytes_len`, and `addr + bytes_len <= usize::MAX` is a + // precondition of this method. Modulus is guaranteed not to divide + // by 0 because `align` is non-zero. 
+ #[allow(clippy::arithmetic_side_effects)] + if (addr + offset) % self.align.get() != 0 { + return Err(MetadataCastError::Alignment); + } + } + + let (elems, self_bytes) = match size_info { + SizeInfo::Sized { size } => { + if size > bytes_len { + return Err(MetadataCastError::Size); + } + (0, size) + } + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => { + // Calculate the maximum number of bytes that could be consumed + // - any number of bytes larger than this will either not be a + // multiple of the alignment, or will be larger than + // `bytes_len`. + let max_total_bytes = + util::round_down_to_next_multiple_of_alignment(bytes_len, self.align); + // Calculate the maximum number of bytes that could be consumed + // by the trailing slice. + // + // FIXME(#67): Once our MSRV is 1.65, use let-else: + // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements + let max_slice_and_padding_bytes = match max_total_bytes.checked_sub(offset) { + Some(max) => max, + // `bytes_len` too small even for 0 trailing slice elements. + None => return Err(MetadataCastError::Size), + }; + + // Calculate the number of elements that fit in + // `max_slice_and_padding_bytes`; any remaining bytes will be + // considered padding. + // + // Guaranteed not to divide by zero: `elem_size` is non-zero. + #[allow(clippy::arithmetic_side_effects)] + let elems = max_slice_and_padding_bytes / elem_size.get(); + // Guaranteed not to overflow on multiplication: `usize::MAX >= + // max_slice_and_padding_bytes >= (max_slice_and_padding_bytes / + // elem_size) * elem_size`. 
+ // + // Guaranteed not to overflow on addition: + // - max_slice_and_padding_bytes == max_total_bytes - offset + // - elems * elem_size <= max_slice_and_padding_bytes == max_total_bytes - offset + // - elems * elem_size + offset <= max_total_bytes <= usize::MAX + #[allow(clippy::arithmetic_side_effects)] + let without_padding = offset + elems * elem_size.get(); + // `self_bytes` is equal to the offset bytes plus the bytes + // consumed by the trailing slice plus any padding bytes + // required to satisfy the alignment. Note that we have computed + // the maximum number of trailing slice elements that could fit + // in `self_bytes`, so any padding is guaranteed to be less than + // the size of an extra element. + // + // Guaranteed not to overflow: + // - By previous comment: without_padding == elems * elem_size + + // offset <= max_total_bytes + // - By construction, `max_total_bytes` is a multiple of + // `self.align`. + // - At most, adding padding needed to round `without_padding` + // up to the next multiple of the alignment will bring + // `self_bytes` up to `max_total_bytes`. + #[allow(clippy::arithmetic_side_effects)] + let self_bytes = + without_padding + util::padding_needed_for(without_padding, self.align); + (elems, self_bytes) + } + }; + + __const_debug_assert!(self_bytes <= bytes_len); + + let split_at = match cast_type { + CastType::Prefix => self_bytes, + // Guaranteed not to underflow: + // - In the `Sized` branch, only returns `size` if `size <= + // bytes_len`. + // - In the `SliceDst` branch, calculates `self_bytes <= + // max_toatl_bytes`, which is upper-bounded by `bytes_len`. 
+ #[allow(clippy::arithmetic_side_effects)] + CastType::Suffix => bytes_len - self_bytes, + }; + + Ok((elems, split_at)) + } +} + +pub(crate) use cast_from::CastFrom; +mod cast_from { + use crate::*; + + pub(crate) struct CastFrom { + _never: core::convert::Infallible, + _marker: PhantomData, + } + + // SAFETY: The implementation of `Project::project` preserves the address + // of the referent – it only modifies pointer metadata. + unsafe impl crate::pointer::cast::Cast for CastFrom + where + Src: KnownLayout + ?Sized, + Dst: KnownLayout + ?Sized, + { + } + + // SAFETY: The implementation of `Project::project` preserves the size of + // the referent (see inline comments for a more detailed proof of this). + unsafe impl crate::pointer::cast::CastExact for CastFrom + where + Src: KnownLayout + ?Sized, + Dst: KnownLayout + ?Sized, + { + } + + // SAFETY: `project` produces a pointer which refers to the same referent + // bytes as its input, or to a subset of them (see inline comments for a + // more detailed proof of this). It does this using provenance-preserving + // operations. + unsafe impl crate::pointer::cast::Project for CastFrom + where + Src: KnownLayout + ?Sized, + Dst: KnownLayout + ?Sized, + { + /// # PME + /// + /// Generates a post-monomorphization error if it is not possible to + /// implement soundly. + // + // FIXME(#1817): Support Sized->Unsized and Unsized->Sized casts + fn project(src: PtrInner<'_, Src>) -> *mut Dst { + /// The parameters required in order to perform a pointer cast from + /// `Src` to `Dst`. + /// + /// These are a compile-time function of the layouts of `Src` + /// and `Dst`. + /// + /// # Safety + /// + /// `Src`'s alignment must not be smaller than `Dst`'s alignment. 
+ struct CastParams { + inner: CastParamsInner, + _src: PhantomData, + _dst: PhantomData, + } + + #[derive(Copy, Clone)] + enum CastParamsInner { + // At compile time (specifically, post-monomorphization time), + // we need to compute two things: + // - Whether, given *any* `*Src`, it is possible to construct a + // `*Dst` which addresses the same number of bytes (ie, + // whether, for any `Src` pointer metadata, there exists `Dst` + // pointer metadata that addresses the same number of bytes) + // - If this is possible, any information necessary to perform + // the `Src`->`Dst` metadata conversion at runtime. + // + // Assume that `Src` and `Dst` are slice DSTs, and define: + // - `S_OFF = Src::LAYOUT.size_info.offset` + // - `S_ELEM = Src::LAYOUT.size_info.elem_size` + // - `D_OFF = Dst::LAYOUT.size_info.offset` + // - `D_ELEM = Dst::LAYOUT.size_info.elem_size` + // + // We are trying to solve the following equation: + // + // D_OFF + d_meta * D_ELEM = S_OFF + s_meta * S_ELEM + // + // At runtime, we will be attempting to compute `d_meta`, given + // `s_meta` (a runtime value) and all other parameters (which + // are compile-time values). We can solve like so: + // + // D_OFF + d_meta * D_ELEM = S_OFF + s_meta * S_ELEM + // + // d_meta * D_ELEM = S_OFF - D_OFF + s_meta * S_ELEM + // + // d_meta = (S_OFF - D_OFF + s_meta * S_ELEM)/D_ELEM + // + // Since `d_meta` will be a `usize`, we need the right-hand side + // to be an integer, and this needs to hold for *any* value of + // `s_meta` (in order for our conversion to be infallible - ie, + // to not have to reject certain values of `s_meta` at runtime). + // This means that: + // + // - `s_meta * S_ELEM` must be a multiple of `D_ELEM` + // - Since this must hold for any value of `s_meta`, `S_ELEM` + // must be a multiple of `D_ELEM` + // - `S_OFF - D_OFF` must be a multiple of `D_ELEM` + // + // Thus, let `OFFSET_DELTA_ELEMS = (S_OFF - D_OFF)/D_ELEM` and + // `ELEM_MULTIPLE = S_ELEM/D_ELEM`. 
We can rewrite the above + // expression as: + // + // d_meta = (S_OFF - D_OFF + s_meta * S_ELEM)/D_ELEM + // + // d_meta = OFFSET_DELTA_ELEMS + s_meta * ELEM_MULTIPLE + // + // Thus, we just need to compute the following and confirm that + // they have integer solutions in order to both a) determine + // whether infallible `Src` -> `Dst` casts are possible and, b) + // pre-compute the parameters necessary to perform those casts + // at runtime. These parameters are encapsulated in + // `CastParams`, which acts as a witness that such infallible + // casts are possible. + /// The parameters required in order to perform an + /// unsized-to-unsized pointer cast from `Src` to `Dst` as + /// described above. + /// + /// # Safety + /// + /// `Src` and `Dst` must both be slice DSTs. + /// + /// `offset_delta_elems` and `elem_multiple` must be valid as + /// described above. + UnsizedToUnsized { offset_delta_elems: usize, elem_multiple: usize }, + + /// The metadata of a `Dst` which has the same size as `Src: + /// Sized`. + /// + /// # Safety + /// + /// `Src: Sized` and `Dst` must be a slice DST. + /// + /// A raw `Dst` pointer with metadata `dst_meta` must address + /// `size_of::()` bytes. + SizedToUnsized { dst_meta: usize }, + + /// The metadata of a `Dst` which has the same size as `Src: + /// Sized`. + /// + /// # Safety + /// + /// `Src` and `Dst` must both be `Sized` and `size_of::() + /// == size_of::()`. + SizedToSized, + } + + impl Copy for CastParams {} + impl Clone for CastParams { + fn clone(&self) -> Self { + *self + } + } + + impl CastParams { + const fn try_compute( + src: &DstLayout, + dst: &DstLayout, + ) -> Option> { + if src.align.get() < dst.align.get() { + return None; + } + + let inner = match (src.size_info, dst.size_info) { + ( + SizeInfo::Sized { size: src_size }, + SizeInfo::Sized { size: dst_size }, + ) => { + if src_size != dst_size { + return None; + } + + // SAFETY: We checked above that `src_size == + // dst_size`. 
+ CastParamsInner::SizedToSized + } + (SizeInfo::Sized { size: src_size }, SizeInfo::SliceDst(dst)) => { + let offset_delta = if let Some(od) = src_size.checked_sub(dst.offset) { + od + } else { + return None; + }; + + let dst_elem_size = if let Some(e) = NonZeroUsize::new(dst.elem_size) { + e + } else { + return None; + }; + + // PANICS: `dst_elem_size: NonZeroUsize`, so this won't + // divide by zero. + #[allow(clippy::arithmetic_side_effects)] + let delta_mod_other_elem = offset_delta % dst_elem_size.get(); + + if delta_mod_other_elem != 0 { + return None; + } + + // PANICS: `dst_elem_size: NonZeroUsize`, so this won't + // divide by zero. + #[allow(clippy::arithmetic_side_effects)] + let dst_meta = offset_delta / dst_elem_size.get(); + + // SAFETY: The preceding math ensures that a `Dst` + // with `dst_meta` addresses `src_size` bytes. + CastParamsInner::SizedToUnsized { dst_meta } + } + (SizeInfo::SliceDst(src), SizeInfo::SliceDst(dst)) => { + let offset_delta = if let Some(od) = src.offset.checked_sub(dst.offset) + { + od + } else { + return None; + }; + + let dst_elem_size = if let Some(e) = NonZeroUsize::new(dst.elem_size) { + e + } else { + return None; + }; + + // PANICS: `dst_elem_size: NonZeroUsize`, so this won't + // divide by zero. + #[allow(clippy::arithmetic_side_effects)] + let delta_mod_other_elem = offset_delta % dst_elem_size.get(); + + // PANICS: `dst_elem_size: NonZeroUsize`, so this won't + // divide by zero. + #[allow(clippy::arithmetic_side_effects)] + let elem_remainder = src.elem_size % dst_elem_size.get(); + + if delta_mod_other_elem != 0 + || src.elem_size < dst.elem_size + || elem_remainder != 0 + { + return None; + } + + // PANICS: `dst_elem_size: NonZeroUsize`, so this won't + // divide by zero. + #[allow(clippy::arithmetic_side_effects)] + let offset_delta_elems = offset_delta / dst_elem_size.get(); + + // PANICS: `dst_elem_size: NonZeroUsize`, so this won't + // divide by zero. 
+ #[allow(clippy::arithmetic_side_effects)] + let elem_multiple = src.elem_size / dst_elem_size.get(); + + CastParamsInner::UnsizedToUnsized { + // SAFETY: We checked above that this is an exact ratio. + offset_delta_elems, + // SAFETY: We checked above that this is an exact ratio. + elem_multiple, + } + } + _ => return None, + }; + + // SAFETY: We checked above that `src.align >= dst.align`. + Some(CastParams { inner, _src: PhantomData, _dst: PhantomData }) + } + } + + impl CastParams { + /// # Safety + /// + /// `src_meta` describes a `Src` whose size is no larger than + /// `isize::MAX`. + /// + /// The returned metadata describes a `Dst` of the same size as + /// the original `Src`. + #[inline(always)] + unsafe fn cast_metadata( + self, + src_meta: Src::PointerMetadata, + ) -> Dst::PointerMetadata { + #[allow(unused)] + use crate::util::polyfills::*; + + let dst_meta = match self.inner { + CastParamsInner::UnsizedToUnsized { offset_delta_elems, elem_multiple } => { + let src_meta = src_meta.to_elem_count(); + #[allow( + unstable_name_collisions, + clippy::multiple_unsafe_ops_per_block + )] + // SAFETY: `self` is a witness that the following + // equation holds: + // + // D_OFF + d_meta * D_ELEM = S_OFF + s_meta * S_ELEM + // + // Since the caller promises that `src_meta` is + // valid `Src` metadata, this math will not + // overflow, and the returned value will describe a + // `Dst` of the same size. 
+ unsafe { + offset_delta_elems + .unchecked_add(src_meta.unchecked_mul(elem_multiple)) + } + } + CastParamsInner::SizedToUnsized { dst_meta } => dst_meta, + CastParamsInner::SizedToSized => 0, + }; + Dst::PointerMetadata::from_elem_count(dst_meta) + } + } + + trait Params { + const CAST_PARAMS: CastParams; + } + + impl Params for Dst + where + Src: KnownLayout + ?Sized, + Dst: KnownLayout + ?Sized, + { + const CAST_PARAMS: CastParams = + match CastParams::try_compute(&Src::LAYOUT, &Dst::LAYOUT) { + Some(params) => params, + None => const_panic!( + "cannot `transmute_ref!` or `transmute_mut!` between incompatible types" + ), + }; + } + + let src_meta = ::pointer_to_metadata(src.as_ptr()); + let params = >::CAST_PARAMS; + + // SAFETY: `src: PtrInner` guarantees that `src`'s referent is zero + // bytes or lives in a single allocation, which means that it is no + // larger than `isize::MAX` bytes [1]. + // + // [1] https://doc.rust-lang.org/1.92.0/std/ptr/index.html#allocation + let dst_meta = unsafe { params.cast_metadata(src_meta) }; + + ::raw_from_ptr_len(src.as_non_null().cast(), dst_meta).as_ptr() + } + } +} + +// FIXME(#67): For some reason, on our MSRV toolchain, this `allow` isn't +// enforced despite having `#![allow(unknown_lints)]` at the crate root, but +// putting it here works. Once our MSRV is high enough that this bug has been +// fixed, remove this `allow`. +#[allow(unknown_lints)] +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_dst_layout_for_slice() { + let layout = DstLayout::for_slice::(); + match layout.size_info { + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => { + assert_eq!(offset, 0); + assert_eq!(elem_size, 4); + } + _ => panic!("Expected SliceDst"), + } + assert_eq!(layout.align.get(), 4); + } + + /// Tests of when a sized `DstLayout` is extended with a sized field. 
+ #[allow(clippy::decimal_literal_representation)] + #[test] + fn test_dst_layout_extend_sized_with_sized() { + // This macro constructs a layout corresponding to a `u8` and extends it + // with a zero-sized trailing field of given alignment `n`. The macro + // tests that the resulting layout has both size and alignment `min(n, + // P)` for all valid values of `repr(packed(P))`. + macro_rules! test_align_is_size { + ($n:expr) => { + let base = DstLayout::for_type::(); + let trailing_field = DstLayout::for_type::>(); + + let packs = + core::iter::once(None).chain((0..29).map(|p| NonZeroUsize::new(2usize.pow(p)))); + + for pack in packs { + let composite = base.extend(trailing_field, pack); + let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN); + let align = $n.min(max_align.get()); + assert_eq!( + composite, + DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: SizeInfo::Sized { size: align }, + statically_shallow_unpadded: false, + } + ) + } + }; + } + + test_align_is_size!(1); + test_align_is_size!(2); + test_align_is_size!(4); + test_align_is_size!(8); + test_align_is_size!(16); + test_align_is_size!(32); + test_align_is_size!(64); + test_align_is_size!(128); + test_align_is_size!(256); + test_align_is_size!(512); + test_align_is_size!(1024); + test_align_is_size!(2048); + test_align_is_size!(4096); + test_align_is_size!(8192); + test_align_is_size!(16384); + test_align_is_size!(32768); + test_align_is_size!(65536); + test_align_is_size!(131072); + test_align_is_size!(262144); + test_align_is_size!(524288); + test_align_is_size!(1048576); + test_align_is_size!(2097152); + test_align_is_size!(4194304); + test_align_is_size!(8388608); + test_align_is_size!(16777216); + test_align_is_size!(33554432); + test_align_is_size!(67108864); + test_align_is_size!(33554432); + test_align_is_size!(134217728); + test_align_is_size!(268435456); + } + + /// Tests of when a sized `DstLayout` is extended with a DST field. 
+ #[test] + fn test_dst_layout_extend_sized_with_dst() { + // Test that for all combinations of real-world alignments and + // `repr_packed` values, that the extension of a sized `DstLayout`` with + // a DST field correctly computes the trailing offset in the composite + // layout. + + let aligns = (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()); + let packs = core::iter::once(None).chain(aligns.clone().map(Some)); + + for align in aligns { + for pack in packs.clone() { + let base = DstLayout::for_type::(); + let elem_size = 42; + let trailing_field_offset = 11; + + let trailing_field = DstLayout { + align, + size_info: SizeInfo::SliceDst(TrailingSliceLayout { elem_size, offset: 11 }), + statically_shallow_unpadded: false, + }; + + let composite = base.extend(trailing_field, pack); + + let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN).get(); + + let align = align.get().min(max_align); + + assert_eq!( + composite, + DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: SizeInfo::SliceDst(TrailingSliceLayout { + elem_size, + offset: align + trailing_field_offset, + }), + statically_shallow_unpadded: false, + } + ) + } + } + } + + /// Tests that calling `pad_to_align` on a sized `DstLayout` adds the + /// expected amount of trailing padding. + #[test] + fn test_dst_layout_pad_to_align_with_sized() { + // For all valid alignments `align`, construct a one-byte layout aligned + // to `align`, call `pad_to_align`, and assert that the size of the + // resulting layout is equal to `align`. 
+ for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) { + let layout = DstLayout { + align, + size_info: SizeInfo::Sized { size: 1 }, + statically_shallow_unpadded: true, + }; + + assert_eq!( + layout.pad_to_align(), + DstLayout { + align, + size_info: SizeInfo::Sized { size: align.get() }, + statically_shallow_unpadded: align.get() == 1 + } + ); + } + + // Test explicitly-provided combinations of unpadded and padded + // counterparts. + + macro_rules! test { + (unpadded { size: $unpadded_size:expr, align: $unpadded_align:expr } + => padded { size: $padded_size:expr, align: $padded_align:expr }) => { + let unpadded = DstLayout { + align: NonZeroUsize::new($unpadded_align).unwrap(), + size_info: SizeInfo::Sized { size: $unpadded_size }, + statically_shallow_unpadded: false, + }; + let padded = unpadded.pad_to_align(); + + assert_eq!( + padded, + DstLayout { + align: NonZeroUsize::new($padded_align).unwrap(), + size_info: SizeInfo::Sized { size: $padded_size }, + statically_shallow_unpadded: false, + } + ); + }; + } + + test!(unpadded { size: 0, align: 4 } => padded { size: 0, align: 4 }); + test!(unpadded { size: 1, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 2, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 3, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 4, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 5, align: 4 } => padded { size: 8, align: 4 }); + test!(unpadded { size: 6, align: 4 } => padded { size: 8, align: 4 }); + test!(unpadded { size: 7, align: 4 } => padded { size: 8, align: 4 }); + test!(unpadded { size: 8, align: 4 } => padded { size: 8, align: 4 }); + + let current_max_align = DstLayout::CURRENT_MAX_ALIGN.get(); + + test!(unpadded { size: 1, align: current_max_align } + => padded { size: current_max_align, align: current_max_align }); + + test!(unpadded { size: current_max_align + 1, align: current_max_align } + => padded { size: 
current_max_align * 2, align: current_max_align }); + } + + /// Tests that calling `pad_to_align` on a DST `DstLayout` is a no-op. + #[test] + fn test_dst_layout_pad_to_align_with_dst() { + for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) { + for offset in 0..10 { + for elem_size in 0..10 { + let layout = DstLayout { + align, + size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }), + statically_shallow_unpadded: false, + }; + assert_eq!(layout.pad_to_align(), layout); + } + } + } + } + + // This test takes a long time when running under Miri, so we skip it in + // that case. This is acceptable because this is a logic test that doesn't + // attempt to expose UB. + #[test] + #[cfg_attr(miri, ignore)] + fn test_validate_cast_and_convert_metadata() { + #[allow(non_local_definitions)] + impl From for SizeInfo { + fn from(size: usize) -> SizeInfo { + SizeInfo::Sized { size } + } + } + + #[allow(non_local_definitions)] + impl From<(usize, usize)> for SizeInfo { + fn from((offset, elem_size): (usize, usize)) -> SizeInfo { + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) + } + } + + fn layout>(s: S, align: usize) -> DstLayout { + DstLayout { + size_info: s.into(), + align: NonZeroUsize::new(align).unwrap(), + statically_shallow_unpadded: false, + } + } + + /// This macro accepts arguments in the form of: + /// + /// layout(_, _).validate(_, _, _), Ok(Some((_, _))) + /// | | | | | | | + /// size ---------+ | | | | | | + /// align -----------+ | | | | | + /// addr ------------------------+ | | | | + /// bytes_len ----------------------+ | | | + /// cast_type -------------------------+ | | + /// elems ------------------------------------------+ | + /// split_at ------------------------------------------+ + /// + /// `.validate` is shorthand for `.validate_cast_and_convert_metadata` + /// for brevity. + /// + /// Each argument can either be an iterator or a wildcard. 
Each + /// wildcarded variable is implicitly replaced by an iterator over a + /// representative sample of values for that variable. Each `test!` + /// invocation iterates over every combination of values provided by + /// each variable's iterator (ie, the cartesian product) and validates + /// that the results are expected. + /// + /// The final argument uses the same syntax, but it has a different + /// meaning: + /// - If it is `Ok(pat)`, then the pattern `pat` is supplied to + /// a matching assert to validate the computed result for each + /// combination of input values. + /// - If it is `Err(Some(msg) | None)`, then `test!` validates that the + /// call to `validate_cast_and_convert_metadata` panics with the given + /// panic message or, if the current Rust toolchain version is too + /// early to support panicking in `const fn`s, panics with *some* + /// message. In the latter case, the `const_panic!` macro is used, + /// which emits code which causes a non-panicking error at const eval + /// time, but which does panic when invoked at runtime. Thus, it is + /// merely difficult to predict the *value* of this panic. We deem + /// that testing against the real panic strings on stable and nightly + /// toolchains is enough to ensure correctness. + /// + /// Note that the meta-variables that match these variables have the + /// `tt` type, and some valid expressions are not valid `tt`s (such as + /// `a..b`). In this case, wrap the expression in parentheses, and it + /// will become valid `tt`. + macro_rules! test { + ( + layout($size:tt, $align:tt) + .validate($addr:tt, $bytes_len:tt, $cast_type:tt), $expect:pat $(,)? + ) => { + itertools::iproduct!( + test!(@generate_size $size), + test!(@generate_align $align), + test!(@generate_usize $addr), + test!(@generate_usize $bytes_len), + test!(@generate_cast_type $cast_type) + ).for_each(|(size_info, align, addr, bytes_len, cast_type)| { + // Temporarily disable the panic hook installed by the test + // harness. 
If we don't do this, all panic messages will be + // kept in an internal log. On its own, this isn't a + // problem, but if a non-caught panic ever happens (ie, in + // code later in this test not in this macro), all of the + // previously-buffered messages will be dumped, hiding the + // real culprit. + let previous_hook = std::panic::take_hook(); + // I don't understand why, but this seems to be required in + // addition to the previous line. + std::panic::set_hook(Box::new(|_| {})); + let actual = std::panic::catch_unwind(|| { + layout(size_info, align).validate_cast_and_convert_metadata(addr, bytes_len, cast_type) + }).map_err(|d| { + let msg = d.downcast::<&'static str>().ok().map(|s| *s.as_ref()); + assert!(msg.is_some() || cfg!(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0), "non-string panic messages are not permitted when usage of panic in const fn is enabled"); + msg + }); + std::panic::set_hook(previous_hook); + + assert!( + matches!(actual, $expect), + "layout({:?}, {}).validate_cast_and_convert_metadata({}, {}, {:?})" ,size_info, align, addr, bytes_len, cast_type + ); + }); + }; + (@generate_usize _) => { 0..8 }; + // Generate sizes for both Sized and !Sized types. + (@generate_size _) => { + test!(@generate_size (_)).chain(test!(@generate_size (_, _))) + }; + // Generate sizes for both Sized and !Sized types by chaining + // specified iterators for each. + (@generate_size ($sized_sizes:tt | $unsized_sizes:tt)) => { + test!(@generate_size ($sized_sizes)).chain(test!(@generate_size $unsized_sizes)) + }; + // Generate sizes for Sized types. + (@generate_size (_)) => { test!(@generate_size (0..8)) }; + (@generate_size ($sizes:expr)) => { $sizes.into_iter().map(Into::::into) }; + // Generate sizes for !Sized types. 
+ (@generate_size ($min_sizes:tt, $elem_sizes:tt)) => { + itertools::iproduct!( + test!(@generate_min_size $min_sizes), + test!(@generate_elem_size $elem_sizes) + ).map(Into::::into) + }; + (@generate_fixed_size _) => { (0..8).into_iter().map(Into::::into) }; + (@generate_min_size _) => { 0..8 }; + (@generate_elem_size _) => { 1..8 }; + (@generate_align _) => { [1, 2, 4, 8, 16] }; + (@generate_opt_usize _) => { [None].into_iter().chain((0..8).map(Some).into_iter()) }; + (@generate_cast_type _) => { [CastType::Prefix, CastType::Suffix] }; + (@generate_cast_type $variant:ident) => { [CastType::$variant] }; + // Some expressions need to be wrapped in parentheses in order to be + // valid `tt`s (required by the top match pattern). See the comment + // below for more details. This arm removes these parentheses to + // avoid generating an `unused_parens` warning. + (@$_:ident ($vals:expr)) => { $vals }; + (@$_:ident $vals:expr) => { $vals }; + } + + const EVENS: [usize; 8] = [0, 2, 4, 6, 8, 10, 12, 14]; + const ODDS: [usize; 8] = [1, 3, 5, 7, 9, 11, 13, 15]; + + // base_size is too big for the memory region. 
+ test!( + layout(((1..8) | ((1..8), (1..8))), _).validate([0], [0], _), + Ok(Err(MetadataCastError::Size)) + ); + test!( + layout(((2..8) | ((2..8), (2..8))), _).validate([0], [1], Prefix), + Ok(Err(MetadataCastError::Size)) + ); + test!( + layout(((2..8) | ((2..8), (2..8))), _).validate([0x1000_0000 - 1], [1], Suffix), + Ok(Err(MetadataCastError::Size)) + ); + + // addr is unaligned for prefix cast + test!(layout(_, [2]).validate(ODDS, _, Prefix), Ok(Err(MetadataCastError::Alignment))); + test!(layout(_, [2]).validate(ODDS, _, Prefix), Ok(Err(MetadataCastError::Alignment))); + + // addr is aligned, but end of buffer is unaligned for suffix cast + test!(layout(_, [2]).validate(EVENS, ODDS, Suffix), Ok(Err(MetadataCastError::Alignment))); + test!(layout(_, [2]).validate(EVENS, ODDS, Suffix), Ok(Err(MetadataCastError::Alignment))); + + // Unfortunately, these constants cannot easily be used in the + // implementation of `validate_cast_and_convert_metadata`, since + // `panic!` consumes a string literal, not an expression. + // + // It's important that these messages be in a separate module. If they + // were at the function's top level, we'd pass them to `test!` as, e.g., + // `Err(TRAILING)`, which would run into a subtle Rust footgun - the + // `TRAILING` identifier would be treated as a pattern to match rather + // than a value to check for equality. 
+ mod msgs { + pub(super) const TRAILING: &str = + "attempted to cast to slice type with zero-sized element"; + pub(super) const OVERFLOW: &str = "`addr` + `bytes_len` > usize::MAX"; + } + + // casts with ZST trailing element types are unsupported + test!(layout((_, [0]), _).validate(_, _, _), Err(Some(msgs::TRAILING) | None),); + + // addr + bytes_len must not overflow usize + test!(layout(_, _).validate([usize::MAX], (1..100), _), Err(Some(msgs::OVERFLOW) | None)); + test!(layout(_, _).validate((1..100), [usize::MAX], _), Err(Some(msgs::OVERFLOW) | None)); + test!( + layout(_, _).validate( + [usize::MAX / 2 + 1, usize::MAX], + [usize::MAX / 2 + 1, usize::MAX], + _ + ), + Err(Some(msgs::OVERFLOW) | None) + ); + + // Validates that `validate_cast_and_convert_metadata` satisfies its own + // documented safety postconditions, and also a few other properties + // that aren't documented but we want to guarantee anyway. + fn validate_behavior( + (layout, addr, bytes_len, cast_type): (DstLayout, usize, usize, CastType), + ) { + if let Ok((elems, split_at)) = + layout.validate_cast_and_convert_metadata(addr, bytes_len, cast_type) + { + let (size_info, align) = (layout.size_info, layout.align); + let debug_str = format!( + "layout({:?}, {}).validate_cast_and_convert_metadata({}, {}, {:?}) => ({}, {})", + size_info, align, addr, bytes_len, cast_type, elems, split_at + ); + + // If this is a sized type (no trailing slice), then `elems` is + // meaningless, but in practice we set it to 0. Callers are not + // allowed to rely on this, but a lot of math is nicer if + // they're able to, and some callers might accidentally do that. + let sized = matches!(layout.size_info, SizeInfo::Sized { .. 
}); + assert!(!(sized && elems != 0), "{}", debug_str); + + let resulting_size = match layout.size_info { + SizeInfo::Sized { size } => size, + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => { + let padded_size = |elems| { + let without_padding = offset + elems * elem_size; + without_padding + util::padding_needed_for(without_padding, align) + }; + + let resulting_size = padded_size(elems); + // Test that `validate_cast_and_convert_metadata` + // computed the largest possible value that fits in the + // given range. + assert!(padded_size(elems + 1) > bytes_len, "{}", debug_str); + resulting_size + } + }; + + // Test safety postconditions guaranteed by + // `validate_cast_and_convert_metadata`. + assert!(resulting_size <= bytes_len, "{}", debug_str); + match cast_type { + CastType::Prefix => { + assert_eq!(addr % align, 0, "{}", debug_str); + assert_eq!(resulting_size, split_at, "{}", debug_str); + } + CastType::Suffix => { + assert_eq!(split_at, bytes_len - resulting_size, "{}", debug_str); + assert_eq!((addr + split_at) % align, 0, "{}", debug_str); + } + } + } else { + let min_size = match layout.size_info { + SizeInfo::Sized { size } => size, + SizeInfo::SliceDst(TrailingSliceLayout { offset, .. }) => { + offset + util::padding_needed_for(offset, layout.align) + } + }; + + // If a cast is invalid, it is either because... + // 1. there are insufficient bytes at the given region for type: + let insufficient_bytes = bytes_len < min_size; + // 2. 
performing the cast would misalign type: + let base = match cast_type { + CastType::Prefix => 0, + CastType::Suffix => bytes_len, + }; + let misaligned = (base + addr) % layout.align != 0; + + assert!(insufficient_bytes || misaligned); + } + } + + let sizes = 0..8; + let elem_sizes = 1..8; + let size_infos = sizes + .clone() + .map(Into::::into) + .chain(itertools::iproduct!(sizes, elem_sizes).map(Into::::into)); + let layouts = itertools::iproduct!(size_infos, [1, 2, 4, 8, 16, 32]) + .filter(|(size_info, align)| !matches!(size_info, SizeInfo::Sized { size } if size % align != 0)) + .map(|(size_info, align)| layout(size_info, align)); + itertools::iproduct!(layouts, 0..8, 0..8, [CastType::Prefix, CastType::Suffix]) + .for_each(validate_behavior); + } + + #[test] + #[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)] + fn test_validate_rust_layout() { + use core::{ + convert::TryInto as _, + ptr::{self, NonNull}, + }; + + use crate::util::testutil::*; + + // This test synthesizes pointers with various metadata and uses Rust's + // built-in APIs to confirm that Rust makes decisions about type layout + // which are consistent with what we believe is guaranteed by the + // language. If this test fails, it doesn't just mean our code is wrong + // - it means we're misunderstanding the language's guarantees. + + #[derive(Debug)] + struct MacroArgs { + offset: usize, + align: NonZeroUsize, + elem_size: Option, + } + + /// # Safety + /// + /// `test` promises to only call `addr_of_slice_field` on a `NonNull` + /// which points to a valid `T`. + /// + /// `with_elems` must produce a pointer which points to a valid `T`. 
+ fn test NonNull>( + args: MacroArgs, + with_elems: W, + addr_of_slice_field: Option) -> NonNull>, + ) { + let dst = args.elem_size.is_some(); + let layout = { + let size_info = match args.elem_size { + Some(elem_size) => { + SizeInfo::SliceDst(TrailingSliceLayout { offset: args.offset, elem_size }) + } + None => SizeInfo::Sized { + // Rust only supports types whose sizes are a multiple + // of their alignment. If the macro created a type like + // this: + // + // #[repr(C, align(2))] + // struct Foo([u8; 1]); + // + // ...then Rust will automatically round the type's size + // up to 2. + size: args.offset + util::padding_needed_for(args.offset, args.align), + }, + }; + DstLayout { size_info, align: args.align, statically_shallow_unpadded: false } + }; + + for elems in 0..128 { + let ptr = with_elems(elems); + + if let Some(addr_of_slice_field) = addr_of_slice_field { + let slc_field_ptr = addr_of_slice_field(ptr).as_ptr(); + // SAFETY: Both `slc_field_ptr` and `ptr` are pointers to + // the same valid Rust object. + // Work around https://github.com/rust-lang/rust-clippy/issues/12280 + let offset: usize = + unsafe { slc_field_ptr.byte_offset_from(ptr.as_ptr()).try_into().unwrap() }; + assert_eq!(offset, args.offset); + } + + // SAFETY: `ptr` points to a valid `T`. + #[allow(clippy::multiple_unsafe_ops_per_block)] + let (size, align) = unsafe { + (mem::size_of_val_raw(ptr.as_ptr()), mem::align_of_val_raw(ptr.as_ptr())) + }; + + // Avoid expensive allocation when running under Miri. + let assert_msg = if !cfg!(miri) { + format!("\n{:?}\nsize:{}, align:{}", args, size, align) + } else { + String::new() + }; + + let without_padding = + args.offset + args.elem_size.map(|elem_size| elems * elem_size).unwrap_or(0); + assert!(size >= without_padding, "{}", assert_msg); + assert_eq!(align, args.align.get(), "{}", assert_msg); + + // This encodes the most important part of the test: our + // understanding of how Rust determines the layout of repr(C) + // types. 
Sized repr(C) types are trivial, but DST types have + // some subtlety. Note that: + // - For sized types, `without_padding` is just the size of the + // type that we constructed for `Foo`. Since we may have + // requested a larger alignment, `Foo` may actually be larger + // than this, hence `padding_needed_for`. + // - For unsized types, `without_padding` is dynamically + // computed from the offset, the element size, and element + // count. We expect that the size of the object should be + // `offset + elem_size * elems` rounded up to the next + // alignment. + let expected_size = + without_padding + util::padding_needed_for(without_padding, args.align); + assert_eq!(expected_size, size, "{}", assert_msg); + + // For zero-sized element types, + // `validate_cast_and_convert_metadata` just panics, so we skip + // testing those types. + if args.elem_size.map(|elem_size| elem_size > 0).unwrap_or(true) { + let addr = ptr.addr().get(); + let (got_elems, got_split_at) = layout + .validate_cast_and_convert_metadata(addr, size, CastType::Prefix) + .unwrap(); + // Avoid expensive allocation when running under Miri. + let assert_msg = if !cfg!(miri) { + format!( + "{}\nvalidate_cast_and_convert_metadata({}, {})", + assert_msg, addr, size, + ) + } else { + String::new() + }; + assert_eq!(got_split_at, size, "{}", assert_msg); + if dst { + assert!(got_elems >= elems, "{}", assert_msg); + if got_elems != elems { + // If `validate_cast_and_convert_metadata` + // returned more elements than `elems`, that + // means that `elems` is not the maximum number + // of elements that can fit in `size` - in other + // words, there is enough padding at the end of + // the value to fit at least one more element. + // If we use this metadata to synthesize a + // pointer, despite having a different element + // count, we still expect it to have the same + // size. + let got_ptr = with_elems(got_elems); + // SAFETY: `got_ptr` is a pointer to a valid `T`. 
+ let size_of_got_ptr = unsafe { mem::size_of_val_raw(got_ptr.as_ptr()) }; + assert_eq!(size_of_got_ptr, size, "{}", assert_msg); + } + } else { + // For sized casts, the returned element value is + // technically meaningless, and we don't guarantee any + // particular value. In practice, it's always zero. + assert_eq!(got_elems, 0, "{}", assert_msg) + } + } + } + } + + macro_rules! validate_against_rust { + ($offset:literal, $align:literal $(, $elem_size:literal)?) => {{ + #[repr(C, align($align))] + struct Foo([u8; $offset]$(, [[u8; $elem_size]])?); + + let args = MacroArgs { + offset: $offset, + align: $align.try_into().unwrap(), + elem_size: { + #[allow(unused)] + let ret = None::; + $(let ret = Some($elem_size);)? + ret + } + }; + + #[repr(C, align($align))] + struct FooAlign; + // Create an aligned buffer to use in order to synthesize + // pointers to `Foo`. We don't ever load values from these + // pointers - we just do arithmetic on them - so having a "real" + // block of memory as opposed to a validly-aligned-but-dangling + // pointer is only necessary to make Miri happy since we run it + // with "strict provenance" checking enabled. + let aligned_buf = Align::<_, FooAlign>::new([0u8; 1024]); + let with_elems = |elems| { + let slc = NonNull::slice_from_raw_parts(NonNull::from(&aligned_buf.t), elems); + #[allow(clippy::as_conversions)] + NonNull::new(slc.as_ptr() as *mut Foo).unwrap() + }; + let addr_of_slice_field = { + #[allow(unused)] + let f = None::) -> NonNull>; + $( + // SAFETY: `test` promises to only call `f` with a `ptr` + // to a valid `Foo`. + let f: Option) -> NonNull> = Some(|ptr: NonNull| unsafe { + NonNull::new(ptr::addr_of_mut!((*ptr.as_ptr()).1)).unwrap().cast::() + }); + let _ = $elem_size; + )? 
+ f + }; + + test::(args, with_elems, addr_of_slice_field); + }}; + } + + // Every permutation of: + // - offset in [0, 4] + // - align in [1, 16] + // - elem_size in [0, 4] (plus no elem_size) + validate_against_rust!(0, 1); + validate_against_rust!(0, 1, 0); + validate_against_rust!(0, 1, 1); + validate_against_rust!(0, 1, 2); + validate_against_rust!(0, 1, 3); + validate_against_rust!(0, 1, 4); + validate_against_rust!(0, 2); + validate_against_rust!(0, 2, 0); + validate_against_rust!(0, 2, 1); + validate_against_rust!(0, 2, 2); + validate_against_rust!(0, 2, 3); + validate_against_rust!(0, 2, 4); + validate_against_rust!(0, 4); + validate_against_rust!(0, 4, 0); + validate_against_rust!(0, 4, 1); + validate_against_rust!(0, 4, 2); + validate_against_rust!(0, 4, 3); + validate_against_rust!(0, 4, 4); + validate_against_rust!(0, 8); + validate_against_rust!(0, 8, 0); + validate_against_rust!(0, 8, 1); + validate_against_rust!(0, 8, 2); + validate_against_rust!(0, 8, 3); + validate_against_rust!(0, 8, 4); + validate_against_rust!(0, 16); + validate_against_rust!(0, 16, 0); + validate_against_rust!(0, 16, 1); + validate_against_rust!(0, 16, 2); + validate_against_rust!(0, 16, 3); + validate_against_rust!(0, 16, 4); + validate_against_rust!(1, 1); + validate_against_rust!(1, 1, 0); + validate_against_rust!(1, 1, 1); + validate_against_rust!(1, 1, 2); + validate_against_rust!(1, 1, 3); + validate_against_rust!(1, 1, 4); + validate_against_rust!(1, 2); + validate_against_rust!(1, 2, 0); + validate_against_rust!(1, 2, 1); + validate_against_rust!(1, 2, 2); + validate_against_rust!(1, 2, 3); + validate_against_rust!(1, 2, 4); + validate_against_rust!(1, 4); + validate_against_rust!(1, 4, 0); + validate_against_rust!(1, 4, 1); + validate_against_rust!(1, 4, 2); + validate_against_rust!(1, 4, 3); + validate_against_rust!(1, 4, 4); + validate_against_rust!(1, 8); + validate_against_rust!(1, 8, 0); + validate_against_rust!(1, 8, 1); + validate_against_rust!(1, 8, 2); + 
validate_against_rust!(1, 8, 3); + validate_against_rust!(1, 8, 4); + validate_against_rust!(1, 16); + validate_against_rust!(1, 16, 0); + validate_against_rust!(1, 16, 1); + validate_against_rust!(1, 16, 2); + validate_against_rust!(1, 16, 3); + validate_against_rust!(1, 16, 4); + validate_against_rust!(2, 1); + validate_against_rust!(2, 1, 0); + validate_against_rust!(2, 1, 1); + validate_against_rust!(2, 1, 2); + validate_against_rust!(2, 1, 3); + validate_against_rust!(2, 1, 4); + validate_against_rust!(2, 2); + validate_against_rust!(2, 2, 0); + validate_against_rust!(2, 2, 1); + validate_against_rust!(2, 2, 2); + validate_against_rust!(2, 2, 3); + validate_against_rust!(2, 2, 4); + validate_against_rust!(2, 4); + validate_against_rust!(2, 4, 0); + validate_against_rust!(2, 4, 1); + validate_against_rust!(2, 4, 2); + validate_against_rust!(2, 4, 3); + validate_against_rust!(2, 4, 4); + validate_against_rust!(2, 8); + validate_against_rust!(2, 8, 0); + validate_against_rust!(2, 8, 1); + validate_against_rust!(2, 8, 2); + validate_against_rust!(2, 8, 3); + validate_against_rust!(2, 8, 4); + validate_against_rust!(2, 16); + validate_against_rust!(2, 16, 0); + validate_against_rust!(2, 16, 1); + validate_against_rust!(2, 16, 2); + validate_against_rust!(2, 16, 3); + validate_against_rust!(2, 16, 4); + validate_against_rust!(3, 1); + validate_against_rust!(3, 1, 0); + validate_against_rust!(3, 1, 1); + validate_against_rust!(3, 1, 2); + validate_against_rust!(3, 1, 3); + validate_against_rust!(3, 1, 4); + validate_against_rust!(3, 2); + validate_against_rust!(3, 2, 0); + validate_against_rust!(3, 2, 1); + validate_against_rust!(3, 2, 2); + validate_against_rust!(3, 2, 3); + validate_against_rust!(3, 2, 4); + validate_against_rust!(3, 4); + validate_against_rust!(3, 4, 0); + validate_against_rust!(3, 4, 1); + validate_against_rust!(3, 4, 2); + validate_against_rust!(3, 4, 3); + validate_against_rust!(3, 4, 4); + validate_against_rust!(3, 8); + 
validate_against_rust!(3, 8, 0); + validate_against_rust!(3, 8, 1); + validate_against_rust!(3, 8, 2); + validate_against_rust!(3, 8, 3); + validate_against_rust!(3, 8, 4); + validate_against_rust!(3, 16); + validate_against_rust!(3, 16, 0); + validate_against_rust!(3, 16, 1); + validate_against_rust!(3, 16, 2); + validate_against_rust!(3, 16, 3); + validate_against_rust!(3, 16, 4); + validate_against_rust!(4, 1); + validate_against_rust!(4, 1, 0); + validate_against_rust!(4, 1, 1); + validate_against_rust!(4, 1, 2); + validate_against_rust!(4, 1, 3); + validate_against_rust!(4, 1, 4); + validate_against_rust!(4, 2); + validate_against_rust!(4, 2, 0); + validate_against_rust!(4, 2, 1); + validate_against_rust!(4, 2, 2); + validate_against_rust!(4, 2, 3); + validate_against_rust!(4, 2, 4); + validate_against_rust!(4, 4); + validate_against_rust!(4, 4, 0); + validate_against_rust!(4, 4, 1); + validate_against_rust!(4, 4, 2); + validate_against_rust!(4, 4, 3); + validate_against_rust!(4, 4, 4); + validate_against_rust!(4, 8); + validate_against_rust!(4, 8, 0); + validate_against_rust!(4, 8, 1); + validate_against_rust!(4, 8, 2); + validate_against_rust!(4, 8, 3); + validate_against_rust!(4, 8, 4); + validate_against_rust!(4, 16); + validate_against_rust!(4, 16, 0); + validate_against_rust!(4, 16, 1); + validate_against_rust!(4, 16, 2); + validate_against_rust!(4, 16, 3); + validate_against_rust!(4, 16, 4); + } +} + +#[cfg(kani)] +mod proofs { + use core::alloc::Layout; + + use super::*; + + impl kani::Arbitrary for DstLayout { + fn any() -> Self { + let align: NonZeroUsize = kani::any(); + let size_info: SizeInfo = kani::any(); + + kani::assume(align.is_power_of_two()); + kani::assume(align < DstLayout::THEORETICAL_MAX_ALIGN); + + // For testing purposes, we most care about instantiations of + // `DstLayout` that can correspond to actual Rust types. We use + // `Layout` to verify that our `DstLayout` satisfies the validity + // conditions of Rust layouts. 
+ kani::assume( + match size_info { + SizeInfo::Sized { size } => Layout::from_size_align(size, align.get()), + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size: _ }) => { + // `SliceDst` cannot encode an exact size, but we know + // it is at least `offset` bytes. + Layout::from_size_align(offset, align.get()) + } + } + .is_ok(), + ); + + Self { align: align, size_info: size_info, statically_shallow_unpadded: kani::any() } + } + } + + impl kani::Arbitrary for SizeInfo { + fn any() -> Self { + let is_sized: bool = kani::any(); + + match is_sized { + true => { + let size: usize = kani::any(); + + kani::assume(size <= isize::MAX as _); + + SizeInfo::Sized { size } + } + false => SizeInfo::SliceDst(kani::any()), + } + } + } + + impl kani::Arbitrary for TrailingSliceLayout { + fn any() -> Self { + let elem_size: usize = kani::any(); + let offset: usize = kani::any(); + + kani::assume(elem_size < isize::MAX as _); + kani::assume(offset < isize::MAX as _); + + TrailingSliceLayout { elem_size, offset } + } + } + + #[kani::proof] + fn prove_requires_dynamic_padding() { + let layout: DstLayout = kani::any(); + + let SizeInfo::SliceDst(size_info) = layout.size_info else { + kani::assume(false); + loop {} + }; + + let meta: usize = kani::any(); + + let Some(trailing_slice_size) = size_info.elem_size.checked_mul(meta) else { + // The `trailing_slice_size` exceeds `usize::MAX`; `meta` is invalid. + kani::assume(false); + loop {} + }; + + let Some(unpadded_size) = size_info.offset.checked_add(trailing_slice_size) else { + // The `unpadded_size` exceeds `usize::MAX`; `meta`` is invalid. + kani::assume(false); + loop {} + }; + + if unpadded_size >= isize::MAX as usize { + // The `unpadded_size` exceeds `isize::MAX`; `meta` is invalid. 
+ kani::assume(false); + loop {} + } + + let trailing_padding = util::padding_needed_for(unpadded_size, layout.align); + + if !layout.requires_dynamic_padding() { + assert!(trailing_padding == 0); + } + } + + #[kani::proof] + fn prove_dst_layout_extend() { + use crate::util::{max, min, padding_needed_for}; + + let base: DstLayout = kani::any(); + let field: DstLayout = kani::any(); + let packed: Option = kani::any(); + + if let Some(max_align) = packed { + kani::assume(max_align.is_power_of_two()); + kani::assume(base.align <= max_align); + } + + // The base can only be extended if it's sized. + kani::assume(matches!(base.size_info, SizeInfo::Sized { .. })); + let base_size = if let SizeInfo::Sized { size } = base.size_info { + size + } else { + unreachable!(); + }; + + // Under the above conditions, `DstLayout::extend` will not panic. + let composite = base.extend(field, packed); + + // The field's alignment is clamped by `max_align` (i.e., the + // `packed` attribute, if any) [1]. + // + // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers: + // + // The alignments of each field, for the purpose of positioning + // fields, is the smaller of the specified alignment and the + // alignment of the field's type. + let field_align = min(field.align, packed.unwrap_or(DstLayout::THEORETICAL_MAX_ALIGN)); + + // The struct's alignment is the maximum of its previous alignment and + // `field_align`. + assert_eq!(composite.align, max(base.align, field_align)); + + // Compute the minimum amount of inter-field padding needed to + // satisfy the field's alignment, and offset of the trailing field. + // [1] + // + // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers: + // + // Inter-field padding is guaranteed to be the minimum required in + // order to satisfy each field's (possibly altered) alignment. 
+ let padding = padding_needed_for(base_size, field_align); + let offset = base_size + padding; + + // For testing purposes, we'll also construct `alloc::Layout` + // stand-ins for `DstLayout`, and show that `extend` behaves + // comparably on both types. + let base_analog = Layout::from_size_align(base_size, base.align.get()).unwrap(); + + match field.size_info { + SizeInfo::Sized { size: field_size } => { + if let SizeInfo::Sized { size: composite_size } = composite.size_info { + // If the trailing field is sized, the resulting layout will + // be sized. Its size will be the sum of the preceding + // layout, the size of the new field, and the size of + // inter-field padding between the two. + assert_eq!(composite_size, offset + field_size); + + let field_analog = + Layout::from_size_align(field_size, field_align.get()).unwrap(); + + if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog) + { + assert_eq!(actual_offset, offset); + assert_eq!(actual_composite.size(), composite_size); + assert_eq!(actual_composite.align(), composite.align.get()); + } else { + // An error here reflects that composite of `base` + // and `field` cannot correspond to a real Rust type + // fragment, because such a fragment would violate + // the basic invariants of a valid Rust layout. At + // the time of writing, `DstLayout` is a little more + // permissive than `Layout`, so we don't assert + // anything in this branch (e.g., unreachability). + } + } else { + panic!("The composite of two sized layouts must be sized.") + } + } + SizeInfo::SliceDst(TrailingSliceLayout { + offset: field_offset, + elem_size: field_elem_size, + }) => { + if let SizeInfo::SliceDst(TrailingSliceLayout { + offset: composite_offset, + elem_size: composite_elem_size, + }) = composite.size_info + { + // The offset of the trailing slice component is the sum + // of the offset of the trailing field and the trailing + // slice offset within that field. 
+ assert_eq!(composite_offset, offset + field_offset); + // The elem size is unchanged. + assert_eq!(composite_elem_size, field_elem_size); + + let field_analog = + Layout::from_size_align(field_offset, field_align.get()).unwrap(); + + if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog) + { + assert_eq!(actual_offset, offset); + assert_eq!(actual_composite.size(), composite_offset); + assert_eq!(actual_composite.align(), composite.align.get()); + } else { + // An error here reflects that composite of `base` + // and `field` cannot correspond to a real Rust type + // fragment, because such a fragment would violate + // the basic invariants of a valid Rust layout. At + // the time of writing, `DstLayout` is a little more + // permissive than `Layout`, so we don't assert + // anything in this branch (e.g., unreachability). + } + } else { + panic!("The extension of a layout with a DST must result in a DST.") + } + } + } + } + + #[kani::proof] + #[kani::should_panic] + fn prove_dst_layout_extend_dst_panics() { + let base: DstLayout = kani::any(); + let field: DstLayout = kani::any(); + let packed: Option = kani::any(); + + if let Some(max_align) = packed { + kani::assume(max_align.is_power_of_two()); + kani::assume(base.align <= max_align); + } + + kani::assume(matches!(base.size_info, SizeInfo::SliceDst(..))); + + let _ = base.extend(field, packed); + } + + #[kani::proof] + fn prove_dst_layout_pad_to_align() { + use crate::util::padding_needed_for; + + let layout: DstLayout = kani::any(); + + let padded = layout.pad_to_align(); + + // Calling `pad_to_align` does not alter the `DstLayout`'s alignment. + assert_eq!(padded.align, layout.align); + + if let SizeInfo::Sized { size: unpadded_size } = layout.size_info { + if let SizeInfo::Sized { size: padded_size } = padded.size_info { + // If the layout is sized, it will remain sized after padding is + // added. 
Its sum will be its unpadded size and the size of the + // trailing padding needed to satisfy its alignment + // requirements. + let padding = padding_needed_for(unpadded_size, layout.align); + assert_eq!(padded_size, unpadded_size + padding); + + // Prove that calling `DstLayout::pad_to_align` behaves + // identically to `Layout::pad_to_align`. + let layout_analog = + Layout::from_size_align(unpadded_size, layout.align.get()).unwrap(); + let padded_analog = layout_analog.pad_to_align(); + assert_eq!(padded_analog.align(), layout.align.get()); + assert_eq!(padded_analog.size(), padded_size); + } else { + panic!("The padding of a sized layout must result in a sized layout.") + } + } else { + // If the layout is a DST, padding cannot be statically added. + assert_eq!(padded.size_info, layout.size_info); + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/lib.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..286f294497fa393f5c9e1d8d1bd7be8d138f775d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/lib.rs @@ -0,0 +1,7044 @@ +// Copyright 2018 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +// After updating the following doc comment, make sure to run the following +// command to update `README.md` based on its contents: +// +// cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md + +//! ***Fast, safe, compile error. Pick two.*** +//! +//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe` +//! so you don't have to. +//! +//! *For an overview of what's changed from zerocopy 0.7, check out our [release +//! 
notes][release-notes], which include a step-by-step upgrading guide.* +//! +//! *Have questions? Need more out of zerocopy? Submit a [customer request +//! issue][customer-request-issue] or ask the maintainers on +//! [GitHub][github-q-a] or [Discord][discord]!* +//! +//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose +//! [release-notes]: https://github.com/google/zerocopy/discussions/1680 +//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a +//! [discord]: https://discord.gg/MAvWH2R6zk +//! +//! # Overview +//! +//! ##### Conversion Traits +//! +//! Zerocopy provides four derivable traits for zero-cost conversions: +//! - [`TryFromBytes`] indicates that a type may safely be converted from +//! certain byte sequences (conditional on runtime checks) +//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid +//! instance of a type +//! - [`FromBytes`] indicates that a type may safely be converted from an +//! arbitrary byte sequence +//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte +//! sequence +//! +//! These traits support sized types, slices, and [slice DSTs][slice-dsts]. +//! +//! [slice-dsts]: KnownLayout#dynamically-sized-types +//! +//! ##### Marker Traits +//! +//! Zerocopy provides three derivable marker traits that do not provide any +//! functionality themselves, but are required to call certain methods provided +//! by the conversion traits: +//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout +//! qualities of a type +//! - [`Immutable`] indicates that a type is free from interior mutability, +//! except by ownership or an exclusive (`&mut`) borrow +//! - [`Unaligned`] indicates that a type's alignment requirement is 1 +//! +//! You should generally derive these marker traits whenever possible. +//! +//! ##### Conversion Macros +//! +//! Zerocopy provides six macros for safe casting between types: +//! +//! 
- ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of +//! one type to a value of another type of the same size +//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a +//! mutable reference of one type to a mutable reference of another type of +//! the same size +//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a +//! mutable or immutable reference of one type to an immutable reference of +//! another type of the same size +//! +//! These macros perform *compile-time* size and alignment checks, meaning that +//! unconditional casts have zero cost at runtime. Conditional casts do not need +//! to validate size or alignment runtime, but do need to validate contents. +//! +//! These macros cannot be used in generic contexts. For generic conversions, +//! use the methods defined by the [conversion traits](#conversion-traits). +//! +//! ##### Byteorder-Aware Numerics +//! +//! Zerocopy provides byte-order aware integer types that support these +//! conversions; see the [`byteorder`] module. These types are especially useful +//! for network parsing. +//! +//! # Cargo Features +//! +//! - **`alloc`** +//! By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled, +//! the `alloc` crate is added as a dependency, and some allocation-related +//! functionality is added. +//! +//! - **`std`** +//! By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the +//! `std` crate is added as a dependency (ie, `no_std` is disabled), and +//! support for some `std` types is added. `std` implies `alloc`. +//! +//! - **`derive`** +//! Provides derives for the core marker traits via the `zerocopy-derive` +//! crate. These derives are re-exported from `zerocopy`, so it is not +//! necessary to depend on `zerocopy-derive` directly. +//! +//! However, you may experience better compile times if you instead directly +//! 
depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`, +//! since doing so will allow Rust to compile these crates in parallel. To do +//! so, do *not* enable the `derive` feature, and list both dependencies in +//! your `Cargo.toml` with the same leading non-zero version number; e.g: +//! +//! ```toml +//! [dependencies] +//! zerocopy = "0.X" +//! zerocopy-derive = "0.X" +//! ``` +//! +//! To avoid the risk of [duplicate import errors][duplicate-import-errors] if +//! one of your dependencies enables zerocopy's `derive` feature, import +//! derives as `use zerocopy_derive::*` rather than by name (e.g., `use +//! zerocopy_derive::FromBytes`). +//! +//! - **`simd`** +//! When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and +//! `IntoBytes` impls are emitted for all stable SIMD types which exist on the +//! target platform. Note that the layout of SIMD types is not yet stabilized, +//! so these impls may be removed in the future if layout changes make them +//! invalid. For more information, see the Unsafe Code Guidelines Reference +//! page on the [layout of packed SIMD vectors][simd-layout]. +//! +//! - **`simd-nightly`** +//! Enables the `simd` feature and adds support for SIMD types which are only +//! available on nightly. Since these types are unstable, support for any type +//! may be removed at any point in the future. +//! +//! - **`float-nightly`** +//! Adds support for the unstable `f16` and `f128` types. These types are +//! not yet fully implemented and may not be supported on all platforms. +//! +//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587 +//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html +//! +//! # Security Ethos +//! +//! Zerocopy is expressly designed for use in security-critical contexts. We +//! strive to ensure that that zerocopy code is sound under Rust's current +//! memory model, and *any future memory model*. 
We ensure this by: +//! - **...not 'guessing' about Rust's semantics.** +//! We annotate `unsafe` code with a precise rationale for its soundness that +//! cites a relevant section of Rust's official documentation. When Rust's +//! documented semantics are unclear, we work with the Rust Operational +//! Semantics Team to clarify Rust's documentation. +//! - **...rigorously testing our implementation.** +//! We run tests using [Miri], ensuring that zerocopy is sound across a wide +//! array of supported target platforms of varying endianness and pointer +//! width, and across both current and experimental memory models of Rust. +//! - **...formally proving the correctness of our implementation.** +//! We apply formal verification tools like [Kani][kani] to prove zerocopy's +//! correctness. +//! +//! For more information, see our full [soundness policy]. +//! +//! [Miri]: https://github.com/rust-lang/miri +//! [Kani]: https://github.com/model-checking/kani +//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness +//! +//! # Relationship to Project Safe Transmute +//! +//! [Project Safe Transmute] is an official initiative of the Rust Project to +//! develop language-level support for safer transmutation. The Project consults +//! with crates like zerocopy to identify aspects of safer transmutation that +//! would benefit from compiler support, and has developed an [experimental, +//! compiler-supported analysis][mcp-transmutability] which determines whether, +//! for a given type, any value of that type may be soundly transmuted into +//! another type. Once this functionality is sufficiently mature, zerocopy +//! intends to replace its internal transmutability analysis (implemented by our +//! custom derives) with the compiler-supported one. This change will likely be +//! an implementation detail that is invisible to zerocopy's users. +//! +//! Project Safe Transmute will not replace the need for most of zerocopy's +//! 
higher-level abstractions. The experimental compiler analysis is a tool for +//! checking the soundness of `unsafe` code, not a tool to avoid writing +//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy +//! will still be required in order to provide higher-level abstractions on top +//! of the building block provided by Project Safe Transmute. +//! +//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html +//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411 +//! +//! # MSRV +//! +//! See our [MSRV policy]. +//! +//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv +//! +//! # Changelog +//! +//! Zerocopy uses [GitHub Releases]. +//! +//! [GitHub Releases]: https://github.com/google/zerocopy/releases +//! +//! # Thanks +//! +//! Zerocopy is maintained by engineers at Google with help from [many wonderful +//! contributors][contributors]. Thank you to everyone who has lent a hand in +//! making Rust a little more secure! +//! +//! [contributors]: https://github.com/google/zerocopy/graphs/contributors + +// Sometimes we want to use lints which were added after our MSRV. +// `unknown_lints` is `warn` by default and we deny warnings in CI, so without +// this attribute, any unknown lint would cause a CI failure when testing with +// our MSRV. +#![allow(unknown_lints, non_local_definitions, unreachable_patterns)] +#![deny(renamed_and_removed_lints)] +#![deny( + anonymous_parameters, + deprecated_in_future, + late_bound_lifetime_arguments, + missing_copy_implementations, + missing_debug_implementations, + missing_docs, + path_statements, + patterns_in_fns_without_body, + rust_2018_idioms, + trivial_numeric_casts, + unreachable_pub, + unsafe_op_in_unsafe_fn, + unused_extern_crates, + // We intentionally choose not to deny `unused_qualifications`. 
When items + // are added to the prelude (e.g., `core::mem::size_of`), this has the + // consequence of making some uses trigger this lint on the latest toolchain + // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`) + // does not work on older toolchains. + // + // We tested a more complicated fix in #1413, but ultimately decided that, + // since this lint is just a minor style lint, the complexity isn't worth it + // - it's fine to occasionally have unused qualifications slip through, + // especially since these do not affect our user-facing API in any way. + variant_size_differences +)] +#![cfg_attr( + __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, + deny(fuzzy_provenance_casts, lossy_provenance_casts) +)] +#![deny( + clippy::all, + clippy::alloc_instead_of_core, + clippy::arithmetic_side_effects, + clippy::as_underscore, + clippy::assertions_on_result_states, + clippy::as_conversions, + clippy::correctness, + clippy::dbg_macro, + clippy::decimal_literal_representation, + clippy::double_must_use, + clippy::get_unwrap, + clippy::indexing_slicing, + clippy::missing_inline_in_public_items, + clippy::missing_safety_doc, + clippy::multiple_unsafe_ops_per_block, + clippy::must_use_candidate, + clippy::must_use_unit, + clippy::obfuscated_if_else, + clippy::perf, + clippy::print_stdout, + clippy::return_self_not_must_use, + clippy::std_instead_of_core, + clippy::style, + clippy::suspicious, + clippy::todo, + clippy::undocumented_unsafe_blocks, + clippy::unimplemented, + clippy::unnested_or_patterns, + clippy::unwrap_used, + clippy::use_debug +)] +// `clippy::incompatible_msrv` (implied by `clippy::suspicious`): This sometimes +// has false positives, and we test on our MSRV in CI, so it doesn't help us +// anyway. 
+#![allow(clippy::needless_lifetimes, clippy::type_complexity, clippy::incompatible_msrv)] +#![deny( + rustdoc::bare_urls, + rustdoc::broken_intra_doc_links, + rustdoc::invalid_codeblock_attributes, + rustdoc::invalid_html_tags, + rustdoc::invalid_rust_codeblocks, + rustdoc::missing_crate_level_docs, + rustdoc::private_intra_doc_links +)] +// In test code, it makes sense to weight more heavily towards concise, readable +// code over correct or debuggable code. +#![cfg_attr(any(test, kani), allow( + // In tests, you get line numbers and have access to source code, so panic + // messages are less important. You also often unwrap a lot, which would + // make expect'ing instead very verbose. + clippy::unwrap_used, + // In tests, there's no harm to "panic risks" - the worst that can happen is + // that your test will fail, and you'll fix it. By contrast, panic risks in + // production code introduce the possibly of code panicking unexpectedly "in + // the field". + clippy::arithmetic_side_effects, + clippy::indexing_slicing, +))] +#![cfg_attr(not(any(test, kani, feature = "std")), no_std)] +#![cfg_attr( + all(feature = "simd-nightly", target_arch = "arm"), + feature(stdarch_arm_neon_intrinsics) +)] +#![cfg_attr( + all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")), + feature(stdarch_powerpc) +)] +#![cfg_attr(feature = "float-nightly", feature(f16, f128))] +#![cfg_attr(doc_cfg, feature(doc_cfg))] +#![cfg_attr(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, feature(coverage_attribute))] +#![cfg_attr( + any(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, miri), + feature(layout_for_ptr) +)] +#![cfg_attr(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), feature(test))] + +// This is a hack to allow zerocopy-derive derives to work in this crate. They +// assume that zerocopy is linked as an extern crate, so they access items from +// it as `zerocopy::Xxx`. This makes that still work. 
+#[cfg(any(feature = "derive", test))] +extern crate self as zerocopy; + +#[cfg(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS))] +extern crate test; + +#[doc(hidden)] +#[macro_use] +pub mod util; + +pub mod byte_slice; +pub mod byteorder; +mod deprecated; + +#[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE)] +pub mod doctests; + +// This module is `pub` so that zerocopy's error types and error handling +// documentation is grouped together in a cohesive module. In practice, we +// expect most users to use the re-export of `error`'s items to avoid identifier +// stuttering. +pub mod error; +mod impls; +#[doc(hidden)] +pub mod layout; +mod macros; +#[doc(hidden)] +pub mod pointer; +mod r#ref; +mod split_at; +// FIXME(#252): If we make this pub, come up with a better name. +mod wrappers; + +use core::{ + cell::{Cell, UnsafeCell}, + cmp::Ordering, + fmt::{self, Debug, Display, Formatter}, + hash::Hasher, + marker::PhantomData, + mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit}, + num::{ + NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128, + NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping, + }, + ops::{Deref, DerefMut}, + ptr::{self, NonNull}, + slice, +}; +#[cfg(feature = "std")] +use std::io; + +#[doc(hidden)] +pub use crate::pointer::invariant::{self, BecauseExclusive}; +#[doc(hidden)] +pub use crate::pointer::PtrInner; +pub use crate::{ + byte_slice::*, + byteorder::*, + error::*, + r#ref::*, + split_at::{Split, SplitAt}, + wrappers::*, +}; + +#[cfg(any(feature = "alloc", test, kani))] +extern crate alloc; +#[cfg(any(feature = "alloc", test))] +use alloc::{boxed::Box, vec::Vec}; +#[cfg(any(feature = "alloc", test))] +use core::alloc::Layout; + +use util::MetadataOf; + +// Used by `KnownLayout`. +#[doc(hidden)] +pub use crate::layout::*; +// Used by `TryFromBytes::is_bit_valid`. 
+#[doc(hidden)] +pub use crate::pointer::{invariant::BecauseImmutable, Maybe, Ptr}; +// For each trait polyfill, as soon as the corresponding feature is stable, the +// polyfill import will be unused because method/function resolution will prefer +// the inherent method/function over a trait method/function. Thus, we suppress +// the `unused_imports` warning. +// +// See the documentation on `util::polyfills` for more information. +#[allow(unused_imports)] +use crate::util::polyfills::{self, NonNullExt as _, NumExt as _}; + +#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE)))] +const _: () = { + #[deprecated = "Development of zerocopy using cargo is not supported. Please use `cargo.sh` or `win-cargo.bat` instead."] + #[allow(unused)] + const WARNING: () = (); + #[warn(deprecated)] + WARNING +}; + +/// Implements [`KnownLayout`]. +/// +/// This derive analyzes various aspects of a type's layout that are needed for +/// some of zerocopy's APIs. It can be applied to structs, enums, and unions; +/// e.g.: +/// +/// ``` +/// # use zerocopy_derive::KnownLayout; +/// #[derive(KnownLayout)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(KnownLayout)] +/// enum MyEnum { +/// # V00, +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(KnownLayout)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// # Limitations +/// +/// This derive cannot currently be applied to unsized structs without an +/// explicit `repr` attribute. +/// +/// Some invocations of this derive run afoul of a [known bug] in Rust's type +/// privacy checker. 
For example, this code: +/// +/// ```compile_fail,E0446 +/// use zerocopy::*; +/// # use zerocopy_derive::*; +/// +/// #[derive(KnownLayout)] +/// #[repr(C)] +/// pub struct PublicType { +/// leading: Foo, +/// trailing: Bar, +/// } +/// +/// #[derive(KnownLayout)] +/// struct Foo; +/// +/// #[derive(KnownLayout)] +/// struct Bar; +/// ``` +/// +/// ...results in a compilation error: +/// +/// ```text +/// error[E0446]: private type `Bar` in public interface +/// --> examples/bug.rs:3:10 +/// | +/// 3 | #[derive(KnownLayout)] +/// | ^^^^^^^^^^^ can't leak private type +/// ... +/// 14 | struct Bar; +/// | ---------- `Bar` declared as private +/// | +/// = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +/// ``` +/// +/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)` +/// structs whose trailing field type is less public than the enclosing struct. +/// +/// To work around this, mark the trailing field type `pub` and annotate it with +/// `#[doc(hidden)]`; e.g.: +/// +/// ```no_run +/// use zerocopy::*; +/// # use zerocopy_derive::*; +/// +/// #[derive(KnownLayout)] +/// #[repr(C)] +/// pub struct PublicType { +/// leading: Foo, +/// trailing: Bar, +/// } +/// +/// #[derive(KnownLayout)] +/// struct Foo; +/// +/// #[doc(hidden)] +/// #[derive(KnownLayout)] +/// pub struct Bar; // <- `Bar` is now also `pub` +/// ``` +/// +/// [known bug]: https://github.com/rust-lang/rust/issues/45713 +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::KnownLayout; +// These exist so that code which was written against the old names will get +// less confusing error messages when they upgrade to a more recent version of +// zerocopy. 
On our MSRV toolchain, the error messages read, for example: +// +// error[E0603]: trait `FromZeroes` is private +// --> examples/deprecated.rs:1:15 +// | +// 1 | use zerocopy::FromZeroes; +// | ^^^^^^^^^^ private trait +// | +// note: the trait `FromZeroes` is defined here +// --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5 +// | +// 1845 | use FromZeros as FromZeroes; +// | ^^^^^^^^^^^^^^^^^^^^^^^ +// +// The "note" provides enough context to make it easy to figure out how to fix +// the error. +#[allow(unused)] +use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified}; + +/// Indicates that zerocopy can reason about certain aspects of a type's layout. +/// +/// This trait is required by many of zerocopy's APIs. It supports sized types, +/// slices, and [slice DSTs](#dynamically-sized-types). +/// +/// # Implementation +/// +/// **Do not implement this trait yourself!** Instead, use +/// [`#[derive(KnownLayout)]`][derive]; e.g.: +/// +/// ``` +/// # use zerocopy_derive::KnownLayout; +/// #[derive(KnownLayout)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(KnownLayout)] +/// enum MyEnum { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(KnownLayout)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// This derive performs a sophisticated analysis to deduce the layout +/// characteristics of types. You **must** implement this trait via the derive. +/// +/// # Dynamically-sized types +/// +/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs"). +/// +/// A slice DST is a type whose trailing field is either a slice or another +/// slice DST, rather than a type with fixed size. For example: +/// +/// ``` +/// #[repr(C)] +/// struct PacketHeader { +/// # /* +/// ... 
+/// # */ +/// } +/// +/// #[repr(C)] +/// struct Packet { +/// header: PacketHeader, +/// body: [u8], +/// } +/// ``` +/// +/// It can be useful to think of slice DSTs as a generalization of slices - in +/// other words, a normal slice is just the special case of a slice DST with +/// zero leading fields. In particular: +/// - Like slices, slice DSTs can have different lengths at runtime +/// - Like slices, slice DSTs cannot be passed by-value, but only by reference +/// or via other indirection such as `Box` +/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST +/// encodes the number of elements in the trailing slice field +/// +/// ## Slice DST layout +/// +/// Just like other composite Rust types, the layout of a slice DST is not +/// well-defined unless it is specified using an explicit `#[repr(...)]` +/// attribute such as `#[repr(C)]`. [Other representations are +/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our +/// example. +/// +/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]` +/// types][repr-c-structs], but the presence of a variable-length field +/// introduces the possibility of *dynamic padding*. In particular, it may be +/// necessary to add trailing padding *after* the trailing slice field in order +/// to satisfy the outer type's alignment, and the amount of padding required +/// may be a function of the length of the trailing slice field. This is just a +/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs, +/// but it can result in surprising behavior. For example, consider the +/// following type: +/// +/// ``` +/// #[repr(C)] +/// struct Foo { +/// a: u32, +/// b: u8, +/// z: [u16], +/// } +/// ``` +/// +/// Assuming that `u32` has alignment 4 (this is not true on all platforms), +/// then `Foo` has alignment 4 as well. 
Here is the smallest possible value for +/// `Foo`: +/// +/// ```text +/// byte offset | 01234567 +/// field | aaaab--- +/// >< +/// ``` +/// +/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset +/// that we can place `z` at is 5, but since `z` has alignment 2, we need to +/// round up to offset 6. This means that there is one byte of padding between +/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and +/// then two bytes of padding after `z` in order to satisfy the overall +/// alignment of `Foo`. The size of this instance is 8 bytes. +/// +/// What about if `z` has length 1? +/// +/// ```text +/// byte offset | 01234567 +/// field | aaaab-zz +/// ``` +/// +/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means +/// that we no longer need padding after `z` in order to satisfy `Foo`'s +/// alignment. We've now seen two different values of `Foo` with two different +/// lengths of `z`, but they both have the same size - 8 bytes. +/// +/// What about if `z` has length 2? +/// +/// ```text +/// byte offset | 012345678901 +/// field | aaaab-zzzz-- +/// ``` +/// +/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded +/// size to 10, and so we now need another 2 bytes of padding after `z` to +/// satisfy `Foo`'s alignment. +/// +/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules +/// applied to slice DSTs, but it can be surprising that the amount of trailing +/// padding becomes a function of the trailing slice field's length, and thus +/// can only be computed at runtime. +/// +/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations +/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs +/// +/// ## What is a valid size? +/// +/// There are two places in zerocopy's API that we refer to "a valid size" of a +/// type. 
In normal casts or conversions, where the source is a byte slice, we +/// need to know whether the source byte slice is a valid size of the +/// destination type. In prefix or suffix casts, we need to know whether *there +/// exists* a valid size of the destination type which fits in the source byte +/// slice and, if so, what the largest such size is. +/// +/// As outlined above, a slice DST's size is defined by the number of elements +/// in its trailing slice field. However, there is not necessarily a 1-to-1 +/// mapping between trailing slice field length and overall size. As we saw in +/// the previous section with the type `Foo`, instances with both 0 and 1 +/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes. +/// +/// When we say "x is a valid size of `T`", we mean one of two things: +/// - If `T: Sized`, then we mean that `x == size_of::()` +/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of +/// `T` with `len` trailing slice elements has size `x` +/// +/// When we say "largest possible size of `T` that fits in a byte slice", we +/// mean one of two things: +/// - If `T: Sized`, then we mean `size_of::()` if the byte slice is at least +/// `size_of::()` bytes long +/// - If `T` is a slice DST, then we mean to consider all values, `len`, such +/// that the instance of `T` with `len` trailing slice elements fits in the +/// byte slice, and to choose the largest such `len`, if any +/// +/// +/// # Safety +/// +/// This trait does not convey any safety guarantees to code outside this crate. +/// +/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future +/// releases of zerocopy may make backwards-breaking changes to these items, +/// including changes that only affect soundness, which may cause code which +/// uses those items to silently become unsound. 
+/// +#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")] +#[cfg_attr( + not(feature = "derive"), + doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"), +)] +#[cfg_attr( + not(no_zerocopy_diagnostic_on_unimplemented_1_78_0), + diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`") +)] +pub unsafe trait KnownLayout { + // The `Self: Sized` bound makes it so that `KnownLayout` can still be + // object safe. It's not currently object safe thanks to `const LAYOUT`, and + // it likely won't be in the future, but there's no reason not to be + // forwards-compatible with object safety. + #[doc(hidden)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; + + /// The type of metadata stored in a pointer to `Self`. + /// + /// This is `()` for sized types and [`usize`] for slice DSTs. + type PointerMetadata: PointerMetadata; + + /// A maybe-uninitialized analog of `Self` + /// + /// # Safety + /// + /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical. + /// `Self::MaybeUninit` admits uninitialized bytes in all positions. + #[doc(hidden)] + type MaybeUninit: ?Sized + KnownLayout; + + /// The layout of `Self`. + /// + /// # Safety + /// + /// Callers may assume that `LAYOUT` accurately reflects the layout of + /// `Self`. 
In particular: + /// - `LAYOUT.align` is equal to `Self`'s alignment + /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }` + /// where `size == size_of::()` + /// - If `Self` is a slice DST, then `LAYOUT.size_info == + /// SizeInfo::SliceDst(slice_layout)` where: + /// - The size, `size`, of an instance of `Self` with `elems` trailing + /// slice elements is equal to `slice_layout.offset + + /// slice_layout.elem_size * elems` rounded up to the nearest multiple + /// of `LAYOUT.align` + /// - For such an instance, any bytes in the range `[slice_layout.offset + + /// slice_layout.elem_size * elems, size)` are padding and must not be + /// assumed to be initialized + #[doc(hidden)] + const LAYOUT: DstLayout; + + /// SAFETY: The returned pointer has the same address and provenance as + /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems` + /// elements in its trailing slice. + #[doc(hidden)] + fn raw_from_ptr_len(bytes: NonNull, meta: Self::PointerMetadata) -> NonNull; + + /// Extracts the metadata from a pointer to `Self`. + /// + /// # Safety + /// + /// `pointer_to_metadata` always returns the correct metadata stored in + /// `ptr`. + #[doc(hidden)] + fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata; + + /// Computes the length of the byte range addressed by `ptr`. + /// + /// Returns `None` if the resulting length would not fit in an `usize`. + /// + /// # Safety + /// + /// Callers may assume that `size_of_val_raw` always returns the correct + /// size. + /// + /// Callers may assume that, if `ptr` addresses a byte range whose length + /// fits in an `usize`, this will return `Some`. + #[doc(hidden)] + #[must_use] + #[inline(always)] + fn size_of_val_raw(ptr: NonNull) -> Option { + let meta = Self::pointer_to_metadata(ptr.as_ptr()); + // SAFETY: `size_for_metadata` promises to only return `None` if the + // resulting size would not fit in a `usize`. 
+ Self::size_for_metadata(meta) + } + + #[doc(hidden)] + #[must_use] + #[inline(always)] + fn raw_dangling() -> NonNull { + let meta = Self::PointerMetadata::from_elem_count(0); + Self::raw_from_ptr_len(NonNull::dangling(), meta) + } + + /// Computes the size of an object of type `Self` with the given pointer + /// metadata. + /// + /// # Safety + /// + /// `size_for_metadata` promises to return `None` if and only if the + /// resulting size would not fit in a [`usize`]. Note that the returned size + /// could exceed the actual maximum valid size of an allocated object, + /// [`isize::MAX`]. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::KnownLayout; + /// + /// assert_eq!(u8::size_for_metadata(()), Some(1)); + /// assert_eq!(u16::size_for_metadata(()), Some(2)); + /// assert_eq!(<[u8]>::size_for_metadata(42), Some(42)); + /// assert_eq!(<[u16]>::size_for_metadata(42), Some(84)); + /// + /// // This size exceeds the maximum valid object size (`isize::MAX`): + /// assert_eq!(<[u8]>::size_for_metadata(usize::MAX), Some(usize::MAX)); + /// + /// // This size, if computed, would exceed `usize::MAX`: + /// assert_eq!(<[u16]>::size_for_metadata(usize::MAX), None); + /// ``` + #[inline(always)] + fn size_for_metadata(meta: Self::PointerMetadata) -> Option { + meta.size_for_metadata(Self::LAYOUT) + } +} + +/// Efficiently produces the [`TrailingSliceLayout`] of `T`. +#[inline(always)] +pub(crate) fn trailing_slice_layout() -> TrailingSliceLayout +where + T: ?Sized + KnownLayout, +{ + trait LayoutFacts { + const SIZE_INFO: TrailingSliceLayout; + } + + impl LayoutFacts for T + where + T: KnownLayout, + { + const SIZE_INFO: TrailingSliceLayout = match T::LAYOUT.size_info { + crate::SizeInfo::Sized { .. } => const_panic!("unreachable"), + crate::SizeInfo::SliceDst(info) => info, + }; + } + + T::SIZE_INFO +} + +/// The metadata associated with a [`KnownLayout`] type. 
+#[doc(hidden)] +pub trait PointerMetadata: Copy + Eq + Debug { + /// Constructs a `Self` from an element count. + /// + /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns + /// `elems`. No other types are currently supported. + fn from_elem_count(elems: usize) -> Self; + + /// Converts `self` to an element count. + /// + /// If `Self = ()`, this returns `0`. If `Self = usize`, this returns + /// `self`. No other types are currently supported. + fn to_elem_count(self) -> usize; + + /// Computes the size of the object with the given layout and pointer + /// metadata. + /// + /// # Panics + /// + /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`, + /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may + /// panic. + /// + /// # Safety + /// + /// `size_for_metadata` promises to only return `None` if the resulting size + /// would not fit in a `usize`. + fn size_for_metadata(self, layout: DstLayout) -> Option; +} + +impl PointerMetadata for () { + #[inline] + #[allow(clippy::unused_unit)] + fn from_elem_count(_elems: usize) -> () {} + + #[inline] + fn to_elem_count(self) -> usize { + 0 + } + + #[inline] + fn size_for_metadata(self, layout: DstLayout) -> Option { + match layout.size_info { + SizeInfo::Sized { size } => Some(size), + // NOTE: This branch is unreachable, but we return `None` rather + // than `unreachable!()` to avoid generating panic paths. 
+ SizeInfo::SliceDst(_) => None, + } + } +} + +impl PointerMetadata for usize { + #[inline] + fn from_elem_count(elems: usize) -> usize { + elems + } + + #[inline] + fn to_elem_count(self) -> usize { + self + } + + #[inline] + fn size_for_metadata(self, layout: DstLayout) -> Option { + match layout.size_info { + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => { + let slice_len = elem_size.checked_mul(self)?; + let without_padding = offset.checked_add(slice_len)?; + without_padding.checked_add(util::padding_needed_for(without_padding, layout.align)) + } + // NOTE: This branch is unreachable, but we return `None` rather + // than `unreachable!()` to avoid generating panic paths. + SizeInfo::Sized { .. } => None, + } + } +} + +// SAFETY: Delegates safety to `DstLayout::for_slice`. +unsafe impl KnownLayout for [T] { + #[allow(clippy::missing_inline_in_public_items, dead_code)] + #[cfg_attr( + all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), + coverage(off) + )] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized, + { + } + + type PointerMetadata = usize; + + // SAFETY: `CoreMaybeUninit::LAYOUT` and `T::LAYOUT` are identical + // because `CoreMaybeUninit` has the same size and alignment as `T` [1]. + // Consequently, `[CoreMaybeUninit]::LAYOUT` and `[T]::LAYOUT` are + // identical, because they both lack a fixed-sized prefix and because they + // inherit the alignments of their inner element type (which are identical) + // [2][3]. + // + // `[CoreMaybeUninit]` admits uninitialized bytes at all positions + // because `CoreMaybeUninit` admits uninitialized bytes at all positions + // and because the inner elements of `[CoreMaybeUninit]` are laid out + // back-to-back [2][3]. 
+ // + // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1: + // + // `MaybeUninit` is guaranteed to have the same size, alignment, and ABI as + // `T` + // + // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout: + // + // Slices have the same layout as the section of the array they slice. + // + // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout: + // + // An array of `[T; N]` has a size of `size_of::() * N` and the same + // alignment of `T`. Arrays are laid out so that the zero-based `nth` + // element of the array is offset from the start of the array by `n * + // size_of::()` bytes. + type MaybeUninit = [CoreMaybeUninit]; + + const LAYOUT: DstLayout = DstLayout::for_slice::(); + + // SAFETY: `.cast` preserves address and provenance. The returned pointer + // refers to an object with `elems` elements by construction. + #[inline(always)] + fn raw_from_ptr_len(data: NonNull, elems: usize) -> NonNull { + // FIXME(#67): Remove this allow. See NonNullExt for more details. + #[allow(unstable_name_collisions)] + NonNull::slice_from_raw_parts(data.cast::(), elems) + } + + #[inline(always)] + fn pointer_to_metadata(ptr: *mut [T]) -> usize { + #[allow(clippy::as_conversions)] + let slc = ptr as *const [()]; + + // SAFETY: + // - `()` has alignment 1, so `slc` is trivially aligned. + // - `slc` was derived from a non-null pointer. + // - The size is 0 regardless of the length, so it is sound to + // materialize a reference regardless of location. + // - By invariant, `self.ptr` has valid provenance. + let slc = unsafe { &*slc }; + + // This is correct because the preceding `as` cast preserves the number + // of slice elements. 
[1] + // + // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast: + // + // For slice types like `[T]` and `[U]`, the raw pointer types `*const + // [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of + // elements in this slice. Casts between these raw pointer types + // preserve the number of elements. ... The same holds for `str` and + // any compound type whose unsized tail is a slice type, such as + // struct `Foo(i32, [u8])` or `(u64, Foo)`. + slc.len() + } +} + +#[rustfmt::skip] +impl_known_layout!( + (), + u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64, + bool, char, + NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32, + NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize +); +#[rustfmt::skip] +#[cfg(feature = "float-nightly")] +impl_known_layout!( + #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))] + f16, + #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))] + f128 +); +#[rustfmt::skip] +impl_known_layout!( + T => Option, + T: ?Sized => PhantomData, + T => Wrapping, + T => CoreMaybeUninit, + T: ?Sized => *const T, + T: ?Sized => *mut T, + T: ?Sized => &'_ T, + T: ?Sized => &'_ mut T, +); +impl_known_layout!(const N: usize, T => [T; N]); + +// SAFETY: `str` has the same representation as `[u8]`. `ManuallyDrop` [1], +// `UnsafeCell` [2], and `Cell` [3] have the same representation as `T`. +// +// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html: +// +// `ManuallyDrop` is guaranteed to have the same layout and bit validity as +// `T` +// +// [2] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.UnsafeCell.html#memory-layout: +// +// `UnsafeCell` has the same in-memory representation as its inner type +// `T`. +// +// [3] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.Cell.html#memory-layout: +// +// `Cell` has the same in-memory representation as `T`. 
+#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + unsafe_impl_known_layout!( + #[repr([u8])] + str + ); + unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop); + unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell); + unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] Cell); +}; + +// SAFETY: +// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT` and +// `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit` have the same: +// - Fixed prefix size +// - Alignment +// - (For DSTs) trailing slice element size +// - By consequence of the above, referents `T::MaybeUninit` and `T` have the +// require the same kind of pointer metadata, and thus it is valid to perform +// an `as` cast from `*mut T` and `*mut T::MaybeUninit`, and this operation +// preserves referent size (ie, `size_of_val_raw`). +const _: () = unsafe { + unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit) +}; + +// FIXME(#196, #2856): Eventually, we'll want to support enums variants and +// union fields being treated uniformly since they behave similarly to each +// other in terms of projecting validity – specifically, for a type `T` with +// validity `V`, if `T` is a struct type, then its fields straightforwardly also +// have validity `V`. By contrast, if `T` is an enum or union type, then +// validity is not straightforwardly recursive in this way. +#[doc(hidden)] +pub const STRUCT_VARIANT_ID: i128 = -1; +#[doc(hidden)] +pub const UNION_VARIANT_ID: i128 = -2; +#[doc(hidden)] +pub const REPR_C_UNION_VARIANT_ID: i128 = -3; + +/// # Safety +/// +/// `Self::ProjectToTag` must satisfy its safety invariant. +#[doc(hidden)] +pub unsafe trait HasTag { + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; + + /// The type's enum tag, or `()` for non-enum types. + type Tag: Immutable; + + /// A pointer projection from `Self` to its tag. 
+ /// + /// # Safety + /// + /// It must be the case that, for all `slf: Ptr<'_, Self, I>`, it is sound + /// to project from `slf` to `Ptr<'_, Self::Tag, I>` using this projection. + type ProjectToTag: pointer::cast::Project; +} + +/// Projects a given field from `Self`. +/// +/// All implementations of `HasField` for a particular field `f` in `Self` +/// should use the same `Field` type; this ensures that `Field` is inferable +/// given an explicit `VARIANT_ID` and `FIELD_ID`. +/// +/// # Safety +/// +/// A field `f` is `HasField` for `Self` if and only if: +/// +/// - If `Self` has the layout of a struct or union type, then `VARIANT_ID` is +/// `STRUCT_VARIANT_ID` or `UNION_VARIANT_ID` respectively; otherwise, if +/// `Self` has the layout of an enum type, `VARIANT_ID` is the numerical index +/// of the enum variant in which `f` appears. Note that `Self` does not need +/// to actually *be* such a type – it just needs to have the same layout as +/// such a type. For example, a `#[repr(transparent)]` wrapper around an enum +/// has the same layout as that enum. +/// - If `f` has name `n`, `FIELD_ID` is `zerocopy::ident_id!(n)`; otherwise, +/// if `f` is at index `i`, `FIELD_ID` is `zerocopy::ident_id!(i)`. +/// - `Field` is a type with the same visibility as `f`. +/// - `Type` has the same type as `f`. +/// +/// The caller must **not** assume that a pointer's referent being aligned +/// implies that calling `project` on that pointer will result in a pointer to +/// an aligned referent. For example, `HasField` may be implemented for +/// `#[repr(packed)]` structs. +/// +/// The implementation of `project` must satisfy its safety post-condition. +#[doc(hidden)] +pub unsafe trait HasField: + HasTag +{ + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; + + /// The type of the field. + type Type: ?Sized; + + /// Projects from `slf` to the field. 
+ /// + /// Users should generally not call `project` directly, and instead should + /// use high-level APIs like [`PtrInner::project`] or [`Ptr::project`]. + /// + /// # Safety + /// + /// The returned pointer refers to a non-strict subset of the bytes of + /// `slf`'s referent, and has the same provenance as `slf`. + #[must_use] + fn project(slf: PtrInner<'_, Self>) -> *mut Self::Type; +} + +/// Projects a given field from `Self`. +/// +/// Implementations of this trait encode the conditions under which a field can +/// be projected from a `Ptr<'_, Self, I>`, and how the invariants of that +/// [`Ptr`] (`I`) determine the invariants of pointers projected from it. In +/// other words, it is a type-level function over invariants; `I` goes in, +/// `Self::Invariants` comes out. +/// +/// # Safety +/// +/// `T: ProjectField` if, for a +/// `ptr: Ptr<'_, T, I>` such that `T::is_projectable(ptr).is_ok()`, +/// `>::project(ptr.as_inner())` +/// conforms to `T::Invariants`. +#[doc(hidden)] +pub unsafe trait ProjectField: + HasField +where + I: invariant::Invariants, +{ + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; + + /// The invariants of the projected field pointer, with respect to the + /// invariants, `I`, of the containing pointer. The aliasing dimension of + /// the invariants is guaranteed to remain unchanged. + type Invariants: invariant::Invariants; + + /// The failure mode of projection. `()` if the projection is fallible, + /// otherwise [`core::convert::Infallible`]. + type Error; + + /// Is the given field projectable from `ptr`? + /// + /// If a field with [`Self::Invariants`] is projectable from the referent, + /// this function produces an `Ok(ptr)` from which the projection can be + /// made; otherwise `Err`. + /// + /// This method must be overriden if the field's projectability depends on + /// the value of the bytes in `ptr`. 
+ #[inline(always)] + fn is_projectable<'a>(_ptr: Ptr<'a, Self::Tag, I>) -> Result<(), Self::Error> { + trait IsInfallible { + const IS_INFALLIBLE: bool; + } + + struct Projection( + PhantomData<(Field, I, T)>, + ) + where + T: ?Sized + HasField, + I: invariant::Invariants; + + impl IsInfallible + for Projection + where + T: ?Sized + HasField, + I: invariant::Invariants, + { + const IS_INFALLIBLE: bool = { + let is_infallible = match VARIANT_ID { + // For nondestructive projections of struct and union + // fields, the projected field's satisfaction of + // `Invariants` does not depend on the value of the + // referent. This default implementation of `is_projectable` + // is non-destructive, as it does not overwrite any part of + // the referent. + crate::STRUCT_VARIANT_ID | crate::UNION_VARIANT_ID => true, + _enum_variant => { + use crate::invariant::{Validity, ValidityKind}; + match I::Validity::KIND { + // The `Uninit` and `Initialized` validity + // invariants do not depend on the enum's tag. In + // particular, we don't actually care about what + // variant is present – we can treat *any* range of + // uninitialized or initialized memory as containing + // an uninitialized or initialized instance of *any* + // type – the type itself is irrelevant. + ValidityKind::Uninit | ValidityKind::Initialized => true, + // The projectability of an enum field from an + // `AsInitialized` or `Valid` state is a dynamic + // property of its tag. + ValidityKind::AsInitialized | ValidityKind::Valid => false, + } + } + }; + const_assert!(is_infallible); + is_infallible + }; + } + + const_assert!( + as IsInfallible>::IS_INFALLIBLE + ); + + Ok(()) + } +} + +/// Analyzes whether a type is [`FromZeros`]. +/// +/// This derive analyzes, at compile time, whether the annotated type satisfies +/// the [safety conditions] of `FromZeros` and implements `FromZeros` and its +/// supertraits if it is sound to do so. 
This derive can be applied to structs, +/// enums, and unions; e.g.: +/// +/// ``` +/// # use zerocopy_derive::{FromZeros, Immutable}; +/// #[derive(FromZeros)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(FromZeros)] +/// #[repr(u8)] +/// enum MyEnum { +/// # Variant0, +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(FromZeros, Immutable)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// [safety conditions]: trait@FromZeros#safety +/// +/// # Analysis +/// +/// *This section describes, roughly, the analysis performed by this derive to +/// determine whether it is sound to implement `FromZeros` for a given type. +/// Unless you are modifying the implementation of this derive, or attempting to +/// manually implement `FromZeros` for a type yourself, you don't need to read +/// this section.* +/// +/// If a type has the following properties, then this derive can implement +/// `FromZeros` for that type: +/// +/// - If the type is a struct, all of its fields must be `FromZeros`. +/// - If the type is an enum: +/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`, +/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`). +/// - It must have a variant with a discriminant/tag of `0`, and its fields +/// must be `FromZeros`. See [the reference] for a description of +/// discriminant values are specified. +/// - The fields of that variant must be `FromZeros`. +/// +/// This analysis is subject to change. Unsafe code may *only* rely on the +/// documented [safety conditions] of `FromZeros`, and must *not* rely on the +/// implementation details of this derive. +/// +/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations +/// +/// ## Why isn't an explicit representation required for structs? 
+/// +/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires +/// that structs are marked with `#[repr(C)]`. +/// +/// Per the [Rust reference](reference), +/// +/// > The representation of a type can change the padding between fields, but +/// > does not change the layout of the fields themselves. +/// +/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations +/// +/// Since the layout of structs only consists of padding bytes and field bytes, +/// a struct is soundly `FromZeros` if: +/// 1. its padding is soundly `FromZeros`, and +/// 2. its fields are soundly `FromZeros`. +/// +/// The answer to the first question is always yes: padding bytes do not have +/// any validity constraints. A [discussion] of this question in the Unsafe Code +/// Guidelines Working Group concluded that it would be virtually unimaginable +/// for future versions of rustc to add validity constraints to padding bytes. +/// +/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174 +/// +/// Whether a struct is soundly `FromZeros` therefore solely depends on whether +/// its fields are `FromZeros`. +// FIXME(#146): Document why we don't require an enum to have an explicit `repr` +// attribute. +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::FromZeros; +/// Analyzes whether a type is [`Immutable`]. +/// +/// This derive analyzes, at compile time, whether the annotated type satisfies +/// the [safety conditions] of `Immutable` and implements `Immutable` if it is +/// sound to do so. This derive can be applied to structs, enums, and unions; +/// e.g.: +/// +/// ``` +/// # use zerocopy_derive::Immutable; +/// #[derive(Immutable)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(Immutable)] +/// enum MyEnum { +/// # Variant0, +/// # /* +/// ... 
+/// # */ +/// } +/// +/// #[derive(Immutable)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// # Analysis +/// +/// *This section describes, roughly, the analysis performed by this derive to +/// determine whether it is sound to implement `Immutable` for a given type. +/// Unless you are modifying the implementation of this derive, you don't need +/// to read this section.* +/// +/// If a type has the following properties, then this derive can implement +/// `Immutable` for that type: +/// +/// - All fields must be `Immutable`. +/// +/// This analysis is subject to change. Unsafe code may *only* rely on the +/// documented [safety conditions] of `Immutable`, and must *not* rely on the +/// implementation details of this derive. +/// +/// [safety conditions]: trait@Immutable#safety +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::Immutable; + +/// Types which are free from interior mutability. +/// +/// `T: Immutable` indicates that `T` does not permit interior mutation, except +/// by ownership or an exclusive (`&mut`) borrow. +/// +/// # Implementation +/// +/// **Do not implement this trait yourself!** Instead, use +/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature); +/// e.g.: +/// +/// ``` +/// # use zerocopy_derive::Immutable; +/// #[derive(Immutable)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(Immutable)] +/// enum MyEnum { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(Immutable)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// This derive performs a sophisticated, compile-time safety analysis to +/// determine whether a type is `Immutable`. +/// +/// # Safety +/// +/// Unsafe code outside of this crate must not make any assumptions about `T` +/// based on `T: Immutable`. 
We reserve the right to relax the requirements for +/// `Immutable` in the future, and if unsafe code outside of this crate makes +/// assumptions based on `T: Immutable`, future relaxations may cause that code +/// to become unsound. +/// +// # Safety (Internal) +// +// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given +// `t: &T`, `t` does not permit interior mutation of its referent. Because +// [`UnsafeCell`] is the only type which permits interior mutation, it is +// sufficient (though not necessary) to guarantee that `T` contains no +// `UnsafeCell`s. +// +// [`UnsafeCell`]: core::cell::UnsafeCell +#[cfg_attr( + feature = "derive", + doc = "[derive]: zerocopy_derive::Immutable", + doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis" +)] +#[cfg_attr( + not(feature = "derive"), + doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"), + doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"), +)] +#[cfg_attr( + not(no_zerocopy_diagnostic_on_unimplemented_1_78_0), + diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`") +)] +pub unsafe trait Immutable { + // The `Self: Sized` bound makes it so that `Immutable` is still object + // safe. + #[doc(hidden)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; +} + +/// Implements [`TryFromBytes`]. +/// +/// This derive synthesizes the runtime checks required to check whether a +/// sequence of initialized bytes corresponds to a valid instance of a type. +/// This derive can be applied to structs, enums, and unions; e.g.: +/// +/// ``` +/// # use zerocopy_derive::{TryFromBytes, Immutable}; +/// #[derive(TryFromBytes)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(TryFromBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # V00, +/// # /* +/// ... 
+/// # */ +/// } +/// +/// #[derive(TryFromBytes, Immutable)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// # Portability +/// +/// To ensure consistent endianness for enums with multi-byte representations, +/// explicitly specify and convert each discriminant using `.to_le()` or +/// `.to_be()`; e.g.: +/// +/// ``` +/// # use zerocopy_derive::TryFromBytes; +/// // `DataStoreVersion` is encoded in little-endian. +/// #[derive(TryFromBytes)] +/// #[repr(u32)] +/// pub enum DataStoreVersion { +/// /// Version 1 of the data store. +/// V1 = 9u32.to_le(), +/// +/// /// Version 2 of the data store. +/// V2 = 10u32.to_le(), +/// } +/// ``` +/// +/// [safety conditions]: trait@TryFromBytes#safety +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::TryFromBytes; + +/// Types for which some bit patterns are valid. +/// +/// A memory region of the appropriate length which contains initialized bytes +/// can be viewed as a `TryFromBytes` type so long as the runtime value of those +/// bytes corresponds to a [*valid instance*] of that type. For example, +/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a +/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or +/// `1`. +/// +/// # Implementation +/// +/// **Do not implement this trait yourself!** Instead, use +/// [`#[derive(TryFromBytes)]`][derive]; e.g.: +/// +/// ``` +/// # use zerocopy_derive::{TryFromBytes, Immutable}; +/// #[derive(TryFromBytes)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(TryFromBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # V00, +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(TryFromBytes, Immutable)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// This derive ensures that the runtime check of whether bytes correspond to a +/// valid instance is sound. 
You **must** implement this trait via the derive. +/// +/// # What is a "valid instance"? +/// +/// In Rust, each type has *bit validity*, which refers to the set of bit +/// patterns which may appear in an instance of that type. It is impossible for +/// safe Rust code to produce values which violate bit validity (ie, values +/// outside of the "valid" set of bit patterns). If `unsafe` code produces an +/// invalid value, this is considered [undefined behavior]. +/// +/// Rust's bit validity rules are currently being decided, which means that some +/// types have three classes of bit patterns: those which are definitely valid, +/// and whose validity is documented in the language; those which may or may not +/// be considered valid at some point in the future; and those which are +/// definitely invalid. +/// +/// Zerocopy takes a conservative approach, and only considers a bit pattern to +/// be valid if its validity is a documented guarantee provided by the +/// language. +/// +/// For most use cases, Rust's current guarantees align with programmers' +/// intuitions about what ought to be valid. As a result, zerocopy's +/// conservatism should not affect most users. +/// +/// If you are negatively affected by lack of support for a particular type, +/// we encourage you to let us know by [filing an issue][github-repo]. +/// +/// # `TryFromBytes` is not symmetrical with [`IntoBytes`] +/// +/// There are some types which implement both `TryFromBytes` and [`IntoBytes`], +/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences +/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes + +/// IntoBytes`, there exist values of `t: T` such that +/// `TryFromBytes::try_ref_from_bytes(t.as_bytes()) == None`. Code should not +/// generally assume that values produced by `IntoBytes` will necessarily be +/// accepted as valid by `TryFromBytes`. 
+/// +/// # Safety +/// +/// On its own, `T: TryFromBytes` does not make any guarantees about the layout +/// or representation of `T`. It merely provides the ability to perform a +/// validity check at runtime via methods like [`try_ref_from_bytes`]. +/// +/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`. +/// Future releases of zerocopy may make backwards-breaking changes to these +/// items, including changes that only affect soundness, which may cause code +/// which uses those items to silently become unsound. +/// +/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html +/// [github-repo]: https://github.com/google/zerocopy +/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes +/// [*valid instance*]: #what-is-a-valid-instance +#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")] +#[cfg_attr( + not(feature = "derive"), + doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"), +)] +#[cfg_attr( + not(no_zerocopy_diagnostic_on_unimplemented_1_78_0), + diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`") +)] +pub unsafe trait TryFromBytes { + // The `Self: Sized` bound makes it so that `TryFromBytes` is still object + // safe. + #[doc(hidden)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; + + /// Does a given memory range contain a valid instance of `Self`? + /// + /// # Safety + /// + /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true, + /// `*candidate` contains a valid `Self`. + /// + /// # Panics + /// + /// `is_bit_valid` may panic. Callers are responsible for ensuring that any + /// `unsafe` code remains sound even in the face of `is_bit_valid` + /// panicking. 
(We support user-defined validation routines; so long as + /// these routines are not required to be `unsafe`, there is no way to + /// ensure that these do not generate panics.) + /// + /// Besides user-defined validation routines panicking, `is_bit_valid` will + /// either panic or fail to compile if called on a pointer with [`Shared`] + /// aliasing when `Self: !Immutable`. + /// + /// [`UnsafeCell`]: core::cell::UnsafeCell + /// [`Shared`]: invariant::Shared + #[doc(hidden)] + fn is_bit_valid(candidate: Maybe<'_, Self, A>) -> bool + where + A: invariant::Alignment; + + /// Attempts to interpret the given `source` as a `&Self`. + /// + /// If the bytes of `source` are a valid instance of `Self`, this method + /// returns a reference to those bytes interpreted as a `Self`. If the + /// length of `source` is not a [valid size of `Self`][valid-size], or if + /// `source` is not appropriately aligned, or if `source` is not a valid + /// instance of `Self`, this returns `Err`. If [`Self: + /// Unaligned`][self-unaligned], you can [infallibly discard the alignment + /// error][ConvertError::from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. + /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error! 
+ /// ``` + /// + /// # Examples + /// + /// ``` + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the byte sequence `0xC0C0`. + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..]; + /// + /// let packet = Packet::try_ref_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..]; + /// assert!(Packet::try_ref_from_bytes(bytes).is_err()); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>> + where + Self: KnownLayout + Immutable, + { + static_assert_dst_is_not_zst!(Self); + match Ptr::from_ref(source).try_cast_into_no_leftover::(None) { + Ok(source) => { + // This call may panic. If that happens, it doesn't cause any soundness + // issues, as we have not generated any invalid state which we need to + // fix before returning. + match source.try_into_valid() { + Ok(valid) => Ok(valid.as_ref()), + Err(e) => { + Err(e.map_src(|src| src.as_bytes::().as_ref()).into()) + } + } + } + Err(e) => Err(e.map_src(Ptr::as_ref).into()), + } + } + + /// Attempts to interpret the prefix of the given `source` as a `&Self`. 
+ /// + /// This method computes the [largest possible size of `Self`][valid-size] + /// that can fit in the leading bytes of `source`. If that prefix is a valid + /// instance of `Self`, this method returns a reference to those bytes + /// interpreted as `Self`, and a reference to the remaining bytes. If there + /// are insufficient bytes, or if `source` is not appropriately aligned, or + /// if those bytes are not a valid instance of `Self`, this returns `Err`. + /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the + /// alignment error][ConvertError::from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. + /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error! + /// ``` + /// + /// # Examples + /// + /// ``` + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. 
+ /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// // These are more bytes than are needed to encode a `Packet`. + /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; + /// + /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]); + /// assert_eq!(suffix, &[6u8][..]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; + /// assert!(Packet::try_ref_from_prefix(bytes).is_err()); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>> + where + Self: KnownLayout + Immutable, + { + static_assert_dst_is_not_zst!(Self); + try_ref_from_prefix_suffix(source, CastType::Prefix, None) + } + + /// Attempts to interpret the suffix of the given `source` as a `&Self`. + /// + /// This method computes the [largest possible size of `Self`][valid-size] + /// that can fit in the trailing bytes of `source`. If that suffix is a + /// valid instance of `Self`, this method returns a reference to those bytes + /// interpreted as `Self`, and a reference to the preceding bytes. If there + /// are insufficient bytes, or if the suffix of `source` would not be + /// appropriately aligned, or if the suffix is not a valid instance of + /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you + /// can [infallibly discard the alignment error][ConvertError::from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. 
+ /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error! + /// ``` + /// + /// # Examples + /// + /// ``` + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// // These are more bytes than are needed to encode a `Packet`. + /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; + /// + /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); + /// assert_eq!(prefix, &[0u8][..]); + /// + /// // These bytes are not valid instance of `Packet`. 
+ /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..]; + /// assert!(Packet::try_ref_from_suffix(bytes).is_err()); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>> + where + Self: KnownLayout + Immutable, + { + static_assert_dst_is_not_zst!(Self); + try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap) + } + + /// Attempts to interpret the given `source` as a `&mut Self` without + /// copying. + /// + /// If the bytes of `source` are a valid instance of `Self`, this method + /// returns a reference to those bytes interpreted as a `Self`. If the + /// length of `source` is not a [valid size of `Self`][valid-size], or if + /// `source` is not appropriately aligned, or if `source` is not a valid + /// instance of `Self`, this returns `Err`. If [`Self: + /// Unaligned`][self-unaligned], you can [infallibly discard the alignment + /// error][ConvertError::from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. + /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: [u8; 2], + /// trailing_dst: [()], + /// } + /// + /// let mut source = [85, 85]; + /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // ⚠ Compile Error! 
+ /// ``` + /// + /// # Examples + /// + /// ``` + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..]; + /// + /// let packet = Packet::try_mut_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]); + /// + /// packet.temperature = 111; + /// + /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; + /// assert!(Packet::try_mut_from_bytes(bytes).is_err()); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn try_mut_from_bytes(bytes: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>> + where + Self: KnownLayout + IntoBytes, + { + static_assert_dst_is_not_zst!(Self); + match Ptr::from_mut(bytes).try_cast_into_no_leftover::(None) { + Ok(source) => { + // This call may panic. If that happens, it doesn't cause any soundness + // issues, as we have not generated any invalid state which we need to + // fix before returning. 
+ match source.try_into_valid() { + Ok(source) => Ok(source.as_mut()), + Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()), + } + } + Err(e) => Err(e.map_src(Ptr::as_mut).into()), + } + } + + /// Attempts to interpret the prefix of the given `source` as a `&mut + /// Self`. + /// + /// This method computes the [largest possible size of `Self`][valid-size] + /// that can fit in the leading bytes of `source`. If that prefix is a valid + /// instance of `Self`, this method returns a reference to those bytes + /// interpreted as `Self`, and a reference to the remaining bytes. If there + /// are insufficient bytes, or if `source` is not appropriately aligned, or + /// if the bytes are not a valid instance of `Self`, this returns `Err`. If + /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the + /// alignment error][ConvertError::from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. + /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: [u8; 2], + /// trailing_dst: [()], + /// } + /// + /// let mut source = [85, 85]; + /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // ⚠ Compile Error! 
+ /// ``` + /// + /// # Examples + /// + /// ``` + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// // These are more bytes than are needed to encode a `Packet`. + /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; + /// + /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]); + /// assert_eq!(suffix, &[6u8][..]); + /// + /// packet.temperature = 111; + /// suffix[0] = 222; + /// + /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; + /// assert!(Packet::try_mut_from_prefix(bytes).is_err()); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn try_mut_from_prefix( + source: &mut [u8], + ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>> + where + Self: KnownLayout + IntoBytes, + { + static_assert_dst_is_not_zst!(Self); + try_mut_from_prefix_suffix(source, CastType::Prefix, None) + } + + /// Attempts to interpret the suffix of the given `source` as a `&mut + /// Self`. + /// + /// This method computes the [largest possible size of `Self`][valid-size] + /// that can fit in the trailing bytes of `source`. 
If that suffix is a + /// valid instance of `Self`, this method returns a reference to those bytes + /// interpreted as `Self`, and a reference to the preceding bytes. If there + /// are insufficient bytes, or if the suffix of `source` would not be + /// appropriately aligned, or if the suffix is not a valid instance of + /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you + /// can [infallibly discard the alignment error][ConvertError::from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. + /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let mut source = [85, 85]; + /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // ⚠ Compile Error! + /// ``` + /// + /// # Examples + /// + /// ``` + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. 
+ /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// // These are more bytes than are needed to encode a `Packet`. + /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; + /// + /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); + /// assert_eq!(prefix, &[0u8][..]); + /// + /// prefix[0] = 111; + /// packet.temperature = 222; + /// + /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..]; + /// assert!(Packet::try_mut_from_suffix(bytes).is_err()); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn try_mut_from_suffix( + source: &mut [u8], + ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>> + where + Self: KnownLayout + IntoBytes, + { + static_assert_dst_is_not_zst!(Self); + try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap) + } + + /// Attempts to interpret the given `source` as a `&Self` with a DST length + /// equal to `count`. + /// + /// This method attempts to return a reference to `source` interpreted as a + /// `Self` with `count` trailing elements. If the length of `source` is not + /// equal to the size of `Self` with `count` elements, if `source` is not + /// appropriately aligned, or if `source` does not contain a valid instance + /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], + /// you can [infallibly discard the alignment error][ConvertError::from]. 
+ /// + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Examples + /// + /// ``` + /// # #![allow(non_camel_case_types)] // For C0::xC0 + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; + /// + /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..]; + /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err()); + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`] + /// which do not take an explicit count do not support such types. 
+ /// + /// ``` + /// use core::num::NonZeroU16; + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: NonZeroU16, + /// trailing_dst: [()], + /// } + /// + /// let src = 0xCAFEu16.as_bytes(); + /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes + #[must_use = "has no side effects"] + #[inline] + fn try_ref_from_bytes_with_elems( + source: &[u8], + count: usize, + ) -> Result<&Self, TryCastError<&[u8], Self>> + where + Self: KnownLayout + Immutable, + { + match Ptr::from_ref(source).try_cast_into_no_leftover::(Some(count)) + { + Ok(source) => { + // This call may panic. If that happens, it doesn't cause any soundness + // issues, as we have not generated any invalid state which we need to + // fix before returning. + match source.try_into_valid() { + Ok(source) => Ok(source.as_ref()), + Err(e) => { + Err(e.map_src(|src| src.as_bytes::().as_ref()).into()) + } + } + } + Err(e) => Err(e.map_src(Ptr::as_ref).into()), + } + } + + /// Attempts to interpret the prefix of the given `source` as a `&Self` with + /// a DST length equal to `count`. + /// + /// This method attempts to return a reference to the prefix of `source` + /// interpreted as a `Self` with `count` trailing elements, and a reference + /// to the remaining bytes. If the length of `source` is less than the size + /// of `Self` with `count` elements, if `source` is not appropriately + /// aligned, or if the prefix of `source` does not contain a valid instance + /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], + /// you can [infallibly discard the alignment error][ConvertError::from]. 
+ /// + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Examples + /// + /// ``` + /// # #![allow(non_camel_case_types)] // For C0::xC0 + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..]; + /// + /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); + /// assert_eq!(suffix, &[8u8][..]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..]; + /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err()); + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`] + /// which do not take an explicit count do not support such types. 
+ /// + /// ``` + /// use core::num::NonZeroU16; + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: NonZeroU16, + /// trailing_dst: [()], + /// } + /// + /// let src = 0xCAFEu16.as_bytes(); + /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix + #[must_use = "has no side effects"] + #[inline] + fn try_ref_from_prefix_with_elems( + source: &[u8], + count: usize, + ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>> + where + Self: KnownLayout + Immutable, + { + try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count)) + } + + /// Attempts to interpret the suffix of the given `source` as a `&Self` with + /// a DST length equal to `count`. + /// + /// This method attempts to return a reference to the suffix of `source` + /// interpreted as a `Self` with `count` trailing elements, and a reference + /// to the preceding bytes. If the length of `source` is less than the size + /// of `Self` with `count` elements, if the suffix of `source` is not + /// appropriately aligned, or if the suffix of `source` does not contain a + /// valid instance of `Self`, this returns `Err`. If [`Self: + /// Unaligned`][self-unaligned], you can [infallibly discard the alignment + /// error][ConvertError::from]. + /// + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Examples + /// + /// ``` + /// # #![allow(non_camel_case_types)] // For C0::xC0 + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. 
+ /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; + /// + /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); + /// assert_eq!(prefix, &[123u8][..]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..]; + /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err()); + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`] + /// which do not take an explicit count do not support such types. 
+ /// + /// ``` + /// use core::num::NonZeroU16; + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: NonZeroU16, + /// trailing_dst: [()], + /// } + /// + /// let src = 0xCAFEu16.as_bytes(); + /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix + #[must_use = "has no side effects"] + #[inline] + fn try_ref_from_suffix_with_elems( + source: &[u8], + count: usize, + ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>> + where + Self: KnownLayout + Immutable, + { + try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap) + } + + /// Attempts to interpret the given `source` as a `&mut Self` with a DST + /// length equal to `count`. + /// + /// This method attempts to return a reference to `source` interpreted as a + /// `Self` with `count` trailing elements. If the length of `source` is not + /// equal to the size of `Self` with `count` elements, if `source` is not + /// appropriately aligned, or if `source` does not contain a valid instance + /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], + /// you can [infallibly discard the alignment error][ConvertError::from]. + /// + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Examples + /// + /// ``` + /// # #![allow(non_camel_case_types)] // For C0::xC0 + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. 
+ /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; + /// + /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); + /// + /// packet.temperature = 111; + /// + /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..]; + /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err()); + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`] + /// which do not take an explicit count do not support such types. 
+ /// + /// ``` + /// use core::num::NonZeroU16; + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: NonZeroU16, + /// trailing_dst: [()], + /// } + /// + /// let mut src = 0xCAFEu16; + /// let src = src.as_mut_bytes(); + /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes + #[must_use = "has no side effects"] + #[inline] + fn try_mut_from_bytes_with_elems( + source: &mut [u8], + count: usize, + ) -> Result<&mut Self, TryCastError<&mut [u8], Self>> + where + Self: KnownLayout + IntoBytes, + { + match Ptr::from_mut(source).try_cast_into_no_leftover::(Some(count)) + { + Ok(source) => { + // This call may panic. If that happens, it doesn't cause any soundness + // issues, as we have not generated any invalid state which we need to + // fix before returning. + match source.try_into_valid() { + Ok(source) => Ok(source.as_mut()), + Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()), + } + } + Err(e) => Err(e.map_src(Ptr::as_mut).into()), + } + } + + /// Attempts to interpret the prefix of the given `source` as a `&mut Self` + /// with a DST length equal to `count`. + /// + /// This method attempts to return a reference to the prefix of `source` + /// interpreted as a `Self` with `count` trailing elements, and a reference + /// to the remaining bytes. If the length of `source` is less than the size + /// of `Self` with `count` elements, if `source` is not appropriately + /// aligned, or if the prefix of `source` does not contain a valid instance + /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], + /// you can [infallibly discard the alignment error][ConvertError::from]. 
+ /// + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Examples + /// + /// ``` + /// # #![allow(non_camel_case_types)] // For C0::xC0 + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..]; + /// + /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); + /// assert_eq!(suffix, &[8u8][..]); + /// + /// packet.temperature = 111; + /// suffix[0] = 222; + /// + /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..]; + /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err()); + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`] + /// which do not take an explicit count do not support such types. 
+ /// + /// ``` + /// use core::num::NonZeroU16; + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: NonZeroU16, + /// trailing_dst: [()], + /// } + /// + /// let mut src = 0xCAFEu16; + /// let src = src.as_mut_bytes(); + /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix + #[must_use = "has no side effects"] + #[inline] + fn try_mut_from_prefix_with_elems( + source: &mut [u8], + count: usize, + ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>> + where + Self: KnownLayout + IntoBytes, + { + try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count)) + } + + /// Attempts to interpret the suffix of the given `source` as a `&mut Self` + /// with a DST length equal to `count`. + /// + /// This method attempts to return a reference to the suffix of `source` + /// interpreted as a `Self` with `count` trailing elements, and a reference + /// to the preceding bytes. If the length of `source` is less than the size + /// of `Self` with `count` elements, if the suffix of `source` is not + /// appropriately aligned, or if the suffix of `source` does not contain a + /// valid instance of `Self`, this returns `Err`. If [`Self: + /// Unaligned`][self-unaligned], you can [infallibly discard the alignment + /// error][ConvertError::from]. 
+ /// + /// [self-unaligned]: Unaligned + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Examples + /// + /// ``` + /// # #![allow(non_camel_case_types)] // For C0::xC0 + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// marshmallows: [[u8; 2]], + /// } + /// + /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; + /// + /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); + /// assert_eq!(prefix, &[123u8][..]); + /// + /// prefix[0] = 111; + /// packet.temperature = 222; + /// + /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..]; + /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err()); + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`] + /// which do not take an explicit count do not support such types. 
+ /// + /// ``` + /// use core::num::NonZeroU16; + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(TryFromBytes, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: NonZeroU16, + /// trailing_dst: [()], + /// } + /// + /// let mut src = 0xCAFEu16; + /// let src = src.as_mut_bytes(); + /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix + #[must_use = "has no side effects"] + #[inline] + fn try_mut_from_suffix_with_elems( + source: &mut [u8], + count: usize, + ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>> + where + Self: KnownLayout + IntoBytes, + { + try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap) + } + + /// Attempts to read the given `source` as a `Self`. + /// + /// If `source.len() != size_of::()` or the bytes are not a valid + /// instance of `Self`, this returns `Err`. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. + /// #[derive(TryFromBytes)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes)] + /// #[repr(C)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// } + /// + /// let bytes = &[0xC0, 0xC0, 240, 77][..]; + /// + /// let packet = Packet::try_read_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// + /// // These bytes are not valid instance of `Packet`. 
+ /// let bytes = &mut [0x10, 0xC0, 240, 77][..]; + /// assert!(Packet::try_read_from_bytes(bytes).is_err()); + /// ``` + /// + /// # Performance Considerations + /// + /// In this version of zerocopy, this method reads the `source` into a + /// well-aligned stack allocation and *then* validates that the allocation + /// is a valid `Self`. This ensures that validation can be performed using + /// aligned reads (which carry a performance advantage over unaligned reads + /// on many platforms) at the cost of an unconditional copy. + #[must_use = "has no side effects"] + #[inline] + fn try_read_from_bytes(source: &[u8]) -> Result> + where + Self: Sized, + { + // FIXME(#2981): If `align_of::() == 1`, validate `source` in-place. + + let candidate = match CoreMaybeUninit::::read_from_bytes(source) { + Ok(candidate) => candidate, + Err(e) => { + return Err(TryReadError::Size(e.with_dst())); + } + }; + // SAFETY: `candidate` was copied from from `source: &[u8]`, so all of + // its bytes are initialized. + unsafe { try_read_from(source, candidate) } + } + + /// Attempts to read a `Self` from the prefix of the given `source`. + /// + /// This attempts to read a `Self` from the first `size_of::()` bytes + /// of `source`, returning that `Self` and any remaining bytes. If + /// `source.len() < size_of::()` or the bytes are not a valid instance + /// of `Self`, it returns `Err`. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. 
+ /// #[derive(TryFromBytes)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes)] + /// #[repr(C)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// } + /// + /// // These are more bytes than are needed to encode a `Packet`. + /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; + /// + /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; + /// assert!(Packet::try_read_from_prefix(bytes).is_err()); + /// ``` + /// + /// # Performance Considerations + /// + /// In this version of zerocopy, this method reads the `source` into a + /// well-aligned stack allocation and *then* validates that the allocation + /// is a valid `Self`. This ensures that validation can be performed using + /// aligned reads (which carry a performance advantage over unaligned reads + /// on many platforms) at the cost of an unconditional copy. + #[must_use = "has no side effects"] + #[inline] + fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>> + where + Self: Sized, + { + // FIXME(#2981): If `align_of::() == 1`, validate `source` in-place. + + let (candidate, suffix) = match CoreMaybeUninit::::read_from_prefix(source) { + Ok(candidate) => candidate, + Err(e) => { + return Err(TryReadError::Size(e.with_dst())); + } + }; + // SAFETY: `candidate` was copied from from `source: &[u8]`, so all of + // its bytes are initialized. + unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) } + } + + /// Attempts to read a `Self` from the suffix of the given `source`. 
+ /// + /// This attempts to read a `Self` from the last `size_of::()` bytes + /// of `source`, returning that `Self` and any preceding bytes. If + /// `source.len() < size_of::()` or the bytes are not a valid instance + /// of `Self`, it returns `Err`. + /// + /// # Examples + /// + /// ``` + /// # #![allow(non_camel_case_types)] // For C0::xC0 + /// use zerocopy::TryFromBytes; + /// # use zerocopy_derive::*; + /// + /// // The only valid value of this type is the byte `0xC0` + /// #[derive(TryFromBytes)] + /// #[repr(u8)] + /// enum C0 { xC0 = 0xC0 } + /// + /// // The only valid value of this type is the bytes `0xC0C0`. + /// #[derive(TryFromBytes)] + /// #[repr(C)] + /// struct C0C0(C0, C0); + /// + /// #[derive(TryFromBytes)] + /// #[repr(C)] + /// struct Packet { + /// magic_number: C0C0, + /// mug_size: u8, + /// temperature: u8, + /// } + /// + /// // These are more bytes than are needed to encode a `Packet`. + /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..]; + /// + /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap(); + /// + /// assert_eq!(packet.mug_size, 240); + /// assert_eq!(packet.temperature, 77); + /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]); + /// + /// // These bytes are not valid instance of `Packet`. + /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..]; + /// assert!(Packet::try_read_from_suffix(bytes).is_err()); + /// ``` + /// + /// # Performance Considerations + /// + /// In this version of zerocopy, this method reads the `source` into a + /// well-aligned stack allocation and *then* validates that the allocation + /// is a valid `Self`. This ensures that validation can be performed using + /// aligned reads (which carry a performance advantage over unaligned reads + /// on many platforms) at the cost of an unconditional copy. 
+ #[must_use = "has no side effects"] + #[inline] + fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>> + where + Self: Sized, + { + // FIXME(#2981): If `align_of::() == 1`, validate `source` in-place. + + let (prefix, candidate) = match CoreMaybeUninit::::read_from_suffix(source) { + Ok(candidate) => candidate, + Err(e) => { + return Err(TryReadError::Size(e.with_dst())); + } + }; + // SAFETY: `candidate` was copied from from `source: &[u8]`, so all of + // its bytes are initialized. + unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) } + } +} + +#[inline(always)] +fn try_ref_from_prefix_suffix( + source: &[u8], + cast_type: CastType, + meta: Option, +) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> { + match Ptr::from_ref(source).try_cast_into::(cast_type, meta) { + Ok((source, prefix_suffix)) => { + // This call may panic. If that happens, it doesn't cause any soundness + // issues, as we have not generated any invalid state which we need to + // fix before returning. + match source.try_into_valid() { + Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())), + Err(e) => Err(e.map_src(|src| src.as_bytes::().as_ref()).into()), + } + } + Err(e) => Err(e.map_src(Ptr::as_ref).into()), + } +} + +#[inline(always)] +fn try_mut_from_prefix_suffix( + candidate: &mut [u8], + cast_type: CastType, + meta: Option, +) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> { + match Ptr::from_mut(candidate).try_cast_into::(cast_type, meta) { + Ok((candidate, prefix_suffix)) => { + // This call may panic. If that happens, it doesn't cause any soundness + // issues, as we have not generated any invalid state which we need to + // fix before returning. 
+ match candidate.try_into_valid() { + Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())), + Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()), + } + } + Err(e) => Err(e.map_src(Ptr::as_mut).into()), + } +} + +#[inline(always)] +fn swap((t, u): (T, U)) -> (U, T) { + (u, t) +} + +/// # Safety +/// +/// All bytes of `candidate` must be initialized. +#[inline(always)] +unsafe fn try_read_from( + source: S, + mut candidate: CoreMaybeUninit, +) -> Result> { + // We use `from_mut` despite not mutating via `c_ptr` so that we don't need + // to add a `T: Immutable` bound. + let c_ptr = Ptr::from_mut(&mut candidate); + // SAFETY: `c_ptr` has no uninitialized sub-ranges because it derived from + // `candidate`, which the caller promises is entirely initialized. Since + // `candidate` is a `MaybeUninit`, it has no validity requirements, and so + // no values written to an `Initialized` `c_ptr` can violate its validity. + // Since `c_ptr` has `Exclusive` aliasing, no mutations may happen except + // via `c_ptr` so long as it is live, so we don't need to worry about the + // fact that `c_ptr` may have more restricted validity than `candidate`. + let c_ptr = unsafe { c_ptr.assume_validity::() }; + let mut c_ptr = c_ptr.cast::<_, crate::pointer::cast::CastSized, _>(); + + // Since we don't have `T: KnownLayout`, we hack around that by using + // `Wrapping`, which implements `KnownLayout` even if `T` doesn't. + // + // This call may panic. If that happens, it doesn't cause any soundness + // issues, as we have not generated any invalid state which we need to fix + // before returning. 
+ if !Wrapping::::is_bit_valid(c_ptr.reborrow_shared().forget_aligned()) { + return Err(ValidityError::new(source).into()); + } + + fn _assert_same_size_and_validity() + where + Wrapping: pointer::TransmuteFrom, + T: pointer::TransmuteFrom, invariant::Valid, invariant::Valid>, + { + } + + _assert_same_size_and_validity::(); + + // SAFETY: We just validated that `candidate` contains a valid + // `Wrapping`, which has the same size and bit validity as `T`, as + // guaranteed by the preceding type assertion. + Ok(unsafe { candidate.assume_init() }) +} + +/// Types for which a sequence of `0` bytes is a valid instance. +/// +/// Any memory region of the appropriate length which is guaranteed to contain +/// only zero bytes can be viewed as any `FromZeros` type with no runtime +/// overhead. This is useful whenever memory is known to be in a zeroed state, +/// such memory returned from some allocation routines. +/// +/// # Warning: Padding bytes +/// +/// Note that, when a value is moved or copied, only the non-padding bytes of +/// that value are guaranteed to be preserved. It is unsound to assume that +/// values written to padding bytes are preserved after a move or copy. For more +/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes]. +/// +/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes +/// +/// # Implementation +/// +/// **Do not implement this trait yourself!** Instead, use +/// [`#[derive(FromZeros)]`][derive]; e.g.: +/// +/// ``` +/// # use zerocopy_derive::{FromZeros, Immutable}; +/// #[derive(FromZeros)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(FromZeros)] +/// #[repr(u8)] +/// enum MyEnum { +/// # Variant0, +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(FromZeros, Immutable)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... 
+/// # */ +/// } +/// ``` +/// +/// This derive performs a sophisticated, compile-time safety analysis to +/// determine whether a type is `FromZeros`. +/// +/// # Safety +/// +/// *This section describes what is required in order for `T: FromZeros`, and +/// what unsafe code may assume of such types. If you don't plan on implementing +/// `FromZeros` manually, and you don't plan on writing unsafe code that +/// operates on `FromZeros` types, then you don't need to read this section.* +/// +/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a +/// `T` whose bytes are all initialized to zero. If a type is marked as +/// `FromZeros` which violates this contract, it may cause undefined behavior. +/// +/// `#[derive(FromZeros)]` only permits [types which satisfy these +/// requirements][derive-analysis]. +/// +#[cfg_attr( + feature = "derive", + doc = "[derive]: zerocopy_derive::FromZeros", + doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis" +)] +#[cfg_attr( + not(feature = "derive"), + doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"), + doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"), +)] +#[cfg_attr( + not(no_zerocopy_diagnostic_on_unimplemented_1_78_0), + diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`") +)] +pub unsafe trait FromZeros: TryFromBytes { + // The `Self: Sized` bound makes it so that `FromZeros` is still object + // safe. + #[doc(hidden)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; + + /// Overwrites `self` with zeros. + /// + /// Sets every byte in `self` to 0. While this is similar to doing `*self = + /// Self::new_zeroed()`, it differs in that `zero` does not semantically + /// drop the current value and replace it with a new one — it simply + /// modifies the bytes of the existing value. 
+ /// + /// # Examples + /// + /// ``` + /// # use zerocopy::FromZeros; + /// # use zerocopy_derive::*; + /// # + /// #[derive(FromZeros)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let mut header = PacketHeader { + /// src_port: 100u16.to_be_bytes(), + /// dst_port: 200u16.to_be_bytes(), + /// length: 300u16.to_be_bytes(), + /// checksum: 400u16.to_be_bytes(), + /// }; + /// + /// header.zero(); + /// + /// assert_eq!(header.src_port, [0, 0]); + /// assert_eq!(header.dst_port, [0, 0]); + /// assert_eq!(header.length, [0, 0]); + /// assert_eq!(header.checksum, [0, 0]); + /// ``` + #[inline(always)] + fn zero(&mut self) { + let slf: *mut Self = self; + let len = mem::size_of_val(self); + // SAFETY: + // - `self` is guaranteed by the type system to be valid for writes of + // size `size_of_val(self)`. + // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned + // as required by `u8`. + // - Since `Self: FromZeros`, the all-zeros instance is a valid instance + // of `Self.` + // + // FIXME(#429): Add references to docs and quotes. + unsafe { ptr::write_bytes(slf.cast::(), 0, len) }; + } + + /// Creates an instance of `Self` from zeroed bytes. 
+ /// + /// # Examples + /// + /// ``` + /// # use zerocopy::FromZeros; + /// # use zerocopy_derive::*; + /// # + /// #[derive(FromZeros)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let header: PacketHeader = FromZeros::new_zeroed(); + /// + /// assert_eq!(header.src_port, [0, 0]); + /// assert_eq!(header.dst_port, [0, 0]); + /// assert_eq!(header.length, [0, 0]); + /// assert_eq!(header.checksum, [0, 0]); + /// ``` + #[must_use = "has no side effects"] + #[inline(always)] + fn new_zeroed() -> Self + where + Self: Sized, + { + // SAFETY: `FromZeros` says that the all-zeros bit pattern is legal. + unsafe { mem::zeroed() } + } + + /// Creates a `Box` from zeroed bytes. + /// + /// This function is useful for allocating large values on the heap and + /// zero-initializing them, without ever creating a temporary instance of + /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()` + /// will allocate `[u8; 1048576]` directly on the heap; it does not require + /// storing `[u8; 1048576]` in a temporary variable on the stack. + /// + /// On systems that use a heap implementation that supports allocating from + /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may + /// have performance benefits. + /// + /// # Errors + /// + /// Returns an error on allocation failure. Allocation failure is guaranteed + /// never to cause a panic or an abort. + #[must_use = "has no side effects (other than allocation)"] + #[cfg(any(feature = "alloc", test))] + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + #[inline] + fn new_box_zeroed() -> Result, AllocError> + where + Self: Sized, + { + // If `T` is a ZST, then return a proper boxed instance of it. There is + // no allocation, but `Box` does require a correct dangling pointer. 
+ let layout = Layout::new::(); + if layout.size() == 0 { + // Construct the `Box` from a dangling pointer to avoid calling + // `Self::new_zeroed`. This ensures that stack space is never + // allocated for `Self` even on lower opt-levels where this branch + // might not get optimized out. + + // SAFETY: Per [1], when `T` is a ZST, `Box`'s only validity + // requirements are that the pointer is non-null and sufficiently + // aligned. Per [2], `NonNull::dangling` produces a pointer which + // is sufficiently aligned. Since the produced pointer is a + // `NonNull`, it is non-null. + // + // [1] Per https://doc.rust-lang.org/1.81.0/std/boxed/index.html#memory-layout: + // + // For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned. + // + // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling: + // + // Creates a new `NonNull` that is dangling, but well-aligned. + return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) }); + } + + // FIXME(#429): Add a "SAFETY" comment and remove this `allow`. + #[allow(clippy::undocumented_unsafe_blocks)] + let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::() }; + if ptr.is_null() { + return Err(AllocError); + } + // FIXME(#429): Add a "SAFETY" comment and remove this `allow`. + #[allow(clippy::undocumented_unsafe_blocks)] + Ok(unsafe { Box::from_raw(ptr) }) + } + + /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes. + /// + /// This function is useful for allocating large values of `[Self]` on the + /// heap and zero-initializing them, without ever creating a temporary + /// instance of `[Self; _]` on the stack. For example, + /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on + /// the heap; it does not require storing the slice on the stack. + /// + /// On systems that use a heap implementation that supports allocating from + /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance + /// benefits. 
+ /// + /// If `Self` is a zero-sized type, then this function will return a + /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any + /// actual information, but its `len()` property will report the correct + /// value. + /// + /// # Errors + /// + /// Returns an error on allocation failure. Allocation failure is + /// guaranteed never to cause a panic or an abort. + #[must_use = "has no side effects (other than allocation)"] + #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + #[inline] + fn new_box_zeroed_with_elems(count: usize) -> Result, AllocError> + where + Self: KnownLayout, + { + // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of + // `new_box`. The referent of the pointer returned by `alloc_zeroed` + // (and, consequently, the `Box` derived from it) is a valid instance of + // `Self`, because `Self` is `FromZeros`. + unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) } + } + + #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")] + #[doc(hidden)] + #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + #[must_use = "has no side effects (other than allocation)"] + #[inline(always)] + fn new_box_slice_zeroed(len: usize) -> Result, AllocError> + where + Self: Sized, + { + <[Self]>::new_box_zeroed_with_elems(len) + } + + /// Creates a `Vec` from zeroed bytes. + /// + /// This function is useful for allocating large values of `Vec`s and + /// zero-initializing them, without ever creating a temporary instance of + /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For + /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the + /// heap; it does not require storing intermediate values on the stack. + /// + /// On systems that use a heap implementation that supports allocating from + /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits. 
+ /// + /// If `Self` is a zero-sized type, then this function will return a + /// `Vec` that has the correct `len`. Such a `Vec` cannot contain any + /// actual information, but its `len()` property will report the correct + /// value. + /// + /// # Errors + /// + /// Returns an error on allocation failure. Allocation failure is + /// guaranteed never to cause a panic or an abort. + #[must_use = "has no side effects (other than allocation)"] + #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + #[inline(always)] + fn new_vec_zeroed(len: usize) -> Result, AllocError> + where + Self: Sized, + { + <[Self]>::new_box_zeroed_with_elems(len).map(Into::into) + } + + /// Extends a `Vec` by pushing `additional` new items onto the end of + /// the vector. The new items are initialized with zeros. + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))] + #[inline(always)] + fn extend_vec_zeroed(v: &mut Vec, additional: usize) -> Result<(), AllocError> + where + Self: Sized, + { + // PANICS: We pass `v.len()` for `position`, so the `position > v.len()` + // panic condition is not satisfied. + ::insert_vec_zeroed(v, v.len(), additional) + } + + /// Inserts `additional` new items into `Vec` at `position`. The new + /// items are initialized with zeros. + /// + /// # Panics + /// + /// Panics if `position > v.len()`. + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))] + #[inline] + fn insert_vec_zeroed( + v: &mut Vec, + position: usize, + additional: usize, + ) -> Result<(), AllocError> + where + Self: Sized, + { + assert!(position <= v.len()); + // We only conditionally compile on versions on which `try_reserve` is + // stable; the Clippy lint is a false positive. 
+ v.try_reserve(additional).map_err(|_| AllocError)?; + // SAFETY: The `try_reserve` call guarantees that these cannot overflow: + // * `ptr.add(position)` + // * `position + additional` + // * `v.len() + additional` + // + // `v.len() - position` cannot overflow because we asserted that + // `position <= v.len()`. + #[allow(clippy::multiple_unsafe_ops_per_block)] + unsafe { + // This is a potentially overlapping copy. + let ptr = v.as_mut_ptr(); + #[allow(clippy::arithmetic_side_effects)] + ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position); + ptr.add(position).write_bytes(0, additional); + #[allow(clippy::arithmetic_side_effects)] + v.set_len(v.len() + additional); + } + + Ok(()) + } +} + +/// Analyzes whether a type is [`FromBytes`]. +/// +/// This derive analyzes, at compile time, whether the annotated type satisfies +/// the [safety conditions] of `FromBytes` and implements `FromBytes` and its +/// supertraits if it is sound to do so. This derive can be applied to structs, +/// enums, and unions; +/// e.g.: +/// +/// ``` +/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable}; +/// #[derive(FromBytes)] +/// struct MyStruct { +/// # /* +/// ... 
+/// # */ +/// } +/// +/// #[derive(FromBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E, +/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D, +/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C, +/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B, +/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A, +/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59, +/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68, +/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77, +/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86, +/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95, +/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4, +/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3, +/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2, +/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1, +/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0, +/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF, +/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE, +/// # VFF, +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(FromBytes, Immutable)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// [safety conditions]: trait@FromBytes#safety +/// +/// # Analysis +/// +/// *This section describes, roughly, the analysis performed by this derive to +/// determine whether it is sound to implement `FromBytes` for a given type. 
+/// Unless you are modifying the implementation of this derive, or attempting to +/// manually implement `FromBytes` for a type yourself, you don't need to read +/// this section.* +/// +/// If a type has the following properties, then this derive can implement +/// `FromBytes` for that type: +/// +/// - If the type is a struct, all of its fields must be `FromBytes`. +/// - If the type is an enum: +/// - It must have a defined representation which is one of `u8`, `u16`, `i8`, +/// or `i16`. +/// - The maximum number of discriminants must be used (so that every possible +/// bit pattern is a valid one). +/// - Its fields must be `FromBytes`. +/// +/// This analysis is subject to change. Unsafe code may *only* rely on the +/// documented [safety conditions] of `FromBytes`, and must *not* rely on the +/// implementation details of this derive. +/// +/// ## Why isn't an explicit representation required for structs? +/// +/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires +/// that structs are marked with `#[repr(C)]`. +/// +/// Per the [Rust reference](reference), +/// +/// > The representation of a type can change the padding between fields, but +/// > does not change the layout of the fields themselves. +/// +/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations +/// +/// Since the layout of structs only consists of padding bytes and field bytes, +/// a struct is soundly `FromBytes` if: +/// 1. its padding is soundly `FromBytes`, and +/// 2. its fields are soundly `FromBytes`. +/// +/// The answer to the first question is always yes: padding bytes do not have +/// any validity constraints. A [discussion] of this question in the Unsafe Code +/// Guidelines Working Group concluded that it would be virtually unimaginable +/// for future versions of rustc to add validity constraints to padding bytes. 
+/// +/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174 +/// +/// Whether a struct is soundly `FromBytes` therefore solely depends on whether +/// its fields are `FromBytes`. +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::FromBytes; + +/// Types for which any bit pattern is valid. +/// +/// Any memory region of the appropriate length which contains initialized bytes +/// can be viewed as any `FromBytes` type with no runtime overhead. This is +/// useful for efficiently parsing bytes as structured data. +/// +/// # Warning: Padding bytes +/// +/// Note that, when a value is moved or copied, only the non-padding bytes of +/// that value are guaranteed to be preserved. It is unsound to assume that +/// values written to padding bytes are preserved after a move or copy. For +/// example, the following is unsound: +/// +/// ```rust,no_run +/// use core::mem::{size_of, transmute}; +/// use zerocopy::FromZeros; +/// # use zerocopy_derive::*; +/// +/// // Assume `Foo` is a type with padding bytes. +/// #[derive(FromZeros, Default)] +/// struct Foo { +/// # /* +/// ... +/// # */ +/// } +/// +/// let mut foo: Foo = Foo::default(); +/// FromZeros::zero(&mut foo); +/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`, +/// // those writes are not guaranteed to be preserved in padding bytes when +/// // `foo` is moved, so this may expose padding bytes as `u8`s. +/// let foo_bytes: [u8; size_of::()] = unsafe { transmute(foo) }; +/// ``` +/// +/// # Implementation +/// +/// **Do not implement this trait yourself!** Instead, use +/// [`#[derive(FromBytes)]`][derive]; e.g.: +/// +/// ``` +/// # use zerocopy_derive::{FromBytes, Immutable}; +/// #[derive(FromBytes)] +/// struct MyStruct { +/// # /* +/// ... 
+/// # */ +/// } +/// +/// #[derive(FromBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E, +/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D, +/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C, +/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B, +/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A, +/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59, +/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68, +/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77, +/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86, +/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95, +/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4, +/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3, +/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2, +/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1, +/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0, +/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF, +/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE, +/// # VFF, +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(FromBytes, Immutable)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// This derive performs a sophisticated, compile-time safety analysis to +/// determine whether a type is `FromBytes`. +/// +/// # Safety +/// +/// *This section describes what is required in order for `T: FromBytes`, and +/// what unsafe code may assume of such types. 
If you don't plan on implementing +/// `FromBytes` manually, and you don't plan on writing unsafe code that +/// operates on `FromBytes` types, then you don't need to read this section.* +/// +/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a +/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other +/// words, any byte value which is not uninitialized). If a type is marked as +/// `FromBytes` which violates this contract, it may cause undefined behavior. +/// +/// `#[derive(FromBytes)]` only permits [types which satisfy these +/// requirements][derive-analysis]. +/// +#[cfg_attr( + feature = "derive", + doc = "[derive]: zerocopy_derive::FromBytes", + doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis" +)] +#[cfg_attr( + not(feature = "derive"), + doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"), + doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"), +)] +#[cfg_attr( + not(no_zerocopy_diagnostic_on_unimplemented_1_78_0), + diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`") +)] +pub unsafe trait FromBytes: FromZeros { + // The `Self: Sized` bound makes it so that `FromBytes` is still object + // safe. + #[doc(hidden)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; + + /// Interprets the given `source` as a `&Self`. + /// + /// This method attempts to return a reference to `source` interpreted as a + /// `Self`. If the length of `source` is not a [valid size of + /// `Self`][valid-size], or if `source` is not appropriately aligned, this + /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can + /// [infallibly discard the alignment error][size-error-from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. 
+ /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error! + /// ``` + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// #[derive(FromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct Packet { + /// header: PacketHeader, + /// body: [u8], + /// } + /// + /// // These bytes encode a `Packet`. 
+ /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..]; + /// + /// let packet = Packet::ref_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.header.src_port, [0, 1]); + /// assert_eq!(packet.header.dst_port, [2, 3]); + /// assert_eq!(packet.header.length, [4, 5]); + /// assert_eq!(packet.header.checksum, [6, 7]); + /// assert_eq!(packet.body, [8, 9, 10, 11]); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>> + where + Self: KnownLayout + Immutable, + { + static_assert_dst_is_not_zst!(Self); + match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) { + Ok(ptr) => Ok(ptr.recall_validity().as_ref()), + Err(err) => Err(err.map_src(|src| src.as_ref())), + } + } + + /// Interprets the prefix of the given `source` as a `&Self` without + /// copying. + /// + /// This method computes the [largest possible size of `Self`][valid-size] + /// that can fit in the leading bytes of `source`, then attempts to return + /// both a reference to those bytes interpreted as a `Self`, and a reference + /// to the remaining bytes. If there are insufficient bytes, or if `source` + /// is not appropriately aligned, this returns `Err`. If [`Self: + /// Unaligned`][self-unaligned], you can [infallibly discard the alignment + /// error][size-error-from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. + /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does + /// support such types. 
Attempting to use this method on such types results + /// in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error! + /// ``` + /// + /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// #[derive(FromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct Packet { + /// header: PacketHeader, + /// body: [[u8; 2]], + /// } + /// + /// // These are more bytes than are needed to encode a `Packet`. + /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..]; + /// + /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap(); + /// + /// assert_eq!(packet.header.src_port, [0, 1]); + /// assert_eq!(packet.header.dst_port, [2, 3]); + /// assert_eq!(packet.header.length, [4, 5]); + /// assert_eq!(packet.header.checksum, [6, 7]); + /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]); + /// assert_eq!(suffix, &[14u8][..]); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>> + where + Self: KnownLayout + Immutable, + { + static_assert_dst_is_not_zst!(Self); + ref_from_prefix_suffix(source, None, CastType::Prefix) + } + + /// Interprets the suffix of the given bytes as a `&Self`. 
+ /// + /// This method computes the [largest possible size of `Self`][valid-size] + /// that can fit in the trailing bytes of `source`, then attempts to return + /// both a reference to those bytes interpreted as a `Self`, and a reference + /// to the preceding bytes. If there are insufficient bytes, or if that + /// suffix of `source` is not appropriately aligned, this returns `Err`. If + /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the + /// alignment error][size-error-from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. + /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does + /// support such types. Attempting to use this method on such types results + /// in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error! + /// ``` + /// + /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct PacketTrailer { + /// frame_check_sequence: [u8; 4], + /// } + /// + /// // These are more bytes than are needed to encode a `PacketTrailer`. 
+ /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap(); + /// + /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]); + /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>> + where + Self: Immutable + KnownLayout, + { + static_assert_dst_is_not_zst!(Self); + ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap) + } + + /// Interprets the given `source` as a `&mut Self`. + /// + /// This method attempts to return a reference to `source` interpreted as a + /// `Self`. If the length of `source` is not a [valid size of + /// `Self`][valid-size], or if `source` is not appropriately aligned, this + /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can + /// [infallibly discard the alignment error][size-error-from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. + /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does + /// support such types. Attempting to use this method on such types results + /// in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: [u8; 2], + /// trailing_dst: [()], + /// } + /// + /// let mut source = [85, 85]; + /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // ⚠ Compile Error! 
+ /// ``` + /// + /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// // These bytes encode a `PacketHeader`. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..]; + /// + /// let header = PacketHeader::mut_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(header.src_port, [0, 1]); + /// assert_eq!(header.dst_port, [2, 3]); + /// assert_eq!(header.length, [4, 5]); + /// assert_eq!(header.checksum, [6, 7]); + /// + /// header.checksum = [0, 0]; + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>> + where + Self: IntoBytes + KnownLayout, + { + static_assert_dst_is_not_zst!(Self); + match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) { + Ok(ptr) => Ok(ptr.recall_validity::<_, (_, (_, _))>().as_mut()), + Err(err) => Err(err.map_src(|src| src.as_mut())), + } + } + + /// Interprets the prefix of the given `source` as a `&mut Self` without + /// copying. + /// + /// This method computes the [largest possible size of `Self`][valid-size] + /// that can fit in the leading bytes of `source`, then attempts to return + /// both a reference to those bytes interpreted as a `Self`, and a reference + /// to the remaining bytes. If there are insufficient bytes, or if `source` + /// is not appropriately aligned, this returns `Err`. If [`Self: + /// Unaligned`][self-unaligned], you can [infallibly discard the alignment + /// error][size-error-from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. 
+ /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. See [`mut_from_suffix_with_elems`], which does + /// support such types. Attempting to use this method on such types results + /// in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: [u8; 2], + /// trailing_dst: [()], + /// } + /// + /// let mut source = [85, 85]; + /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // ⚠ Compile Error! + /// ``` + /// + /// [`mut_from_suffix_with_elems`]: FromBytes::mut_from_suffix_with_elems + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// // These are more bytes than are needed to encode a `PacketHeader`. 
+ /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap(); + /// + /// assert_eq!(header.src_port, [0, 1]); + /// assert_eq!(header.dst_port, [2, 3]); + /// assert_eq!(header.length, [4, 5]); + /// assert_eq!(header.checksum, [6, 7]); + /// assert_eq!(body, &[8, 9][..]); + /// + /// header.checksum = [0, 0]; + /// body.fill(1); + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn mut_from_prefix( + source: &mut [u8], + ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>> + where + Self: IntoBytes + KnownLayout, + { + static_assert_dst_is_not_zst!(Self); + mut_from_prefix_suffix(source, None, CastType::Prefix) + } + + /// Interprets the suffix of the given `source` as a `&mut Self` without + /// copying. + /// + /// This method computes the [largest possible size of `Self`][valid-size] + /// that can fit in the trailing bytes of `source`, then attempts to return + /// both a reference to those bytes interpreted as a `Self`, and a reference + /// to the preceding bytes. If there are insufficient bytes, or if that + /// suffix of `source` is not appropriately aligned, this returns `Err`. If + /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the + /// alignment error][size-error-from]. + /// + /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. + /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. 
Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: [u8; 2], + /// trailing_dst: [()], + /// } + /// + /// let mut source = [85, 85]; + /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠ Compile Error! + /// ``` + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct PacketTrailer { + /// frame_check_sequence: [u8; 4], + /// } + /// + /// // These are more bytes than are needed to encode a `PacketTrailer`. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap(); + /// + /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]); + /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); + /// + /// prefix.fill(0); + /// trailer.frame_check_sequence.fill(1); + /// + /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn mut_from_suffix( + source: &mut [u8], + ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>> + where + Self: IntoBytes + KnownLayout, + { + static_assert_dst_is_not_zst!(Self); + mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap) + } + + /// Interprets the given `source` as a `&Self` with a DST length equal to + /// `count`. + /// + /// This method attempts to return a reference to `source` interpreted as a + /// `Self` with `count` trailing elements. If the length of `source` is not + /// equal to the size of `Self` with `count` elements, or if `source` is not + /// appropriately aligned, this returns `Err`. 
If [`Self: + /// Unaligned`][self-unaligned], you can [infallibly discard the alignment + /// error][size-error-from]. + /// + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(FromBytes, Immutable)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..]; + /// + /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap(); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 0, g: 1, b: 2, a: 3 }, + /// Pixel { r: 4, g: 5, b: 6, a: 7 }, + /// ]); + /// + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`] + /// which do not take an explicit count do not support such types. 
+ /// + /// ``` + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: [u8; 2], + /// trailing_dst: [()], + /// } + /// + /// let src = &[85, 85][..]; + /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`ref_from_bytes`]: FromBytes::ref_from_bytes + #[must_use = "has no side effects"] + #[inline] + fn ref_from_bytes_with_elems( + source: &[u8], + count: usize, + ) -> Result<&Self, CastError<&[u8], Self>> + where + Self: KnownLayout + Immutable, + { + let source = Ptr::from_ref(source); + let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count)); + match maybe_slf { + Ok(slf) => Ok(slf.recall_validity().as_ref()), + Err(err) => Err(err.map_src(|s| s.as_ref())), + } + } + + /// Interprets the prefix of the given `source` as a DST `&Self` with length + /// equal to `count`. + /// + /// This method attempts to return a reference to the prefix of `source` + /// interpreted as a `Self` with `count` trailing elements, and a reference + /// to the remaining bytes. If there are insufficient bytes, or if `source` + /// is not appropriately aligned, this returns `Err`. If [`Self: + /// Unaligned`][self-unaligned], you can [infallibly discard the alignment + /// error][size-error-from]. + /// + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(FromBytes, Immutable)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// // These are more bytes than are needed to encode two `Pixel`s. 
+ /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap(); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 0, g: 1, b: 2, a: 3 }, + /// Pixel { r: 4, g: 5, b: 6, a: 7 }, + /// ]); + /// + /// assert_eq!(suffix, &[8, 9]); + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`] + /// which do not take an explicit count do not support such types. + /// + /// ``` + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: [u8; 2], + /// trailing_dst: [()], + /// } + /// + /// let src = &[85, 85][..]; + /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`ref_from_prefix`]: FromBytes::ref_from_prefix + #[must_use = "has no side effects"] + #[inline] + fn ref_from_prefix_with_elems( + source: &[u8], + count: usize, + ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>> + where + Self: KnownLayout + Immutable, + { + ref_from_prefix_suffix(source, Some(count), CastType::Prefix) + } + + /// Interprets the suffix of the given `source` as a DST `&Self` with length + /// equal to `count`. + /// + /// This method attempts to return a reference to the suffix of `source` + /// interpreted as a `Self` with `count` trailing elements, and a reference + /// to the preceding bytes. If there are insufficient bytes, or if that + /// suffix of `source` is not appropriately aligned, this returns `Err`. If + /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the + /// alignment error][size-error-from]. 
+ /// + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(FromBytes, Immutable)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// // These are more bytes than are needed to encode two `Pixel`s. + /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap(); + /// + /// assert_eq!(prefix, &[0, 1]); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 2, g: 3, b: 4, a: 5 }, + /// Pixel { r: 6, g: 7, b: 8, a: 9 }, + /// ]); + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`] + /// which do not take an explicit count do not support such types. + /// + /// ``` + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: [u8; 2], + /// trailing_dst: [()], + /// } + /// + /// let src = &[85, 85][..]; + /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`ref_from_suffix`]: FromBytes::ref_from_suffix + #[must_use = "has no side effects"] + #[inline] + fn ref_from_suffix_with_elems( + source: &[u8], + count: usize, + ) -> Result<(&[u8], &Self), CastError<&[u8], Self>> + where + Self: KnownLayout + Immutable, + { + ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap) + } + + /// Interprets the given `source` as a `&mut Self` with a DST length equal + /// to `count`. + /// + /// This method attempts to return a reference to `source` interpreted as a + /// `Self` with `count` trailing elements. 
If the length of `source` is not + /// equal to the size of `Self` with `count` elements, or if `source` is not + /// appropriately aligned, this returns `Err`. If [`Self: + /// Unaligned`][self-unaligned], you can [infallibly discard the alignment + /// error][size-error-from]. + /// + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..]; + /// + /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap(); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 0, g: 1, b: 2, a: 3 }, + /// Pixel { r: 4, g: 5, b: 6, a: 7 }, + /// ]); + /// + /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]); + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`] + /// which do not take an explicit count do not support such types. 
+ /// + /// ``` + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: [u8; 2], + /// trailing_dst: [()], + /// } + /// + /// let src = &mut [85, 85][..]; + /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`mut_from_bytes`]: FromBytes::mut_from_bytes + #[must_use = "has no side effects"] + #[inline] + fn mut_from_bytes_with_elems( + source: &mut [u8], + count: usize, + ) -> Result<&mut Self, CastError<&mut [u8], Self>> + where + Self: IntoBytes + KnownLayout + Immutable, + { + let source = Ptr::from_mut(source); + let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count)); + match maybe_slf { + Ok(slf) => Ok(slf.recall_validity::<_, (_, (_, BecauseExclusive))>().as_mut()), + Err(err) => Err(err.map_src(|s| s.as_mut())), + } + } + + /// Interprets the prefix of the given `source` as a `&mut Self` with DST + /// length equal to `count`. + /// + /// This method attempts to return a reference to the prefix of `source` + /// interpreted as a `Self` with `count` trailing elements, and a reference + /// to the preceding bytes. If there are insufficient bytes, or if `source` + /// is not appropriately aligned, this returns `Err`. If [`Self: + /// Unaligned`][self-unaligned], you can [infallibly discard the alignment + /// error][size-error-from]. + /// + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// // These are more bytes than are needed to encode two `Pixel`s. 
+ /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap(); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 0, g: 1, b: 2, a: 3 }, + /// Pixel { r: 4, g: 5, b: 6, a: 7 }, + /// ]); + /// + /// assert_eq!(suffix, &[8, 9]); + /// + /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; + /// suffix.fill(1); + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]); + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`] + /// which do not take an explicit count do not support such types. + /// + /// ``` + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: [u8; 2], + /// trailing_dst: [()], + /// } + /// + /// let src = &mut [85, 85][..]; + /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`mut_from_prefix`]: FromBytes::mut_from_prefix + #[must_use = "has no side effects"] + #[inline] + fn mut_from_prefix_with_elems( + source: &mut [u8], + count: usize, + ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>> + where + Self: IntoBytes + KnownLayout, + { + mut_from_prefix_suffix(source, Some(count), CastType::Prefix) + } + + /// Interprets the suffix of the given `source` as a `&mut Self` with DST + /// length equal to `count`. + /// + /// This method attempts to return a reference to the suffix of `source` + /// interpreted as a `Self` with `count` trailing elements, and a reference + /// to the remaining bytes. If there are insufficient bytes, or if that + /// suffix of `source` is not appropriately aligned, this returns `Err`. 
If + /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the + /// alignment error][size-error-from]. + /// + /// [self-unaligned]: Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(FromBytes, IntoBytes, Immutable)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// // These are more bytes than are needed to encode two `Pixel`s. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap(); + /// + /// assert_eq!(prefix, &[0, 1]); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 2, g: 3, b: 4, a: 5 }, + /// Pixel { r: 6, g: 7, b: 8, a: 9 }, + /// ]); + /// + /// prefix.fill(9); + /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; + /// + /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]); + /// ``` + /// + /// Since an explicit `count` is provided, this method supports types with + /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`] + /// which do not take an explicit count do not support such types. 
+ /// + /// ``` + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] + /// #[repr(C, packed)] + /// struct ZSTy { + /// leading_sized: [u8; 2], + /// trailing_dst: [()], + /// } + /// + /// let src = &mut [85, 85][..]; + /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap(); + /// assert_eq!(zsty.trailing_dst.len(), 42); + /// ``` + /// + /// [`mut_from_suffix`]: FromBytes::mut_from_suffix + #[must_use = "has no side effects"] + #[inline] + fn mut_from_suffix_with_elems( + source: &mut [u8], + count: usize, + ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>> + where + Self: IntoBytes + KnownLayout, + { + mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap) + } + + /// Reads a copy of `Self` from the given `source`. + /// + /// If `source.len() != size_of::()`, `read_from_bytes` returns `Err`. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// // These bytes encode a `PacketHeader`. + /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..]; + /// + /// let header = PacketHeader::read_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(header.src_port, [0, 1]); + /// assert_eq!(header.dst_port, [2, 3]); + /// assert_eq!(header.length, [4, 5]); + /// assert_eq!(header.checksum, [6, 7]); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn read_from_bytes(source: &[u8]) -> Result> + where + Self: Sized, + { + match Ref::<_, Unalign>::sized_from(source) { + Ok(r) => Ok(Ref::read(&r).into_inner()), + Err(CastError::Size(e)) => Err(e.with_dst()), + Err(CastError::Alignment(_)) => { + // SAFETY: `Unalign` is trivially aligned, so + // `Ref::sized_from` cannot fail due to unmet alignment + // requirements. 
+ unsafe { core::hint::unreachable_unchecked() } + } + Err(CastError::Validity(i)) => match i {}, + } + } + + /// Reads a copy of `Self` from the prefix of the given `source`. + /// + /// This attempts to read a `Self` from the first `size_of::()` bytes + /// of `source`, returning that `Self` and any remaining bytes. If + /// `source.len() < size_of::()`, it returns `Err`. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// // These are more bytes than are needed to encode a `PacketHeader`. + /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap(); + /// + /// assert_eq!(header.src_port, [0, 1]); + /// assert_eq!(header.dst_port, [2, 3]); + /// assert_eq!(header.length, [4, 5]); + /// assert_eq!(header.checksum, [6, 7]); + /// assert_eq!(body, [8, 9]); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>> + where + Self: Sized, + { + match Ref::<_, Unalign>::sized_from_prefix(source) { + Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)), + Err(CastError::Size(e)) => Err(e.with_dst()), + Err(CastError::Alignment(_)) => { + // SAFETY: `Unalign` is trivially aligned, so + // `Ref::sized_from_prefix` cannot fail due to unmet alignment + // requirements. + unsafe { core::hint::unreachable_unchecked() } + } + Err(CastError::Validity(i)) => match i {}, + } + } + + /// Reads a copy of `Self` from the suffix of the given `source`. + /// + /// This attempts to read a `Self` from the last `size_of::()` bytes + /// of `source`, returning that `Self` and any preceding bytes. If + /// `source.len() < size_of::()`, it returns `Err`. 
+ /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes)] + /// #[repr(C)] + /// struct PacketTrailer { + /// frame_check_sequence: [u8; 4], + /// } + /// + /// // These are more bytes than are needed to encode a `PacketTrailer`. + /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap(); + /// + /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]); + /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); + /// ``` + #[must_use = "has no side effects"] + #[inline] + fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>> + where + Self: Sized, + { + match Ref::<_, Unalign>::sized_from_suffix(source) { + Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())), + Err(CastError::Size(e)) => Err(e.with_dst()), + Err(CastError::Alignment(_)) => { + // SAFETY: `Unalign` is trivially aligned, so + // `Ref::sized_from_suffix` cannot fail due to unmet alignment + // requirements. + unsafe { core::hint::unreachable_unchecked() } + } + Err(CastError::Validity(i)) => match i {}, + } + } + + /// Reads a copy of `self` from an `io::Read`. + /// + /// This is useful for interfacing with operating system byte sinks (files, + /// sockets, etc.). 
+ /// + /// # Examples + /// + /// ```no_run + /// use zerocopy::{byteorder::big_endian::*, FromBytes}; + /// use std::fs::File; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes)] + /// #[repr(C)] + /// struct BitmapFileHeader { + /// signature: [u8; 2], + /// size: U32, + /// reserved: U64, + /// offset: U64, + /// } + /// + /// let mut file = File::open("image.bin").unwrap(); + /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap(); + /// ``` + #[cfg(feature = "std")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] + #[inline(always)] + fn read_from_io(mut src: R) -> io::Result + where + Self: Sized, + R: io::Read, + { + // NOTE(#2319, #2320): We do `buf.zero()` separately rather than + // constructing `let buf = CoreMaybeUninit::zeroed()` because, if `Self` + // contains padding bytes, then a typed copy of `CoreMaybeUninit` + // will not necessarily preserve zeros written to those padding byte + // locations, and so `buf` could contain uninitialized bytes. + let mut buf = CoreMaybeUninit::::uninit(); + buf.zero(); + + let ptr = Ptr::from_mut(&mut buf); + // SAFETY: After `buf.zero()`, `buf` consists entirely of initialized, + // zeroed bytes. Since `MaybeUninit` has no validity requirements, `ptr` + // cannot be used to write values which will violate `buf`'s bit + // validity. Since `ptr` has `Exclusive` aliasing, nothing other than + // `ptr` may be used to mutate `ptr`'s referent, and so its bit validity + // cannot be violated even though `buf` may have more permissive bit + // validity than `ptr`. + let ptr = unsafe { ptr.assume_validity::() }; + let ptr = ptr.as_bytes(); + src.read_exact(ptr.as_mut())?; + // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is + // `FromBytes`. 
+ Ok(unsafe { buf.assume_init() }) + } + + #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + fn ref_from(source: &[u8]) -> Option<&Self> + where + Self: KnownLayout + Immutable, + { + Self::ref_from_bytes(source).ok() + } + + #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + fn mut_from(source: &mut [u8]) -> Option<&mut Self> + where + Self: KnownLayout + IntoBytes, + { + Self::mut_from_bytes(source).ok() + } + + #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])> + where + Self: Sized + Immutable, + { + <[Self]>::ref_from_prefix_with_elems(source, count).ok() + } + + #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])> + where + Self: Sized + Immutable, + { + <[Self]>::ref_from_suffix_with_elems(source, count).ok() + } + + #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])> + where + Self: Sized + IntoBytes, + { + <[Self]>::mut_from_prefix_with_elems(source, count).ok() + } + + #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])> + where + Self: Sized + 
IntoBytes, + { + <[Self]>::mut_from_suffix_with_elems(source, count).ok() + } + + #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")] + #[doc(hidden)] + #[must_use = "has no side effects"] + #[inline(always)] + fn read_from(source: &[u8]) -> Option + where + Self: Sized, + { + Self::read_from_bytes(source).ok() + } +} + +/// Interprets the given affix of the given bytes as a `&Self`. +/// +/// This method computes the largest possible size of `Self` that can fit in the +/// prefix or suffix bytes of `source`, then attempts to return both a reference +/// to those bytes interpreted as a `Self`, and a reference to the excess bytes. +/// If there are insufficient bytes, or if that affix of `source` is not +/// appropriately aligned, this returns `Err`. +#[inline(always)] +fn ref_from_prefix_suffix( + source: &[u8], + meta: Option, + cast_type: CastType, +) -> Result<(&T, &[u8]), CastError<&[u8], T>> { + let (slf, prefix_suffix) = Ptr::from_ref(source) + .try_cast_into::<_, BecauseImmutable>(cast_type, meta) + .map_err(|err| err.map_src(|s| s.as_ref()))?; + Ok((slf.recall_validity().as_ref(), prefix_suffix.as_ref())) +} + +/// Interprets the given affix of the given bytes as a `&mut Self` without +/// copying. +/// +/// This method computes the largest possible size of `Self` that can fit in the +/// prefix or suffix bytes of `source`, then attempts to return both a reference +/// to those bytes interpreted as a `Self`, and a reference to the excess bytes. +/// If there are insufficient bytes, or if that affix of `source` is not +/// appropriately aligned, this returns `Err`. 
+#[inline(always)] +fn mut_from_prefix_suffix( + source: &mut [u8], + meta: Option, + cast_type: CastType, +) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> { + let (slf, prefix_suffix) = Ptr::from_mut(source) + .try_cast_into::<_, BecauseExclusive>(cast_type, meta) + .map_err(|err| err.map_src(|s| s.as_mut()))?; + Ok((slf.recall_validity::<_, (_, (_, _))>().as_mut(), prefix_suffix.as_mut())) +} + +/// Analyzes whether a type is [`IntoBytes`]. +/// +/// This derive analyzes, at compile time, whether the annotated type satisfies +/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is +/// sound to do so. This derive can be applied to structs and enums (see below +/// for union support); e.g.: +/// +/// ``` +/// # use zerocopy_derive::{IntoBytes}; +/// #[derive(IntoBytes)] +/// #[repr(C)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(IntoBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # Variant, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// [safety conditions]: trait@IntoBytes#safety +/// +/// # Error Messages +/// +/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive +/// for `IntoBytes` is implemented, you may get an error like this: +/// +/// ```text +/// error[E0277]: the trait bound `(): PaddingFree` is not satisfied +/// --> lib.rs:23:10 +/// | +/// 1 | #[derive(IntoBytes)] +/// | ^^^^^^^^^ the trait `PaddingFree` is not implemented for `()` +/// | +/// = help: the following implementations were found: +/// <() as PaddingFree> +/// ``` +/// +/// This error indicates that the type being annotated has padding bytes, which +/// is illegal for `IntoBytes` types. Consider reducing the alignment of some +/// fields by using types in the [`byteorder`] module, wrapping field types in +/// [`Unalign`], adding explicit struct fields where those padding bytes would +/// be, or using `#[repr(packed)]`. 
See the Rust Reference's page on [type +/// layout] for more information about type layout and padding. +/// +/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html +/// +/// # Unions +/// +/// Currently, union bit validity is [up in the air][union-validity], and so +/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default. +/// However, implementing `IntoBytes` on a union type is likely sound on all +/// existing Rust toolchains - it's just that it may become unsound in the +/// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by +/// passing the unstable `zerocopy_derive_union_into_bytes` cfg: +/// +/// ```shell +/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build +/// ``` +/// +/// However, it is your responsibility to ensure that this derive is sound on +/// the specific versions of the Rust toolchain you are using! We make no +/// stability or soundness guarantees regarding this cfg, and may remove it at +/// any point. +/// +/// We are actively working with Rust to stabilize the necessary language +/// guarantees to support this in a forwards-compatible way, which will enable +/// us to remove the cfg gate. As part of this effort, we need to know how much +/// demand there is for this feature. If you would like to use `IntoBytes` on +/// unions, [please let us know][discussion]. +/// +/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438 +/// [discussion]: https://github.com/google/zerocopy/discussions/1802 +/// +/// # Analysis +/// +/// *This section describes, roughly, the analysis performed by this derive to +/// determine whether it is sound to implement `IntoBytes` for a given type. 
+/// Unless you are modifying the implementation of this derive, or attempting to +/// manually implement `IntoBytes` for a type yourself, you don't need to read +/// this section.* +/// +/// If a type has the following properties, then this derive can implement +/// `IntoBytes` for that type: +/// +/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally: +/// - if the type is `repr(transparent)` or `repr(packed)`, it is +/// [`IntoBytes`] if its fields are [`IntoBytes`]; else, +/// - if the type is `repr(C)` with at most one field, it is [`IntoBytes`] +/// if its field is [`IntoBytes`]; else, +/// - if the type has no generic parameters, it is [`IntoBytes`] if the type +/// is sized and has no padding bytes; else, +/// - if the type is `repr(C)`, its fields must be [`Unaligned`]. +/// - If the type is an enum: +/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`, +/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`). +/// - It must have no padding bytes. +/// - Its fields must be [`IntoBytes`]. +/// +/// This analysis is subject to change. Unsafe code may *only* rely on the +/// documented [safety conditions] of `FromBytes`, and must *not* rely on the +/// implementation details of this derive. +/// +/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::IntoBytes; + +/// Types that can be converted to an immutable slice of initialized bytes. +/// +/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the +/// same size. This is useful for efficiently serializing structured data as raw +/// bytes. 
+/// +/// # Implementation +/// +/// **Do not implement this trait yourself!** Instead, use +/// [`#[derive(IntoBytes)]`][derive]; e.g.: +/// +/// ``` +/// # use zerocopy_derive::IntoBytes; +/// #[derive(IntoBytes)] +/// #[repr(C)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(IntoBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # Variant0, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// This derive performs a sophisticated, compile-time safety analysis to +/// determine whether a type is `IntoBytes`. See the [derive +/// documentation][derive] for guidance on how to interpret error messages +/// produced by the derive's analysis. +/// +/// # Safety +/// +/// *This section describes what is required in order for `T: IntoBytes`, and +/// what unsafe code may assume of such types. If you don't plan on implementing +/// `IntoBytes` manually, and you don't plan on writing unsafe code that +/// operates on `IntoBytes` types, then you don't need to read this section.* +/// +/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any +/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is +/// marked as `IntoBytes` which violates this contract, it may cause undefined +/// behavior. +/// +/// `#[derive(IntoBytes)]` only permits [types which satisfy these +/// requirements][derive-analysis]. 
+/// +#[cfg_attr( + feature = "derive", + doc = "[derive]: zerocopy_derive::IntoBytes", + doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis" +)] +#[cfg_attr( + not(feature = "derive"), + doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"), + doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"), +)] +#[cfg_attr( + not(no_zerocopy_diagnostic_on_unimplemented_1_78_0), + diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`") +)] +pub unsafe trait IntoBytes { + // The `Self: Sized` bound makes it so that this function doesn't prevent + // `IntoBytes` from being object safe. Note that other `IntoBytes` methods + // prevent object safety, but those provide a benefit in exchange for object + // safety. If at some point we remove those methods, change their type + // signatures, or move them out of this trait so that `IntoBytes` is object + // safe again, it's important that this function not prevent object safety. + #[doc(hidden)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; + + /// Gets the bytes of this value. 
+ /// + /// # Examples + /// + /// ``` + /// use zerocopy::IntoBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(IntoBytes, Immutable)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let header = PacketHeader { + /// src_port: [0, 1], + /// dst_port: [2, 3], + /// length: [4, 5], + /// checksum: [6, 7], + /// }; + /// + /// let bytes = header.as_bytes(); + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); + /// ``` + #[must_use = "has no side effects"] + #[inline(always)] + fn as_bytes(&self) -> &[u8] + where + Self: Immutable, + { + // Note that this method does not have a `Self: Sized` bound; + // `size_of_val` works for unsized values too. + let len = mem::size_of_val(self); + let slf: *const Self = self; + + // SAFETY: + // - `slf.cast::()` is valid for reads for `len * size_of::()` + // many bytes because... + // - `slf` is the same pointer as `self`, and `self` is a reference + // which points to an object whose size is `len`. Thus... + // - The entire region of `len` bytes starting at `slf` is contained + // within a single allocation. + // - `slf` is non-null. + // - `slf` is trivially aligned to `align_of::() == 1`. + // - `Self: IntoBytes` ensures that all of the bytes of `slf` are + // initialized. + // - Since `slf` is derived from `self`, and `self` is an immutable + // reference, the only other references to this memory region that + // could exist are other immutable references, which by `Self: + // Immutable` don't permit mutation. + // - The total size of the resulting slice is no larger than + // `isize::MAX` because no allocation produced by safe code can be + // larger than `isize::MAX`. + // + // FIXME(#429): Add references to docs and quotes. + unsafe { slice::from_raw_parts(slf.cast::(), len) } + } + + /// Gets the bytes of this value mutably. 
+ /// + /// # Examples + /// + /// ``` + /// use zerocopy::IntoBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Eq, PartialEq, Debug)] + /// #[derive(FromBytes, IntoBytes, Immutable)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let mut header = PacketHeader { + /// src_port: [0, 1], + /// dst_port: [2, 3], + /// length: [4, 5], + /// checksum: [6, 7], + /// }; + /// + /// let bytes = header.as_mut_bytes(); + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); + /// + /// bytes.reverse(); + /// + /// assert_eq!(header, PacketHeader { + /// src_port: [7, 6], + /// dst_port: [5, 4], + /// length: [3, 2], + /// checksum: [1, 0], + /// }); + /// ``` + #[must_use = "has no side effects"] + #[inline(always)] + fn as_mut_bytes(&mut self) -> &mut [u8] + where + Self: FromBytes, + { + // Note that this method does not have a `Self: Sized` bound; + // `size_of_val` works for unsized values too. + let len = mem::size_of_val(self); + let slf: *mut Self = self; + + // SAFETY: + // - `slf.cast::()` is valid for reads and writes for `len * + // size_of::()` many bytes because... + // - `slf` is the same pointer as `self`, and `self` is a reference + // which points to an object whose size is `len`. Thus... + // - The entire region of `len` bytes starting at `slf` is contained + // within a single allocation. + // - `slf` is non-null. + // - `slf` is trivially aligned to `align_of::() == 1`. + // - `Self: IntoBytes` ensures that all of the bytes of `slf` are + // initialized. + // - `Self: FromBytes` ensures that no write to this memory region + // could result in it containing an invalid `Self`. + // - Since `slf` is derived from `self`, and `self` is a mutable + // reference, no other references to this memory region can exist. 
+ // - The total size of the resulting slice is no larger than + // `isize::MAX` because no allocation produced by safe code can be + // larger than `isize::MAX`. + // + // FIXME(#429): Add references to docs and quotes. + unsafe { slice::from_raw_parts_mut(slf.cast::(), len) } + } + + /// Writes a copy of `self` to `dst`. + /// + /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::IntoBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(IntoBytes, Immutable)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let header = PacketHeader { + /// src_port: [0, 1], + /// dst_port: [2, 3], + /// length: [4, 5], + /// checksum: [6, 7], + /// }; + /// + /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0]; + /// + /// header.write_to(&mut bytes[..]); + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); + /// ``` + /// + /// If too many or too few target bytes are provided, `write_to` returns + /// `Err` and leaves the target bytes unmodified: + /// + /// ``` + /// # use zerocopy::IntoBytes; + /// # let header = u128::MAX; + /// let mut excessive_bytes = &mut [0u8; 128][..]; + /// + /// let write_result = header.write_to(excessive_bytes); + /// + /// assert!(write_result.is_err()); + /// assert_eq!(excessive_bytes, [0u8; 128]); + /// ``` + #[must_use = "callers should check the return value to see if the operation succeeded"] + #[inline] + #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]` + fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>> + where + Self: Immutable, + { + let src = self.as_bytes(); + if dst.len() == src.len() { + // SAFETY: Within this branch of the conditional, we have ensured + // that `dst.len()` is equal to `src.len()`. 
Neither the size of the + // source nor the size of the destination change between the above + // size check and the invocation of `copy_unchecked`. + unsafe { util::copy_unchecked(src, dst) } + Ok(()) + } else { + Err(SizeError::new(self)) + } + } + + /// Writes a copy of `self` to the prefix of `dst`. + /// + /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes + /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::IntoBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(IntoBytes, Immutable)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let header = PacketHeader { + /// src_port: [0, 1], + /// dst_port: [2, 3], + /// length: [4, 5], + /// checksum: [6, 7], + /// }; + /// + /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + /// + /// header.write_to_prefix(&mut bytes[..]); + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]); + /// ``` + /// + /// If insufficient target bytes are provided, `write_to_prefix` returns + /// `Err` and leaves the target bytes unmodified: + /// + /// ``` + /// # use zerocopy::IntoBytes; + /// # let header = u128::MAX; + /// let mut insufficient_bytes = &mut [0, 0][..]; + /// + /// let write_result = header.write_to_suffix(insufficient_bytes); + /// + /// assert!(write_result.is_err()); + /// assert_eq!(insufficient_bytes, [0, 0]); + /// ``` + #[must_use = "callers should check the return value to see if the operation succeeded"] + #[inline] + #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]` + fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>> + where + Self: Immutable, + { + let src = self.as_bytes(); + match dst.get_mut(..src.len()) { + Some(dst) => { + // SAFETY: Within this branch of the `match`, we have ensured + // through fallible subslicing that 
`dst.len()` is equal to + // `src.len()`. Neither the size of the source nor the size of + // the destination change between the above subslicing operation + // and the invocation of `copy_unchecked`. + unsafe { util::copy_unchecked(src, dst) } + Ok(()) + } + None => Err(SizeError::new(self)), + } + } + + /// Writes a copy of `self` to the suffix of `dst`. + /// + /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of + /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::IntoBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(IntoBytes, Immutable)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let header = PacketHeader { + /// src_port: [0, 1], + /// dst_port: [2, 3], + /// length: [4, 5], + /// checksum: [6, 7], + /// }; + /// + /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + /// + /// header.write_to_suffix(&mut bytes[..]); + /// + /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); + /// + /// let mut insufficient_bytes = &mut [0, 0][..]; + /// + /// let write_result = header.write_to_suffix(insufficient_bytes); + /// + /// assert!(write_result.is_err()); + /// assert_eq!(insufficient_bytes, [0, 0]); + /// ``` + /// + /// If insufficient target bytes are provided, `write_to_suffix` returns + /// `Err` and leaves the target bytes unmodified: + /// + /// ``` + /// # use zerocopy::IntoBytes; + /// # let header = u128::MAX; + /// let mut insufficient_bytes = &mut [0, 0][..]; + /// + /// let write_result = header.write_to_suffix(insufficient_bytes); + /// + /// assert!(write_result.is_err()); + /// assert_eq!(insufficient_bytes, [0, 0]); + /// ``` + #[must_use = "callers should check the return value to see if the operation succeeded"] + #[inline] + #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]` + fn 
write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>> + where + Self: Immutable, + { + let src = self.as_bytes(); + let start = if let Some(start) = dst.len().checked_sub(src.len()) { + start + } else { + return Err(SizeError::new(self)); + }; + let dst = if let Some(dst) = dst.get_mut(start..) { + dst + } else { + // get_mut() should never return None here. We return a `SizeError` + // rather than .unwrap() because in the event the branch is not + // optimized away, returning a value is generally lighter-weight + // than panicking. + return Err(SizeError::new(self)); + }; + // SAFETY: Through fallible subslicing of `dst`, we have ensured that + // `dst.len()` is equal to `src.len()`. Neither the size of the source + // nor the size of the destination change between the above subslicing + // operation and the invocation of `copy_unchecked`. + unsafe { + util::copy_unchecked(src, dst); + } + Ok(()) + } + + /// Writes a copy of `self` to an `io::Write`. + /// + /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful + /// for interfacing with operating system byte sinks (files, sockets, etc.). 
+ /// + /// # Examples + /// + /// ```no_run + /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes}; + /// use std::fs::File; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] + /// #[repr(C, packed)] + /// struct GrayscaleImage { + /// height: U16, + /// width: U16, + /// pixels: [U16], + /// } + /// + /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap(); + /// let mut file = File::create("image.bin").unwrap(); + /// image.write_to_io(&mut file).unwrap(); + /// ``` + /// + /// If the write fails, `write_to_io` returns `Err` and a partial write may + /// have occurred; e.g.: + /// + /// ``` + /// # use zerocopy::IntoBytes; + /// + /// let src = u128::MAX; + /// let mut dst = [0u8; 2]; + /// + /// let write_result = src.write_to_io(&mut dst[..]); + /// + /// assert!(write_result.is_err()); + /// assert_eq!(dst, [255, 255]); + /// ``` + #[cfg(feature = "std")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] + #[inline(always)] + fn write_to_io(&self, mut dst: W) -> io::Result<()> + where + Self: Immutable, + W: io::Write, + { + dst.write_all(self.as_bytes()) + } + + #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")] + #[doc(hidden)] + #[inline] + fn as_bytes_mut(&mut self) -> &mut [u8] + where + Self: FromBytes, + { + self.as_mut_bytes() + } +} + +/// Analyzes whether a type is [`Unaligned`]. +/// +/// This derive analyzes, at compile time, whether the annotated type satisfies +/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is +/// sound to do so. This derive can be applied to structs, enums, and unions; +/// e.g.: +/// +/// ``` +/// # use zerocopy_derive::Unaligned; +/// #[derive(Unaligned)] +/// #[repr(C)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(Unaligned)] +/// #[repr(u8)] +/// enum MyEnum { +/// # Variant0, +/// # /* +/// ... 
+/// # */ +/// } +/// +/// #[derive(Unaligned)] +/// #[repr(packed)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// # Analysis +/// +/// *This section describes, roughly, the analysis performed by this derive to +/// determine whether it is sound to implement `Unaligned` for a given type. +/// Unless you are modifying the implementation of this derive, or attempting to +/// manually implement `Unaligned` for a type yourself, you don't need to read +/// this section.* +/// +/// If a type has the following properties, then this derive can implement +/// `Unaligned` for that type: +/// +/// - If the type is a struct or union: +/// - If `repr(align(N))` is provided, `N` must equal 1. +/// - If the type is `repr(C)` or `repr(transparent)`, all fields must be +/// [`Unaligned`]. +/// - If the type is not `repr(C)` or `repr(transparent)`, it must be +/// `repr(packed)` or `repr(packed(1))`. +/// - If the type is an enum: +/// - If `repr(align(N))` is provided, `N` must equal 1. +/// - It must be a field-less enum (meaning that all variants have no fields). +/// - It must be `repr(i8)` or `repr(u8)`. +/// +/// [safety conditions]: trait@Unaligned#safety +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::Unaligned; + +/// Types with no alignment requirement. +/// +/// If `T: Unaligned`, then `align_of::() == 1`. +/// +/// # Implementation +/// +/// **Do not implement this trait yourself!** Instead, use +/// [`#[derive(Unaligned)]`][derive]; e.g.: +/// +/// ``` +/// # use zerocopy_derive::Unaligned; +/// #[derive(Unaligned)] +/// #[repr(C)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(Unaligned)] +/// #[repr(u8)] +/// enum MyEnum { +/// # Variant0, +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(Unaligned)] +/// #[repr(packed)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... 
+/// # */ +/// } +/// ``` +/// +/// This derive performs a sophisticated, compile-time safety analysis to +/// determine whether a type is `Unaligned`. +/// +/// # Safety +/// +/// *This section describes what is required in order for `T: Unaligned`, and +/// what unsafe code may assume of such types. If you don't plan on implementing +/// `Unaligned` manually, and you don't plan on writing unsafe code that +/// operates on `Unaligned` types, then you don't need to read this section.* +/// +/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a +/// reference to `T` at any memory location regardless of alignment. If a type +/// is marked as `Unaligned` which violates this contract, it may cause +/// undefined behavior. +/// +/// `#[derive(Unaligned)]` only permits [types which satisfy these +/// requirements][derive-analysis]. +/// +#[cfg_attr( + feature = "derive", + doc = "[derive]: zerocopy_derive::Unaligned", + doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis" +)] +#[cfg_attr( + not(feature = "derive"), + doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"), + doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"), +)] +#[cfg_attr( + not(no_zerocopy_diagnostic_on_unimplemented_1_78_0), + diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`") +)] +pub unsafe trait Unaligned { + // The `Self: Sized` bound makes it so that `Unaligned` is still object + // safe. + #[doc(hidden)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; +} + +/// Derives optimized [`PartialEq`] and [`Eq`] implementations. 
+/// +/// This derive can be applied to structs and enums implementing both +/// [`Immutable`] and [`IntoBytes`]; e.g.: +/// +/// ``` +/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes}; +/// #[derive(ByteEq, Immutable, IntoBytes)] +/// #[repr(C)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(ByteEq, Immutable, IntoBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # Variant, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// The standard library's [`derive(Eq, PartialEq)`][derive@PartialEq] computes +/// equality by individually comparing each field. Instead, the implementation +/// of [`PartialEq::eq`] emitted by `derive(ByteHash)` converts the entirety of +/// `self` and `other` to byte slices and compares those slices for equality. +/// This may have performance advantages. +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::ByteEq; +/// Derives an optimized [`Hash`] implementation. +/// +/// This derive can be applied to structs and enums implementing both +/// [`Immutable`] and [`IntoBytes`]; e.g.: +/// +/// ``` +/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes}; +/// #[derive(ByteHash, Immutable, IntoBytes)] +/// #[repr(C)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(ByteHash, Immutable, IntoBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # Variant, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// The standard library's [`derive(Hash)`][derive@Hash] produces hashes by +/// individually hashing each field and combining the results. Instead, the +/// implementations of [`Hash::hash()`] and [`Hash::hash_slice()`] generated by +/// `derive(ByteHash)` convert the entirety of `self` to a byte slice and hashes +/// it in a single call to [`Hasher::write()`]. This may have performance +/// advantages. 
+/// +/// [`Hash`]: core::hash::Hash +/// [`Hash::hash()`]: core::hash::Hash::hash() +/// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice() +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::ByteHash; +/// Implements [`SplitAt`]. +/// +/// This derive can be applied to structs; e.g.: +/// +/// ``` +/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes}; +/// #[derive(ByteEq, Immutable, IntoBytes)] +/// #[repr(C)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// ``` +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::SplitAt; + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] +mod alloc_support { + use super::*; + + /// Extends a `Vec` by pushing `additional` new items onto the end of the + /// vector. The new items are initialized with zeros. + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + #[doc(hidden)] + #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")] + #[inline(always)] + pub fn extend_vec_zeroed( + v: &mut Vec, + additional: usize, + ) -> Result<(), AllocError> { + ::extend_vec_zeroed(v, additional) + } + + /// Inserts `additional` new items into `Vec` at `position`. The new + /// items are initialized with zeros. + /// + /// # Panics + /// + /// Panics if `position > v.len()`. 
+ #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + #[doc(hidden)] + #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")] + #[inline(always)] + pub fn insert_vec_zeroed( + v: &mut Vec, + position: usize, + additional: usize, + ) -> Result<(), AllocError> { + ::insert_vec_zeroed(v, position, additional) + } +} + +#[cfg(feature = "alloc")] +#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] +#[doc(hidden)] +pub use alloc_support::*; + +#[cfg(test)] +#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)] +mod tests { + use static_assertions::assert_impl_all; + + use super::*; + use crate::util::testutil::*; + + // An unsized type. + // + // This is used to test the custom derives of our traits. The `[u8]` type + // gets a hand-rolled impl, so it doesn't exercise our custom derives. + #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)] + #[repr(transparent)] + struct Unsized([u8]); + + impl Unsized { + fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized { + // SAFETY: This *probably* sound - since the layouts of `[u8]` and + // `Unsized` are the same, so are the layouts of `&mut [u8]` and + // `&mut Unsized`. [1] Even if it turns out that this isn't actually + // guaranteed by the language spec, we can just change this since + // it's in test code. + // + // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375 + unsafe { mem::transmute(slc) } + } + } + + #[test] + fn test_known_layout() { + // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout. + // Test that `PhantomData<$ty>` has the same layout as `()` regardless + // of `$ty`. + macro_rules! 
test { + ($ty:ty, $expect:expr) => { + let expect = $expect; + assert_eq!(<$ty as KnownLayout>::LAYOUT, expect); + assert_eq!( as KnownLayout>::LAYOUT, expect); + assert_eq!( as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT); + }; + } + + let layout = + |offset, align, trailing_slice_elem_size, statically_shallow_unpadded| DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: match trailing_slice_elem_size { + None => SizeInfo::Sized { size: offset }, + Some(elem_size) => { + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) + } + }, + statically_shallow_unpadded, + }; + + test!((), layout(0, 1, None, false)); + test!(u8, layout(1, 1, None, false)); + // Use `align_of` because `u64` alignment may be smaller than 8 on some + // platforms. + test!(u64, layout(8, mem::align_of::(), None, false)); + test!(AU64, layout(8, 8, None, false)); + + test!(Option<&'static ()>, usize::LAYOUT); + + test!([()], layout(0, 1, Some(0), true)); + test!([u8], layout(0, 1, Some(1), true)); + test!(str, layout(0, 1, Some(1), true)); + } + + #[cfg(feature = "derive")] + #[test] + fn test_known_layout_derive() { + // In this and other files (`late_compile_pass.rs`, + // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure + // modes of `derive(KnownLayout)` for the following combination of + // properties: + // + // +------------+--------------------------------------+-----------+ + // | | trailing field properties | | + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? 
| Type Name | + // |------------+----------+----------------+----------+-----------| + // | N | N | N | N | KL00 | + // | N | N | N | Y | KL01 | + // | N | N | Y | N | KL02 | + // | N | N | Y | Y | KL03 | + // | N | Y | N | N | KL04 | + // | N | Y | N | Y | KL05 | + // | N | Y | Y | N | KL06 | + // | N | Y | Y | Y | KL07 | + // | Y | N | N | N | KL08 | + // | Y | N | N | Y | KL09 | + // | Y | N | Y | N | KL10 | + // | Y | N | Y | Y | KL11 | + // | Y | Y | N | N | KL12 | + // | Y | Y | N | Y | KL13 | + // | Y | Y | Y | N | KL14 | + // | Y | Y | Y | Y | KL15 | + // +------------+----------+----------------+----------+-----------+ + + struct NotKnownLayout { + _t: T, + } + + #[derive(KnownLayout)] + #[repr(C)] + struct AlignSize + where + elain::Align: elain::Alignment, + { + _align: elain::Align, + size: [u8; SIZE], + } + + type AU16 = AlignSize<2, 2>; + type AU32 = AlignSize<4, 4>; + + fn _assert_kl(_: &T) {} + + let sized_layout = |align, size| DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: SizeInfo::Sized { size }, + statically_shallow_unpadded: false, + }; + + let unsized_layout = |align, elem_size, offset, statically_shallow_unpadded| DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }), + statically_shallow_unpadded, + }; + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? 
| Type Name | + // | N | N | N | Y | KL01 | + #[allow(dead_code)] + #[derive(KnownLayout)] + struct KL01(NotKnownLayout, NotKnownLayout); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(4, 8)); + + // ...with `align(N)`: + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(align(64))] + struct KL01Align(NotKnownLayout, NotKnownLayout); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(64, 64)); + + // ...with `packed`: + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(packed)] + struct KL01Packed(NotKnownLayout, NotKnownLayout); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(1, 6)); + + // ...with `packed(N)`: + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(packed(2))] + struct KL01PackedN(NotKnownLayout, NotKnownLayout); + + assert_impl_all!(KL01PackedN: KnownLayout); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(2, 6)); + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | + // | N | N | Y | Y | KL03 | + #[allow(dead_code)] + #[derive(KnownLayout)] + struct KL03(NotKnownLayout, u8); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(1, 1)); + + // ... with `align(N)` + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(align(64))] + struct KL03Align(NotKnownLayout, u8); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(64, 64)); + + // ... with `packed`: + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(packed)] + struct KL03Packed(NotKnownLayout, u8); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(1, 5)); + + // ... 
with `packed(N)` + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(packed(2))] + struct KL03PackedN(NotKnownLayout, u8); + + assert_impl_all!(KL03PackedN: KnownLayout); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(2, 6)); + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | + // | N | Y | N | Y | KL05 | + #[allow(dead_code)] + #[derive(KnownLayout)] + struct KL05(u8, T); + + fn _test_kl05(t: T) -> impl KnownLayout { + KL05(0u8, t) + } + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | + // | N | Y | Y | Y | KL07 | + #[allow(dead_code)] + #[derive(KnownLayout)] + struct KL07(u8, T); + + fn _test_kl07(t: T) -> impl KnownLayout { + let _ = KL07(0u8, t); + } + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | + // | Y | N | Y | N | KL10 | + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(C)] + struct KL10(NotKnownLayout, [u8]); + + let expected = DstLayout::new_zst(None) + .extend(DstLayout::for_type::>(), None) + .extend(<[u8] as KnownLayout>::LAYOUT, None) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, unsized_layout(4, 1, 4, false)); + + // ...with `align(N)`: + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(C, align(64))] + struct KL10Align(NotKnownLayout, [u8]); + + let repr_align = NonZeroUsize::new(64); + + let expected = DstLayout::new_zst(repr_align) + .extend(DstLayout::for_type::>(), None) + .extend(<[u8] as KnownLayout>::LAYOUT, None) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, unsized_layout(64, 1, 4, false)); + + // ...with `packed`: + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(C, packed)] + struct KL10Packed(NotKnownLayout, [u8]); + + let repr_packed = NonZeroUsize::new(1); + + let expected = DstLayout::new_zst(None) + .extend(DstLayout::for_type::>(), repr_packed) + .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed) + 
.pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, unsized_layout(1, 1, 4, false)); + + // ...with `packed(N)`: + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(C, packed(2))] + struct KL10PackedN(NotKnownLayout, [u8]); + + let repr_packed = NonZeroUsize::new(2); + + let expected = DstLayout::new_zst(None) + .extend(DstLayout::for_type::>(), repr_packed) + .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, unsized_layout(2, 1, 4, false)); + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | + // | Y | N | Y | Y | KL11 | + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(C)] + struct KL11(NotKnownLayout, u8); + + let expected = DstLayout::new_zst(None) + .extend(DstLayout::for_type::>(), None) + .extend(::LAYOUT, None) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(8, 16)); + + // ...with `align(N)`: + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(C, align(64))] + struct KL11Align(NotKnownLayout, u8); + + let repr_align = NonZeroUsize::new(64); + + let expected = DstLayout::new_zst(repr_align) + .extend(DstLayout::for_type::>(), None) + .extend(::LAYOUT, None) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(64, 64)); + + // ...with `packed`: + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(C, packed)] + struct KL11Packed(NotKnownLayout, u8); + + let repr_packed = NonZeroUsize::new(1); + + let expected = DstLayout::new_zst(None) + .extend(DstLayout::for_type::>(), repr_packed) + .extend(::LAYOUT, repr_packed) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(1, 9)); + + // ...with `packed(N)`: + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(C, packed(2))] + struct KL11PackedN(NotKnownLayout, u8); + + let repr_packed = NonZeroUsize::new(2); + + let expected = DstLayout::new_zst(None) + 
.extend(DstLayout::for_type::>(), repr_packed) + .extend(::LAYOUT, repr_packed) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(2, 10)); + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | + // | Y | Y | Y | N | KL14 | + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(C)] + struct KL14(u8, T); + + fn _test_kl14(kl: &KL14) { + _assert_kl(kl) + } + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | + // | Y | Y | Y | Y | KL15 | + #[allow(dead_code)] + #[derive(KnownLayout)] + #[repr(C)] + struct KL15(u8, T); + + fn _test_kl15(t: T) -> impl KnownLayout { + let _ = KL15(0u8, t); + } + + // Test a variety of combinations of field types: + // - () + // - u8 + // - AU16 + // - [()] + // - [u8] + // - [AU16] + + #[allow(clippy::upper_case_acronyms, dead_code)] + #[derive(KnownLayout)] + #[repr(C)] + struct KLTU(T, U); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(1, 0)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(1, 1)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(2, 2)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0, false)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, false)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0, false)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(1, 1)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(1, 2)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(2, 4)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1, false)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(2, 2)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(2, 4)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(2, 4)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2, false)); + + 
assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2, false)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false)); + + // Test a variety of field counts. + + #[derive(KnownLayout)] + #[repr(C)] + struct KLF0; + + assert_eq!(::LAYOUT, sized_layout(1, 0)); + + #[derive(KnownLayout)] + #[repr(C)] + struct KLF1([u8]); + + assert_eq!(::LAYOUT, unsized_layout(1, 1, 0, true)); + + #[derive(KnownLayout)] + #[repr(C)] + struct KLF2(NotKnownLayout, [u8]); + + assert_eq!(::LAYOUT, unsized_layout(1, 1, 1, false)); + + #[derive(KnownLayout)] + #[repr(C)] + struct KLF3(NotKnownLayout, NotKnownLayout, [u8]); + + assert_eq!(::LAYOUT, unsized_layout(2, 1, 4, false)); + + #[derive(KnownLayout)] + #[repr(C)] + struct KLF4(NotKnownLayout, NotKnownLayout, NotKnownLayout, [u8]); + + assert_eq!(::LAYOUT, unsized_layout(4, 1, 8, false)); + } + + #[test] + fn test_object_safety() { + fn _takes_immutable(_: &dyn Immutable) {} + fn _takes_unaligned(_: &dyn Unaligned) {} + } + + #[test] + fn test_from_zeros_only() { + // Test types that implement `FromZeros` but not `FromBytes`. 
+ + assert!(!bool::new_zeroed()); + assert_eq!(char::new_zeroed(), '\0'); + + #[cfg(feature = "alloc")] + { + assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false))); + assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0'))); + + assert_eq!( + <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(), + [false, false, false] + ); + assert_eq!( + <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(), + ['\0', '\0', '\0'] + ); + + assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]); + assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']); + } + + let mut string = "hello".to_string(); + let s: &mut str = string.as_mut(); + assert_eq!(s, "hello"); + s.zero(); + assert_eq!(s, "\0\0\0\0\0"); + } + + #[test] + fn test_zst_count_preserved() { + // Test that, when an explicit count is provided to for a type with a + // ZST trailing slice element, that count is preserved. This is + // important since, for such types, all element counts result in objects + // of the same size, and so the correct behavior is ambiguous. However, + // preserving the count as requested by the user is the behavior that we + // document publicly. 
+ + // FromZeros methods + #[cfg(feature = "alloc")] + assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3); + #[cfg(feature = "alloc")] + assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3); + + // FromBytes methods + assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3); + assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3); + assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3); + assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3); + assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3); + assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3); + } + + #[test] + fn test_read_write() { + const VAL: u64 = 0x12345678; + #[cfg(target_endian = "big")] + const VAL_BYTES: [u8; 8] = VAL.to_be_bytes(); + #[cfg(target_endian = "little")] + const VAL_BYTES: [u8; 8] = VAL.to_le_bytes(); + const ZEROS: [u8; 8] = [0u8; 8]; + + // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`. + + assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL)); + // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all + // zeros. + let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]); + assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..]))); + assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0))); + // The first 8 bytes are all zeros and the second 8 bytes are from + // `VAL_BYTES` + let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]); + assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..]))); + assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL))); + + // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`. 
+ + let mut bytes = [0u8; 8]; + assert_eq!(VAL.write_to(&mut bytes[..]), Ok(())); + assert_eq!(bytes, VAL_BYTES); + let mut bytes = [0u8; 16]; + assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(())); + let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]); + assert_eq!(bytes, want); + let mut bytes = [0u8; 16]; + assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(())); + let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]); + assert_eq!(bytes, want); + } + + #[test] + #[cfg(feature = "std")] + fn test_read_io_with_padding_soundness() { + // This test is designed to exhibit potential UB in + // `FromBytes::read_from_io`. (see #2319, #2320). + + // On most platforms (where `align_of::() == 2`), `WithPadding` + // will have inter-field padding between `x` and `y`. + #[derive(FromBytes)] + #[repr(C)] + struct WithPadding { + x: u8, + y: u16, + } + struct ReadsInRead; + impl std::io::Read for ReadsInRead { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + // This body branches on every byte of `buf`, ensuring that it + // exhibits UB if any byte of `buf` is uninitialized. 
+ if buf.iter().all(|&x| x == 0) { + Ok(buf.len()) + } else { + buf.iter_mut().for_each(|x| *x = 0); + Ok(buf.len()) + } + } + } + assert!(matches!(WithPadding::read_from_io(ReadsInRead), Ok(WithPadding { x: 0, y: 0 }))); + } + + #[test] + #[cfg(feature = "std")] + fn test_read_write_io() { + let mut long_buffer = [0, 0, 0, 0]; + assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(()))); + assert_eq!(long_buffer, [255, 255, 0, 0]); + assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX))); + + let mut short_buffer = [0, 0]; + assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err()); + assert_eq!(short_buffer, [255, 255]); + assert!(u32::read_from_io(&short_buffer[..]).is_err()); + } + + #[test] + fn test_try_from_bytes_try_read_from() { + assert_eq!(::try_read_from_bytes(&[0]), Ok(false)); + assert_eq!(::try_read_from_bytes(&[1]), Ok(true)); + + assert_eq!(::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..]))); + assert_eq!(::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..]))); + + assert_eq!(::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false))); + assert_eq!(::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true))); + + // If we don't pass enough bytes, it fails. + assert!(matches!( + ::try_read_from_bytes(&[]), + Err(TryReadError::Size(_)) + )); + assert!(matches!( + ::try_read_from_prefix(&[]), + Err(TryReadError::Size(_)) + )); + assert!(matches!( + ::try_read_from_suffix(&[]), + Err(TryReadError::Size(_)) + )); + + // If we pass too many bytes, it fails. + assert!(matches!( + ::try_read_from_bytes(&[0, 0]), + Err(TryReadError::Size(_)) + )); + + // If we pass an invalid value, it fails. + assert!(matches!( + ::try_read_from_bytes(&[2]), + Err(TryReadError::Validity(_)) + )); + assert!(matches!( + ::try_read_from_prefix(&[2, 0]), + Err(TryReadError::Validity(_)) + )); + assert!(matches!( + ::try_read_from_suffix(&[0, 2]), + Err(TryReadError::Validity(_)) + )); + + // Reading from a misaligned buffer should still succeed. 
Since `AU64`'s + // alignment is 8, and since we read from two adjacent addresses one + // byte apart, it is guaranteed that at least one of them (though + // possibly both) will be misaligned. + let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0]; + assert_eq!(::try_read_from_bytes(&bytes[..8]), Ok(AU64(0))); + assert_eq!(::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0))); + + assert_eq!( + ::try_read_from_prefix(&bytes[..8]), + Ok((AU64(0), &[][..])) + ); + assert_eq!( + ::try_read_from_prefix(&bytes[1..9]), + Ok((AU64(0), &[][..])) + ); + + assert_eq!( + ::try_read_from_suffix(&bytes[..8]), + Ok((&[][..], AU64(0))) + ); + assert_eq!( + ::try_read_from_suffix(&bytes[1..9]), + Ok((&[][..], AU64(0))) + ); + } + + #[test] + fn test_ref_from_mut_from_bytes() { + // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}` + // success cases. Exhaustive coverage for these methods is covered by + // the `Ref` tests above, which these helper methods defer to. + + let mut buf = + Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + + assert_eq!( + AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(), + [8, 9, 10, 11, 12, 13, 14, 15] + ); + let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap(); + suffix.0 = 0x0101010101010101; + // The `[u8:9]` is a non-half size of the full buffer, which would catch + // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511). 
+ assert_eq!( + <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(), + (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1]) + ); + let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap(); + assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]); + suffix.0 = 0x0202020202020202; + let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap(); + assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]); + suffix[0] = 42; + assert_eq!( + <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(), + (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..]) + ); + <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30; + assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]); + } + + #[test] + fn test_ref_from_mut_from_bytes_error() { + // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}` + // error cases. + + // Fail because the buffer is too large. + let mut buf = Align::<[u8; 16], AU64>::default(); + // `buf.t` should be aligned to 8, so only the length check should fail. + assert!(AU64::ref_from_bytes(&buf.t[..]).is_err()); + assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err()); + assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err()); + assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err()); + + // Fail because the buffer is too small. 
+ let mut buf = Align::<[u8; 4], AU64>::default(); + assert!(AU64::ref_from_bytes(&buf.t[..]).is_err()); + assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err()); + assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err()); + assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err()); + assert!(AU64::ref_from_prefix(&buf.t[..]).is_err()); + assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err()); + assert!(AU64::ref_from_suffix(&buf.t[..]).is_err()); + assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err()); + assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err()); + assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err()); + assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err()); + assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err()); + + // Fail because the alignment is insufficient. + let mut buf = Align::<[u8; 13], AU64>::default(); + assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err()); + assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err()); + assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err()); + assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err()); + assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err()); + assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err()); + assert!(AU64::ref_from_suffix(&buf.t[..]).is_err()); + assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err()); + } + + #[test] + fn test_to_methods() { + /// Run a series of tests by calling `IntoBytes` methods on `t`. + /// + /// `bytes` is the expected byte sequence returned from `t.as_bytes()` + /// before `t` has been modified. `post_mutation` is the expected + /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]` + /// has had its bits flipped (by applying `^= 0xFF`). + /// + /// `N` is the size of `t` in bytes. + fn test( + t: &mut T, + bytes: &[u8], + post_mutation: &T, + ) { + // Test that we can access the underlying bytes, and that we get the + // right bytes and the right number of bytes. 
+ assert_eq!(t.as_bytes(), bytes); + + // Test that changes to the underlying byte slices are reflected in + // the original object. + t.as_mut_bytes()[0] ^= 0xFF; + assert_eq!(t, post_mutation); + t.as_mut_bytes()[0] ^= 0xFF; + + // `write_to` rejects slices that are too small or too large. + assert!(t.write_to(&mut vec![0; N - 1][..]).is_err()); + assert!(t.write_to(&mut vec![0; N + 1][..]).is_err()); + + // `write_to` works as expected. + let mut bytes = [0; N]; + assert_eq!(t.write_to(&mut bytes[..]), Ok(())); + assert_eq!(bytes, t.as_bytes()); + + // `write_to_prefix` rejects slices that are too small. + assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err()); + + // `write_to_prefix` works with exact-sized slices. + let mut bytes = [0; N]; + assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(())); + assert_eq!(bytes, t.as_bytes()); + + // `write_to_prefix` works with too-large slices, and any bytes past + // the prefix aren't modified. + let mut too_many_bytes = vec![0; N + 1]; + too_many_bytes[N] = 123; + assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(())); + assert_eq!(&too_many_bytes[..N], t.as_bytes()); + assert_eq!(too_many_bytes[N], 123); + + // `write_to_suffix` rejects slices that are too small. + assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err()); + + // `write_to_suffix` works with exact-sized slices. + let mut bytes = [0; N]; + assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(())); + assert_eq!(bytes, t.as_bytes()); + + // `write_to_suffix` works with too-large slices, and any bytes + // before the suffix aren't modified. 
+ let mut too_many_bytes = vec![0; N + 1]; + too_many_bytes[0] = 123; + assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(())); + assert_eq!(&too_many_bytes[1..], t.as_bytes()); + assert_eq!(too_many_bytes[0], 123); + } + + #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)] + #[repr(C)] + struct Foo { + a: u32, + b: Wrapping, + c: Option, + } + + let expected_bytes: Vec = if cfg!(target_endian = "little") { + vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0] + } else { + vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0] + }; + let post_mutation_expected_a = + if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 }; + test::<_, 12>( + &mut Foo { a: 1, b: Wrapping(2), c: None }, + expected_bytes.as_bytes(), + &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None }, + ); + test::<_, 3>( + Unsized::from_mut_slice(&mut [1, 2, 3]), + &[1, 2, 3], + Unsized::from_mut_slice(&mut [0xFE, 2, 3]), + ); + } + + #[test] + fn test_array() { + #[derive(FromBytes, IntoBytes, Immutable)] + #[repr(C)] + struct Foo { + a: [u16; 33], + } + + let foo = Foo { a: [0xFFFF; 33] }; + let expected = [0xFFu8; 66]; + assert_eq!(foo.as_bytes(), &expected[..]); + } + + #[test] + fn test_new_zeroed() { + assert!(!bool::new_zeroed()); + assert_eq!(u64::new_zeroed(), 0); + // This test exists in order to exercise unsafe code, especially when + // running under Miri. 
+ #[allow(clippy::unit_cmp)] + { + assert_eq!(<()>::new_zeroed(), ()); + } + } + + #[test] + fn test_transparent_packed_generic_struct() { + #[derive(IntoBytes, FromBytes, Unaligned)] + #[repr(transparent)] + #[allow(dead_code)] // We never construct this type + struct Foo { + _t: T, + _phantom: PhantomData<()>, + } + + assert_impl_all!(Foo: FromZeros, FromBytes, IntoBytes); + assert_impl_all!(Foo: Unaligned); + + #[derive(IntoBytes, FromBytes, Unaligned)] + #[repr(C, packed)] + #[allow(dead_code)] // We never construct this type + struct Bar { + _t: T, + _u: U, + } + + assert_impl_all!(Bar: FromZeros, FromBytes, IntoBytes, Unaligned); + } + + #[cfg(feature = "alloc")] + mod alloc { + use super::*; + + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + #[test] + fn test_extend_vec_zeroed() { + // Test extending when there is an existing allocation. + let mut v = vec![100u16, 200, 300]; + FromZeros::extend_vec_zeroed(&mut v, 3).unwrap(); + assert_eq!(v.len(), 6); + assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]); + drop(v); + + // Test extending when there is no existing allocation. + let mut v: Vec = Vec::new(); + FromZeros::extend_vec_zeroed(&mut v, 3).unwrap(); + assert_eq!(v.len(), 3); + assert_eq!(&*v, &[0, 0, 0]); + drop(v); + } + + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + #[test] + fn test_extend_vec_zeroed_zst() { + // Test extending when there is an existing (fake) allocation. + let mut v = vec![(), (), ()]; + <()>::extend_vec_zeroed(&mut v, 3).unwrap(); + assert_eq!(v.len(), 6); + assert_eq!(&*v, &[(), (), (), (), (), ()]); + drop(v); + + // Test extending when there is no existing (fake) allocation. + let mut v: Vec<()> = Vec::new(); + <()>::extend_vec_zeroed(&mut v, 3).unwrap(); + assert_eq!(&*v, &[(), (), ()]); + drop(v); + } + + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + #[test] + fn test_insert_vec_zeroed() { + // Insert at start (no existing allocation). 
+ let mut v: Vec = Vec::new(); + u64::insert_vec_zeroed(&mut v, 0, 2).unwrap(); + assert_eq!(v.len(), 2); + assert_eq!(&*v, &[0, 0]); + drop(v); + + // Insert at start. + let mut v = vec![100u64, 200, 300]; + u64::insert_vec_zeroed(&mut v, 0, 2).unwrap(); + assert_eq!(v.len(), 5); + assert_eq!(&*v, &[0, 0, 100, 200, 300]); + drop(v); + + // Insert at middle. + let mut v = vec![100u64, 200, 300]; + u64::insert_vec_zeroed(&mut v, 1, 1).unwrap(); + assert_eq!(v.len(), 4); + assert_eq!(&*v, &[100, 0, 200, 300]); + drop(v); + + // Insert at end. + let mut v = vec![100u64, 200, 300]; + u64::insert_vec_zeroed(&mut v, 3, 1).unwrap(); + assert_eq!(v.len(), 4); + assert_eq!(&*v, &[100, 200, 300, 0]); + drop(v); + } + + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + #[test] + fn test_insert_vec_zeroed_zst() { + // Insert at start (no existing fake allocation). + let mut v: Vec<()> = Vec::new(); + <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap(); + assert_eq!(v.len(), 2); + assert_eq!(&*v, &[(), ()]); + drop(v); + + // Insert at start. + let mut v = vec![(), (), ()]; + <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap(); + assert_eq!(v.len(), 5); + assert_eq!(&*v, &[(), (), (), (), ()]); + drop(v); + + // Insert at middle. + let mut v = vec![(), (), ()]; + <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap(); + assert_eq!(v.len(), 4); + assert_eq!(&*v, &[(), (), (), ()]); + drop(v); + + // Insert at end. + let mut v = vec![(), (), ()]; + <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap(); + assert_eq!(v.len(), 4); + assert_eq!(&*v, &[(), (), (), ()]); + drop(v); + } + + #[test] + fn test_new_box_zeroed() { + assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0))); + } + + #[test] + fn test_new_box_zeroed_array() { + drop(<[u32; 0x1000]>::new_box_zeroed()); + } + + #[test] + fn test_new_box_zeroed_zst() { + // This test exists in order to exercise unsafe code, especially + // when running under Miri. 
+ #[allow(clippy::unit_cmp)] + { + assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(()))); + } + } + + #[test] + fn test_new_box_zeroed_with_elems() { + let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap(); + assert_eq!(s.len(), 3); + assert_eq!(&*s, &[0, 0, 0]); + s[1] = 3; + assert_eq!(&*s, &[0, 3, 0]); + } + + #[test] + fn test_new_box_zeroed_with_elems_empty() { + let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap(); + assert_eq!(s.len(), 0); + } + + #[test] + fn test_new_box_zeroed_with_elems_zst() { + let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap(); + assert_eq!(s.len(), 3); + assert!(s.get(10).is_none()); + // This test exists in order to exercise unsafe code, especially + // when running under Miri. + #[allow(clippy::unit_cmp)] + { + assert_eq!(s[1], ()); + } + s[2] = (); + } + + #[test] + fn test_new_box_zeroed_with_elems_zst_empty() { + let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap(); + assert_eq!(s.len(), 0); + } + + #[test] + fn new_box_zeroed_with_elems_errors() { + assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError)); + + let max = >::try_from(isize::MAX).unwrap(); + assert_eq!( + <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::()) + 1), + Err(AllocError) + ); + } + } + + #[test] + #[allow(deprecated)] + fn test_deprecated_from_bytes() { + let val = 0u32; + let bytes = val.as_bytes(); + + assert!(u32::ref_from(bytes).is_some()); + // mut_from needs mut bytes + let mut val = 0u32; + let mut_bytes = val.as_mut_bytes(); + assert!(u32::mut_from(mut_bytes).is_some()); + + assert!(u32::read_from(bytes).is_some()); + + let (slc, rest) = ::slice_from_prefix(bytes, 0).unwrap(); + assert!(slc.is_empty()); + assert_eq!(rest.len(), 4); + + let (rest, slc) = ::slice_from_suffix(bytes, 0).unwrap(); + assert!(slc.is_empty()); + assert_eq!(rest.len(), 4); + + let (slc, rest) = ::mut_slice_from_prefix(mut_bytes, 0).unwrap(); + assert!(slc.is_empty()); + 
assert_eq!(rest.len(), 4); + + let (rest, slc) = ::mut_slice_from_suffix(mut_bytes, 0).unwrap(); + assert!(slc.is_empty()); + assert_eq!(rest.len(), 4); + } + + #[test] + fn test_try_ref_from_prefix_suffix() { + use crate::util::testutil::Align; + let bytes = &Align::<[u8; 4], u32>::new([0u8; 4]).t[..]; + let (r, rest): (&u32, &[u8]) = u32::try_ref_from_prefix(bytes).unwrap(); + assert_eq!(*r, 0); + assert_eq!(rest.len(), 0); + + let (rest, r): (&[u8], &u32) = u32::try_ref_from_suffix(bytes).unwrap(); + assert_eq!(*r, 0); + assert_eq!(rest.len(), 0); + } + + #[test] + fn test_raw_dangling() { + use crate::util::AsAddress; + let ptr: NonNull = u32::raw_dangling(); + assert_eq!(AsAddress::addr(ptr), 1); + + let ptr: NonNull<[u32]> = <[u32]>::raw_dangling(); + assert_eq!(AsAddress::addr(ptr), 1); + } + + #[test] + fn test_try_ref_from_prefix_with_elems() { + use crate::util::testutil::Align; + let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..]; + let (r, rest): (&[u32], &[u8]) = <[u32]>::try_ref_from_prefix_with_elems(bytes, 2).unwrap(); + assert_eq!(r.len(), 2); + assert_eq!(rest.len(), 0); + } + + #[test] + fn test_try_ref_from_suffix_with_elems() { + use crate::util::testutil::Align; + let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..]; + let (rest, r): (&[u8], &[u32]) = <[u32]>::try_ref_from_suffix_with_elems(bytes, 2).unwrap(); + assert_eq!(r.len(), 2); + assert_eq!(rest.len(), 0); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/macros.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/macros.rs new file mode 100644 index 0000000000000000000000000000000000000000..cd0677d4501583c4c56e558c70f9fecf7159d366 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/macros.rs @@ -0,0 +1,1772 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your 
option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +/// Safely transmutes a value of one type to a value of another type of the same +/// size. +/// +/// This macro behaves like an invocation of this function: +/// +/// ```ignore +/// const fn transmute(src: Src) -> Dst +/// where +/// Src: IntoBytes, +/// Dst: FromBytes, +/// size_of::() == size_of::(), +/// { +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// However, unlike a function, this macro can only be invoked when the types of +/// `Src` and `Dst` are completely concrete. The types `Src` and `Dst` are +/// inferred from the calling context; they cannot be explicitly specified in +/// the macro invocation. +/// +/// Note that the `Src` produced by the expression `$e` will *not* be dropped. +/// Semantically, its bits will be copied into a new value of type `Dst`, the +/// original `Src` will be forgotten, and the value of type `Dst` will be +/// returned. +/// +/// # `#![allow(shrink)]` +/// +/// If `#![allow(shrink)]` is provided, `transmute!` additionally supports +/// transmutations that shrink the size of the value; e.g.: +/// +/// ``` +/// # use zerocopy::transmute; +/// let u: u32 = transmute!(#![allow(shrink)] 0u64); +/// assert_eq!(u, 0u32); +/// ``` +/// +/// # Examples +/// +/// ``` +/// # use zerocopy::transmute; +/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; +/// +/// let two_dimensional: [[u8; 4]; 2] = transmute!(one_dimensional); +/// +/// assert_eq!(two_dimensional, [[0, 1, 2, 3], [4, 5, 6, 7]]); +/// ``` +/// +/// # Use in `const` contexts +/// +/// This macro can be invoked in `const` contexts. +#[macro_export] +macro_rules! transmute { + // NOTE: This must be a macro (rather than a function with trait bounds) + // because there's no way, in a generic context, to enforce that two types + // have the same size. `core::mem::transmute` uses compiler magic to enforce + // this so long as the types are concrete. 
+ (#![allow(shrink)] $e:expr) => {{ + let mut e = $e; + if false { + // This branch, though never taken, ensures that the type of `e` is + // `IntoBytes` and that the type of the outer macro invocation + // expression is `FromBytes`. + + fn transmute(src: Src) -> Dst + where + Src: $crate::IntoBytes, + Dst: $crate::FromBytes, + { + let _ = src; + loop {} + } + loop {} + #[allow(unreachable_code)] + transmute(e) + } else { + use $crate::util::macro_util::core_reexport::mem::ManuallyDrop; + + // NOTE: `repr(packed)` is important! It ensures that the size of + // `Transmute` won't be rounded up to accommodate `Src`'s or `Dst`'s + // alignment, which would break the size comparison logic below. + // + // As an example of why this is problematic, consider `Src = [u8; + // 5]`, `Dst = u32`. The total size of `Transmute` would + // be 8, and so we would reject a `[u8; 5]` to `u32` transmute as + // being size-increasing, which it isn't. + #[repr(C, packed)] + union Transmute { + src: ManuallyDrop, + dst: ManuallyDrop, + } + + // SAFETY: `Transmute` is a `repr(C)` union whose `src` field has + // type `ManuallyDrop`. Thus, the `src` field starts at byte + // offset 0 within `Transmute` [1]. `ManuallyDrop` has the same + // layout and bit validity as `T`, so it is sound to transmute `Src` + // to `Transmute`. + // + // [1] https://doc.rust-lang.org/1.85.0/reference/type-layout.html#reprc-unions + // + // [2] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html: + // + // `ManuallyDrop` is guaranteed to have the same layout and bit + // validity as `T` + let u: Transmute<_, _> = unsafe { + // Clippy: We can't annotate the types; this macro is designed + // to infer the types from the calling context. + #[allow(clippy::missing_transmute_annotations)] + $crate::util::macro_util::core_reexport::mem::transmute(e) + }; + + if false { + // SAFETY: This code is never executed. 
+ e = ManuallyDrop::into_inner(unsafe { u.src }); + // Suppress the `unused_assignments` lint on the previous line. + let _ = e; + loop {} + } else { + // SAFETY: Per the safety comment on `let u` above, the `dst` + // field in `Transmute` starts at byte offset 0, and has the + // same layout and bit validity as `Dst`. + // + // Transmuting `Src` to `Transmute` above using + // `core::mem::transmute` ensures that `size_of::() == + // size_of::>()`. A `#[repr(C, packed)]` + // union has the maximum size of all of its fields [1], so this + // is equivalent to `size_of::() >= size_of::()`. + // + // The outer `if`'s `false` branch ensures that `Src: IntoBytes` + // and `Dst: FromBytes`. This, combined with the size bound, + // ensures that this transmute is sound. + // + // [1] Per https://doc.rust-lang.org/1.85.0/reference/type-layout.html#reprc-unions: + // + // The union will have a size of the maximum size of all of + // its fields rounded to its alignment + let dst = unsafe { u.dst }; + $crate::util::macro_util::must_use(ManuallyDrop::into_inner(dst)) + } + } + }}; + ($e:expr) => {{ + let e = $e; + if false { + // This branch, though never taken, ensures that the type of `e` is + // `IntoBytes` and that the type of the outer macro invocation + // expression is `FromBytes`. + + fn transmute(src: Src) -> Dst + where + Src: $crate::IntoBytes, + Dst: $crate::FromBytes, + { + let _ = src; + loop {} + } + loop {} + #[allow(unreachable_code)] + transmute(e) + } else { + // SAFETY: `core::mem::transmute` ensures that the type of `e` and + // the type of this macro invocation expression have the same size. + // We know this transmute is safe thanks to the `IntoBytes` and + // `FromBytes` bounds enforced by the `false` branch. + let u = unsafe { + // Clippy: We can't annotate the types; this macro is designed + // to infer the types from the calling context. 
+ #[allow(clippy::missing_transmute_annotations, unnecessary_transmutes)] + $crate::util::macro_util::core_reexport::mem::transmute(e) + }; + $crate::util::macro_util::must_use(u) + } + }}; +} + +/// Safely transmutes a mutable or immutable reference of one type to an +/// immutable reference of another type of the same size and compatible +/// alignment. +/// +/// This macro behaves like an invocation of this function: +/// +/// ```ignore +/// fn transmute_ref<'src, 'dst, Src, Dst>(src: &'src Src) -> &'dst Dst +/// where +/// 'src: 'dst, +/// Src: IntoBytes + Immutable + ?Sized, +/// Dst: FromBytes + Immutable + ?Sized, +/// align_of::() >= align_of::(), +/// size_compatible::(), +/// { +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// The types `Src` and `Dst` are inferred from the calling context; they cannot +/// be explicitly specified in the macro invocation. +/// +/// # Size compatibility +/// +/// `transmute_ref!` supports transmuting between `Sized` types, between unsized +/// (i.e., `?Sized`) types, and from a `Sized` type to an unsized type. It +/// supports any transmutation that preserves the number of bytes of the +/// referent, even if doing so requires updating the metadata stored in an +/// unsized "fat" reference: +/// +/// ``` +/// # use zerocopy::transmute_ref; +/// # use core::mem::size_of_val; // Not in the prelude on our MSRV +/// let src: &[[u8; 2]] = &[[0, 1], [2, 3]][..]; +/// let dst: &[u8] = transmute_ref!(src); +/// +/// assert_eq!(src.len(), 2); +/// assert_eq!(dst.len(), 4); +/// assert_eq!(dst, [0, 1, 2, 3]); +/// assert_eq!(size_of_val(src), size_of_val(dst)); +/// ``` +/// +/// # Errors +/// +/// Violations of the alignment and size compatibility checks are detected +/// *after* the compiler performs monomorphization. This has two important +/// consequences. 
+/// +/// First, it means that generic code will *never* fail these conditions: +/// +/// ``` +/// # use zerocopy::{transmute_ref, FromBytes, IntoBytes, Immutable}; +/// fn transmute_ref(src: &Src) -> &Dst +/// where +/// Src: IntoBytes + Immutable, +/// Dst: FromBytes + Immutable, +/// { +/// transmute_ref!(src) +/// } +/// ``` +/// +/// Instead, failures will only be detected once generic code is instantiated +/// with concrete types: +/// +/// ```compile_fail,E0080 +/// # use zerocopy::{transmute_ref, FromBytes, IntoBytes, Immutable}; +/// # +/// # fn transmute_ref(src: &Src) -> &Dst +/// # where +/// # Src: IntoBytes + Immutable, +/// # Dst: FromBytes + Immutable, +/// # { +/// # transmute_ref!(src) +/// # } +/// let src: &u16 = &0; +/// let dst: &u8 = transmute_ref(src); +/// ``` +/// +/// Second, the fact that violations are detected after monomorphization means +/// that `cargo check` will usually not detect errors, even when types are +/// concrete. Instead, `cargo build` must be used to detect such errors. 
+/// +/// # Examples +/// +/// Transmuting between `Sized` types: +/// +/// ``` +/// # use zerocopy::transmute_ref; +/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; +/// +/// let two_dimensional: &[[u8; 4]; 2] = transmute_ref!(&one_dimensional); +/// +/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]); +/// ``` +/// +/// Transmuting between unsized types: +/// +/// ``` +/// # use {zerocopy::*, zerocopy_derive::*}; +/// # type u16 = zerocopy::byteorder::native_endian::U16; +/// # type u32 = zerocopy::byteorder::native_endian::U32; +/// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)] +/// #[repr(C)] +/// struct SliceDst { +/// t: T, +/// u: [U], +/// } +/// +/// type Src = SliceDst; +/// type Dst = SliceDst; +/// +/// let src = Src::ref_from_bytes(&[0, 1, 2, 3, 4, 5, 6, 7]).unwrap(); +/// let dst: &Dst = transmute_ref!(src); +/// +/// assert_eq!(src.t.as_bytes(), [0, 1, 2, 3]); +/// assert_eq!(src.u.len(), 2); +/// assert_eq!(src.u.as_bytes(), [4, 5, 6, 7]); +/// +/// assert_eq!(dst.t.as_bytes(), [0, 1]); +/// assert_eq!(dst.u, [2, 3, 4, 5, 6, 7]); +/// ``` +/// +/// # Use in `const` contexts +/// +/// This macro can be invoked in `const` contexts only when `Src: Sized` and +/// `Dst: Sized`. +#[macro_export] +macro_rules! transmute_ref { + ($e:expr) => {{ + // NOTE: This must be a macro (rather than a function with trait bounds) + // because there's no way, in a generic context, to enforce that two + // types have the same size or alignment. + + // Ensure that the source type is a reference or a mutable reference + // (note that mutable references are implicitly reborrowed here). + let e: &_ = $e; + + #[allow(unused, clippy::diverging_sub_expression)] + if false { + // This branch, though never taken, ensures that the type of `e` is + // `&T` where `T: IntoBytes + Immutable`, and that the type of this + // macro expression is `&U` where `U: FromBytes + Immutable`. 
+ + struct AssertSrcIsIntoBytes<'a, T: ?::core::marker::Sized + $crate::IntoBytes>(&'a T); + struct AssertSrcIsImmutable<'a, T: ?::core::marker::Sized + $crate::Immutable>(&'a T); + struct AssertDstIsFromBytes<'a, U: ?::core::marker::Sized + $crate::FromBytes>(&'a U); + struct AssertDstIsImmutable<'a, T: ?::core::marker::Sized + $crate::Immutable>(&'a T); + + let _ = AssertSrcIsIntoBytes(e); + let _ = AssertSrcIsImmutable(e); + + if true { + #[allow(unused, unreachable_code)] + let u = AssertDstIsFromBytes(loop {}); + u.0 + } else { + #[allow(unused, unreachable_code)] + let u = AssertDstIsImmutable(loop {}); + u.0 + } + } else { + use $crate::util::macro_util::TransmuteRefDst; + let t = $crate::util::macro_util::Wrap::new(e); + + if false { + // This branch exists solely to force the compiler to infer the + // type of `Dst` *before* it attempts to resolve the method call + // to `transmute_ref` in the `else` branch. + // + // Without this, if `Src` is `Sized` but `Dst` is `!Sized`, the + // compiler will eagerly select the inherent impl of + // `transmute_ref` (which requires `Dst: Sized`) because inherent + // methods take priority over trait methods. It does this before + // it realizes `Dst` is `!Sized`, leading to a compile error when + // it checks the bounds later. + // + // By calling this helper (which returns `&Dst`), we force `Dst` + // to be fully resolved. By the time it gets to the `else` + // branch, the compiler knows `Dst` is `!Sized`, properly + // disqualifies the inherent method, and falls back to the trait + // implementation. + t.transmute_ref_inference_helper() + } else { + // SAFETY: The outer `if false` branch ensures that: + // - `Src: IntoBytes + Immutable` + // - `Dst: FromBytes + Immutable` + unsafe { + t.transmute_ref() + } + } + } + }} +} + +/// Safely transmutes a mutable reference of one type to a mutable reference of +/// another type of the same size and compatible alignment. 
+/// +/// This macro behaves like an invocation of this function: +/// +/// ```ignore +/// const fn transmute_mut<'src, 'dst, Src, Dst>(src: &'src mut Src) -> &'dst mut Dst +/// where +/// 'src: 'dst, +/// Src: FromBytes + IntoBytes + ?Sized, +/// Dst: FromBytes + IntoBytes + ?Sized, +/// align_of::() >= align_of::(), +/// size_compatible::(), +/// { +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// The types `Src` and `Dst` are inferred from the calling context; they cannot +/// be explicitly specified in the macro invocation. +/// +/// # Size compatibility +/// +/// `transmute_mut!` supports transmuting between `Sized` types, between unsized +/// (i.e., `?Sized`) types, and from a `Sized` type to an unsized type. It +/// supports any transmutation that preserves the number of bytes of the +/// referent, even if doing so requires updating the metadata stored in an +/// unsized "fat" reference: +/// +/// ``` +/// # use zerocopy::transmute_mut; +/// # use core::mem::size_of_val; // Not in the prelude on our MSRV +/// let src: &mut [[u8; 2]] = &mut [[0, 1], [2, 3]][..]; +/// let dst: &mut [u8] = transmute_mut!(src); +/// +/// assert_eq!(dst.len(), 4); +/// assert_eq!(dst, [0, 1, 2, 3]); +/// let dst_size = size_of_val(dst); +/// assert_eq!(src.len(), 2); +/// assert_eq!(size_of_val(src), dst_size); +/// ``` +/// +/// # Errors +/// +/// Violations of the alignment and size compatibility checks are detected +/// *after* the compiler performs monomorphization. This has two important +/// consequences. 
+/// +/// First, it means that generic code will *never* fail these conditions: +/// +/// ``` +/// # use zerocopy::{transmute_mut, FromBytes, IntoBytes, Immutable}; +/// fn transmute_mut(src: &mut Src) -> &mut Dst +/// where +/// Src: FromBytes + IntoBytes, +/// Dst: FromBytes + IntoBytes, +/// { +/// transmute_mut!(src) +/// } +/// ``` +/// +/// Instead, failures will only be detected once generic code is instantiated +/// with concrete types: +/// +/// ```compile_fail,E0080 +/// # use zerocopy::{transmute_mut, FromBytes, IntoBytes, Immutable}; +/// # +/// # fn transmute_mut(src: &mut Src) -> &mut Dst +/// # where +/// # Src: FromBytes + IntoBytes, +/// # Dst: FromBytes + IntoBytes, +/// # { +/// # transmute_mut!(src) +/// # } +/// let src: &mut u16 = &mut 0; +/// let dst: &mut u8 = transmute_mut(src); +/// ``` +/// +/// Second, the fact that violations are detected after monomorphization means +/// that `cargo check` will usually not detect errors, even when types are +/// concrete. Instead, `cargo build` must be used to detect such errors. 
+/// +/// +/// # Examples +/// +/// Transmuting between `Sized` types: +/// +/// ``` +/// # use zerocopy::transmute_mut; +/// let mut one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; +/// +/// let two_dimensional: &mut [[u8; 4]; 2] = transmute_mut!(&mut one_dimensional); +/// +/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]); +/// +/// two_dimensional.reverse(); +/// +/// assert_eq!(one_dimensional, [4, 5, 6, 7, 0, 1, 2, 3]); +/// ``` +/// +/// Transmuting between unsized types: +/// +/// ``` +/// # use {zerocopy::*, zerocopy_derive::*}; +/// # type u16 = zerocopy::byteorder::native_endian::U16; +/// # type u32 = zerocopy::byteorder::native_endian::U32; +/// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)] +/// #[repr(C)] +/// struct SliceDst { +/// t: T, +/// u: [U], +/// } +/// +/// type Src = SliceDst; +/// type Dst = SliceDst; +/// +/// let mut bytes = [0, 1, 2, 3, 4, 5, 6, 7]; +/// let src = Src::mut_from_bytes(&mut bytes[..]).unwrap(); +/// let dst: &mut Dst = transmute_mut!(src); +/// +/// assert_eq!(dst.t.as_bytes(), [0, 1]); +/// assert_eq!(dst.u, [2, 3, 4, 5, 6, 7]); +/// +/// assert_eq!(src.t.as_bytes(), [0, 1, 2, 3]); +/// assert_eq!(src.u.len(), 2); +/// assert_eq!(src.u.as_bytes(), [4, 5, 6, 7]); +/// +/// ``` +#[macro_export] +macro_rules! transmute_mut { + ($e:expr) => {{ + // NOTE: This must be a macro (rather than a function with trait bounds) + // because, for backwards-compatibility on v0.8.x, we use the autoref + // specialization trick to dispatch to different `transmute_mut` + // implementations: one which doesn't require `Src: KnownLayout + Dst: + // KnownLayout` when `Src: Sized + Dst: Sized`, and one which requires + // `KnownLayout` bounds otherwise. + + // Ensure that the source type is a mutable reference. 
+ let e: &mut _ = $e; + + #[allow(unused)] + use $crate::util::macro_util::TransmuteMutDst as _; + let t = $crate::util::macro_util::Wrap::new(e); + if false { + // This branch exists solely to force the compiler to infer the type + // of `Dst` *before* it attempts to resolve the method call to + // `transmute_mut` in the `else` branch. + // + // Without this, if `Src` is `Sized` but `Dst` is `!Sized`, the + // compiler will eagerly select the inherent impl of `transmute_mut` + // (which requires `Dst: Sized`) because inherent methods take + // priority over trait methods. It does this before it realizes + // `Dst` is `!Sized`, leading to a compile error when it checks the + // bounds later. + // + // By calling this helper (which returns `&mut Dst`), we force `Dst` + // to be fully resolved. By the time it gets to the `else` branch, + // the compiler knows `Dst` is `!Sized`, properly disqualifies the + // inherent method, and falls back to the trait implementation. + t.transmute_mut_inference_helper() + } else { + t.transmute_mut() + } + }} +} + +/// Conditionally transmutes a value of one type to a value of another type of +/// the same size. +/// +/// This macro behaves like an invocation of this function: +/// +/// ```ignore +/// fn try_transmute(src: Src) -> Result> +/// where +/// Src: IntoBytes, +/// Dst: TryFromBytes, +/// size_of::() == size_of::(), +/// { +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// However, unlike a function, this macro can only be invoked when the types of +/// `Src` and `Dst` are completely concrete. The types `Src` and `Dst` are +/// inferred from the calling context; they cannot be explicitly specified in +/// the macro invocation. +/// +/// Note that the `Src` produced by the expression `$e` will *not* be dropped. +/// Semantically, its bits will be copied into a new value of type `Dst`, the +/// original `Src` will be forgotten, and the value of type `Dst` will be +/// returned. 
+/// +/// # Examples +/// +/// ``` +/// # use zerocopy::*; +/// // 0u8 → bool = false +/// assert_eq!(try_transmute!(0u8), Ok(false)); +/// +/// // 1u8 → bool = true +/// assert_eq!(try_transmute!(1u8), Ok(true)); +/// +/// // 2u8 → bool = error +/// assert!(matches!( +/// try_transmute!(2u8), +/// Result::::Err(ValidityError { .. }) +/// )); +/// ``` +#[macro_export] +macro_rules! try_transmute { + ($e:expr) => {{ + // NOTE: This must be a macro (rather than a function with trait bounds) + // because there's no way, in a generic context, to enforce that two + // types have the same size. `core::mem::transmute` uses compiler magic + // to enforce this so long as the types are concrete. + + let e = $e; + if false { + // Check that the sizes of the source and destination types are + // equal. + + // SAFETY: This code is never executed. + Ok(unsafe { + // Clippy: We can't annotate the types; this macro is designed + // to infer the types from the calling context. + #[allow(clippy::missing_transmute_annotations)] + $crate::util::macro_util::core_reexport::mem::transmute(e) + }) + } else { + $crate::util::macro_util::try_transmute::<_, _>(e) + } + }} +} + +/// Conditionally transmutes a mutable or immutable reference of one type to an +/// immutable reference of another type of the same size and compatible +/// alignment. +/// +/// *Note that while the **value** of the referent is checked for validity at +/// runtime, the **size** and **alignment** are checked at compile time. For +/// conversions which are fallible with respect to size and alignment, see the +/// methods on [`TryFromBytes`].* +/// +/// This macro behaves like an invocation of this function: +/// +/// ```ignore +/// fn try_transmute_ref(src: &Src) -> Result<&Dst, ValidityError<&Src, Dst>> +/// where +/// Src: IntoBytes + Immutable + ?Sized, +/// Dst: TryFromBytes + Immutable + ?Sized, +/// align_of::() >= align_of::(), +/// size_compatible::(), +/// { +/// # /* +/// ... 
+/// # */ +/// } +/// ``` +/// +/// The types `Src` and `Dst` are inferred from the calling context; they cannot +/// be explicitly specified in the macro invocation. +/// +/// [`TryFromBytes`]: crate::TryFromBytes +/// +/// # Size compatibility +/// +/// `try_transmute_ref!` supports transmuting between `Sized` types, between +/// unsized (i.e., `?Sized`) types, and from a `Sized` type to an unsized type. +/// It supports any transmutation that preserves the number of bytes of the +/// referent, even if doing so requires updating the metadata stored in an +/// unsized "fat" reference: +/// +/// ``` +/// # use zerocopy::try_transmute_ref; +/// # use core::mem::size_of_val; // Not in the prelude on our MSRV +/// let src: &[[u8; 2]] = &[[0, 1], [2, 3]][..]; +/// let dst: &[u8] = try_transmute_ref!(src).unwrap(); +/// +/// assert_eq!(src.len(), 2); +/// assert_eq!(dst.len(), 4); +/// assert_eq!(dst, [0, 1, 2, 3]); +/// assert_eq!(size_of_val(src), size_of_val(dst)); +/// ``` +/// +/// # Examples +/// +/// Transmuting between `Sized` types: +/// +/// ``` +/// # use zerocopy::*; +/// // 0u8 → bool = false +/// assert_eq!(try_transmute_ref!(&0u8), Ok(&false)); +/// +/// // 1u8 → bool = true +/// assert_eq!(try_transmute_ref!(&1u8), Ok(&true)); +/// +/// // 2u8 → bool = error +/// assert!(matches!( +/// try_transmute_ref!(&2u8), +/// Result::<&bool, _>::Err(ValidityError { .. 
}) +/// )); +/// ``` +/// +/// Transmuting between unsized types: +/// +/// ``` +/// # use {zerocopy::*, zerocopy_derive::*}; +/// # type u16 = zerocopy::byteorder::native_endian::U16; +/// # type u32 = zerocopy::byteorder::native_endian::U32; +/// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)] +/// #[repr(C)] +/// struct SliceDst { +/// t: T, +/// u: [U], +/// } +/// +/// type Src = SliceDst; +/// type Dst = SliceDst; +/// +/// let src = Src::ref_from_bytes(&[0, 1, 0, 1, 0, 1, 0, 1]).unwrap(); +/// let dst: &Dst = try_transmute_ref!(src).unwrap(); +/// +/// assert_eq!(src.t.as_bytes(), [0, 1, 0, 1]); +/// assert_eq!(src.u.len(), 2); +/// assert_eq!(src.u.as_bytes(), [0, 1, 0, 1]); +/// +/// assert_eq!(dst.t.as_bytes(), [0, 1]); +/// assert_eq!(dst.u, [false, true, false, true, false, true]); +/// ``` +#[macro_export] +macro_rules! try_transmute_ref { + ($e:expr) => {{ + // Ensure that the source type is a reference or a mutable reference + // (note that mutable references are implicitly reborrowed here). + let e: &_ = $e; + + #[allow(unused_imports)] + use $crate::util::macro_util::TryTransmuteRefDst as _; + let t = $crate::util::macro_util::Wrap::new(e); + if false { + // This branch exists solely to force the compiler to infer the type + // of `Dst` *before* it attempts to resolve the method call to + // `try_transmute_ref` in the `else` branch. + // + // Without this, if `Src` is `Sized` but `Dst` is `!Sized`, the + // compiler will eagerly select the inherent impl of + // `try_transmute_ref` (which requires `Dst: Sized`) because + // inherent methods take priority over trait methods. It does this + // before it realizes `Dst` is `!Sized`, leading to a compile error + // when it checks the bounds later. + // + // By calling this helper (which returns `&Dst`), we force `Dst` + // to be fully resolved. 
By the time it gets to the `else` + // branch, the compiler knows `Dst` is `!Sized`, properly + // disqualifies the inherent method, and falls back to the trait + // implementation. + Ok(t.transmute_ref_inference_helper()) + } else { + t.try_transmute_ref() + } + }} +} + +/// Conditionally transmutes a mutable reference of one type to a mutable +/// reference of another type of the same size and compatible alignment. +/// +/// *Note that while the **value** of the referent is checked for validity at +/// runtime, the **size** and **alignment** are checked at compile time. For +/// conversions which are fallible with respect to size and alignment, see the +/// methods on [`TryFromBytes`].* +/// +/// This macro behaves like an invocation of this function: +/// +/// ```ignore +/// fn try_transmute_mut(src: &mut Src) -> Result<&mut Dst, ValidityError<&mut Src, Dst>> +/// where +/// Src: FromBytes + IntoBytes + ?Sized, +/// Dst: TryFromBytes + IntoBytes + ?Sized, +/// align_of::() >= align_of::(), +/// size_compatible::(), +/// { +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// The types `Src` and `Dst` are inferred from the calling context; they cannot +/// be explicitly specified in the macro invocation. +/// +/// [`TryFromBytes`]: crate::TryFromBytes +/// +/// # Size compatibility +/// +/// `try_transmute_mut!` supports transmuting between `Sized` types, between +/// unsized (i.e., `?Sized`) types, and from a `Sized` type to an unsized type. 
+/// It supports any transmutation that preserves the number of bytes of the +/// referent, even if doing so requires updating the metadata stored in an +/// unsized "fat" reference: +/// +/// ``` +/// # use zerocopy::try_transmute_mut; +/// # use core::mem::size_of_val; // Not in the prelude on our MSRV +/// let src: &mut [[u8; 2]] = &mut [[0, 1], [2, 3]][..]; +/// let dst: &mut [u8] = try_transmute_mut!(src).unwrap(); +/// +/// assert_eq!(dst.len(), 4); +/// assert_eq!(dst, [0, 1, 2, 3]); +/// let dst_size = size_of_val(dst); +/// assert_eq!(src.len(), 2); +/// assert_eq!(size_of_val(src), dst_size); +/// ``` +/// +/// # Examples +/// +/// Transmuting between `Sized` types: +/// +/// ``` +/// # use zerocopy::*; +/// // 0u8 → bool = false +/// let src = &mut 0u8; +/// assert_eq!(try_transmute_mut!(src), Ok(&mut false)); +/// +/// // 1u8 → bool = true +/// let src = &mut 1u8; +/// assert_eq!(try_transmute_mut!(src), Ok(&mut true)); +/// +/// // 2u8 → bool = error +/// let src = &mut 2u8; +/// assert!(matches!( +/// try_transmute_mut!(src), +/// Result::<&mut bool, _>::Err(ValidityError { .. }) +/// )); +/// ``` +/// +/// Transmuting between unsized types: +/// +/// ``` +/// # use {zerocopy::*, zerocopy_derive::*}; +/// # type u16 = zerocopy::byteorder::native_endian::U16; +/// # type u32 = zerocopy::byteorder::native_endian::U32; +/// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)] +/// #[repr(C)] +/// struct SliceDst { +/// t: T, +/// u: [U], +/// } +/// +/// type Src = SliceDst; +/// type Dst = SliceDst; +/// +/// let mut bytes = [0, 1, 0, 1, 0, 1, 0, 1]; +/// let src = Src::mut_from_bytes(&mut bytes).unwrap(); +/// +/// assert_eq!(src.t.as_bytes(), [0, 1, 0, 1]); +/// assert_eq!(src.u.len(), 2); +/// assert_eq!(src.u.as_bytes(), [0, 1, 0, 1]); +/// +/// let dst: &Dst = try_transmute_mut!(src).unwrap(); +/// +/// assert_eq!(dst.t.as_bytes(), [0, 1]); +/// assert_eq!(dst.u, [false, true, false, true, false, true]); +/// ``` +#[macro_export] +macro_rules! 
try_transmute_mut { + ($e:expr) => {{ + // Ensure that the source type is a mutable reference. + let e: &mut _ = $e; + + #[allow(unused_imports)] + use $crate::util::macro_util::TryTransmuteMutDst as _; + let t = $crate::util::macro_util::Wrap::new(e); + if false { + // This branch exists solely to force the compiler to infer the type + // of `Dst` *before* it attempts to resolve the method call to + // `try_transmute_mut` in the `else` branch. + // + // Without this, if `Src` is `Sized` but `Dst` is `!Sized`, the + // compiler will eagerly select the inherent impl of + // `try_transmute_mut` (which requires `Dst: Sized`) because + // inherent methods take priority over trait methods. It does this + // before it realizes `Dst` is `!Sized`, leading to a compile error + // when it checks the bounds later. + // + // By calling this helper (which returns `&Dst`), we force `Dst` + // to be fully resolved. By the time it gets to the `else` + // branch, the compiler knows `Dst` is `!Sized`, properly + // disqualifies the inherent method, and falls back to the trait + // implementation. + Ok(t.transmute_mut_inference_helper()) + } else { + t.try_transmute_mut() + } + }} +} + +/// Includes a file and safely transmutes it to a value of an arbitrary type. +/// +/// The file will be included as a byte array, `[u8; N]`, which will be +/// transmuted to another type, `T`. `T` is inferred from the calling context, +/// and must implement [`FromBytes`]. +/// +/// The file is located relative to the current file (similarly to how modules +/// are found). The provided path is interpreted in a platform-specific way at +/// compile time. So, for instance, an invocation with a Windows path containing +/// backslashes `\` would not compile correctly on Unix. +/// +/// `include_value!` is ignorant of byte order. For byte order-aware types, see +/// the [`byteorder`] module. 
+/// +/// [`FromBytes`]: crate::FromBytes +/// [`byteorder`]: crate::byteorder +/// +/// # Examples +/// +/// Assume there are two files in the same directory with the following +/// contents: +/// +/// File `data` (no trailing newline): +/// +/// ```text +/// abcd +/// ``` +/// +/// File `main.rs`: +/// +/// ```rust +/// use zerocopy::include_value; +/// # macro_rules! include_value { +/// # ($file:expr) => { zerocopy::include_value!(concat!("../testdata/include_value/", $file)) }; +/// # } +/// +/// fn main() { +/// let as_u32: u32 = include_value!("data"); +/// assert_eq!(as_u32, u32::from_ne_bytes([b'a', b'b', b'c', b'd'])); +/// let as_i32: i32 = include_value!("data"); +/// assert_eq!(as_i32, i32::from_ne_bytes([b'a', b'b', b'c', b'd'])); +/// } +/// ``` +/// +/// # Use in `const` contexts +/// +/// This macro can be invoked in `const` contexts. +#[doc(alias("include_bytes", "include_data", "include_type"))] +#[macro_export] +macro_rules! include_value { + ($file:expr $(,)?) => { + $crate::transmute!(*::core::include_bytes!($file)) + }; +} + +#[doc(hidden)] +#[macro_export] +macro_rules! cryptocorrosion_derive_traits { + ( + #[repr($repr:ident)] + $(#[$attr:meta])* + $vis:vis struct $name:ident $(<$($tyvar:ident),*>)? + $( + ( + $($tuple_field_vis:vis $tuple_field_ty:ty),* + ); + )? + + $( + { + $($field_vis:vis $field_name:ident: $field_ty:ty,)* + } + )? + ) => { + $crate::cryptocorrosion_derive_traits!(@assert_allowed_struct_repr #[repr($repr)]); + + $(#[$attr])* + #[repr($repr)] + $vis struct $name $(<$($tyvar),*>)? + $( + ( + $($tuple_field_vis $tuple_field_ty),* + ); + )? + + $( + { + $($field_vis $field_name: $field_ty,)* + } + )? + + // SAFETY: See inline. + unsafe impl $(<$($tyvar),*>)? $crate::TryFromBytes for $name$(<$($tyvar),*>)? + where + $( + $($tuple_field_ty: $crate::FromBytes,)* + )? + + $( + $($field_ty: $crate::FromBytes,)* + )? 
+ { + #[inline(always)] + fn is_bit_valid(_: $crate::Maybe<'_, Self, A>) -> bool + where + A: $crate::invariant::Alignment, + { + // SAFETY: This macro only accepts `#[repr(C)]` and + // `#[repr(transparent)]` structs, and this `impl` block + // requires all field types to be `FromBytes`. Thus, all + // initialized byte sequences constitutes valid instances of + // `Self`. + true + } + + fn only_derive_is_allowed_to_implement_this_trait() {} + } + + // SAFETY: This macro only accepts `#[repr(C)]` and + // `#[repr(transparent)]` structs, and this `impl` block requires all + // field types to be `FromBytes`, which is a sub-trait of `FromZeros`. + unsafe impl $(<$($tyvar),*>)? $crate::FromZeros for $name$(<$($tyvar),*>)? + where + $( + $($tuple_field_ty: $crate::FromBytes,)* + )? + + $( + $($field_ty: $crate::FromBytes,)* + )? + { + fn only_derive_is_allowed_to_implement_this_trait() {} + } + + // SAFETY: This macro only accepts `#[repr(C)]` and + // `#[repr(transparent)]` structs, and this `impl` block requires all + // field types to be `FromBytes`. + unsafe impl $(<$($tyvar),*>)? $crate::FromBytes for $name$(<$($tyvar),*>)? + where + $( + $($tuple_field_ty: $crate::FromBytes,)* + )? + + $( + $($field_ty: $crate::FromBytes,)* + )? + { + fn only_derive_is_allowed_to_implement_this_trait() {} + } + + // SAFETY: This macro only accepts `#[repr(C)]` and + // `#[repr(transparent)]` structs, this `impl` block requires all field + // types to be `IntoBytes`, and a padding check is used to ensures that + // there are no padding bytes. + unsafe impl $(<$($tyvar),*>)? $crate::IntoBytes for $name$(<$($tyvar),*>)? + where + $( + $($tuple_field_ty: $crate::IntoBytes,)* + )? + + $( + $($field_ty: $crate::IntoBytes,)* + )? + + (): $crate::util::macro_util::PaddingFree< + Self, + { + $crate::cryptocorrosion_derive_traits!( + @struct_padding_check #[repr($repr)] + $(($($tuple_field_ty),*))? + $({$($field_ty),*})? 
+ ) + }, + >, + { + fn only_derive_is_allowed_to_implement_this_trait() {} + } + + // SAFETY: This macro only accepts `#[repr(C)]` and + // `#[repr(transparent)]` structs, and this `impl` block requires all + // field types to be `Immutable`. + unsafe impl $(<$($tyvar),*>)? $crate::Immutable for $name$(<$($tyvar),*>)? + where + $( + $($tuple_field_ty: $crate::Immutable,)* + )? + + $( + $($field_ty: $crate::Immutable,)* + )? + { + fn only_derive_is_allowed_to_implement_this_trait() {} + } + }; + (@assert_allowed_struct_repr #[repr(transparent)]) => {}; + (@assert_allowed_struct_repr #[repr(C)]) => {}; + (@assert_allowed_struct_repr #[$_attr:meta]) => { + compile_error!("repr must be `#[repr(transparent)]` or `#[repr(C)]`"); + }; + ( + @struct_padding_check #[repr(transparent)] + $(($($tuple_field_ty:ty),*))? + $({$($field_ty:ty),*})? + ) => { + // SAFETY: `#[repr(transparent)]` structs cannot have the same layout as + // their single non-zero-sized field, and so cannot have any padding + // outside of that field. + 0 + }; + ( + @struct_padding_check #[repr(C)] + $(($($tuple_field_ty:ty),*))? + $({$($field_ty:ty),*})? + ) => { + $crate::struct_padding!( + Self, + [ + $($($tuple_field_ty),*)? + $($($field_ty),*)? + ] + ) + }; + ( + #[repr(C)] + $(#[$attr:meta])* + $vis:vis union $name:ident { + $( + $field_name:ident: $field_ty:ty, + )* + } + ) => { + $(#[$attr])* + #[repr(C)] + $vis union $name { + $( + $field_name: $field_ty, + )* + } + + // SAFETY: See inline. + unsafe impl $crate::TryFromBytes for $name + where + $( + $field_ty: $crate::FromBytes, + )* + { + #[inline(always)] + fn is_bit_valid(_: $crate::Maybe<'_, Self, A>) -> bool + where + A: $crate::invariant::Alignment, + { + // SAFETY: This macro only accepts `#[repr(C)]` unions, and this + // `impl` block requires all field types to be `FromBytes`. + // Thus, all initialized byte sequences constitutes valid + // instances of `Self`. 
+ true + } + + fn only_derive_is_allowed_to_implement_this_trait() {} + } + + // SAFETY: This macro only accepts `#[repr(C)]` unions, and this `impl` + // block requires all field types to be `FromBytes`, which is a + // sub-trait of `FromZeros`. + unsafe impl $crate::FromZeros for $name + where + $( + $field_ty: $crate::FromBytes, + )* + { + fn only_derive_is_allowed_to_implement_this_trait() {} + } + + // SAFETY: This macro only accepts `#[repr(C)]` unions, and this `impl` + // block requires all field types to be `FromBytes`. + unsafe impl $crate::FromBytes for $name + where + $( + $field_ty: $crate::FromBytes, + )* + { + fn only_derive_is_allowed_to_implement_this_trait() {} + } + + // SAFETY: This macro only accepts `#[repr(C)]` unions, this `impl` + // block requires all field types to be `IntoBytes`, and a padding check + // is used to ensures that there are no padding bytes before or after + // any field. + unsafe impl $crate::IntoBytes for $name + where + $( + $field_ty: $crate::IntoBytes, + )* + (): $crate::util::macro_util::PaddingFree< + Self, + { + $crate::union_padding!( + Self, + [$($field_ty),*] + ) + }, + >, + { + fn only_derive_is_allowed_to_implement_this_trait() {} + } + + // SAFETY: This macro only accepts `#[repr(C)]` unions, and this `impl` + // block requires all field types to be `Immutable`. + unsafe impl $crate::Immutable for $name + where + $( + $field_ty: $crate::Immutable, + )* + { + fn only_derive_is_allowed_to_implement_this_trait() {} + } + }; +} + +#[cfg(test)] +mod tests { + use crate::{ + byteorder::native_endian::{U16, U32}, + util::testutil::*, + *, + }; + + #[derive(KnownLayout, Immutable, FromBytes, IntoBytes, PartialEq, Debug)] + #[repr(C)] + struct SliceDst { + a: T, + b: [U], + } + + #[test] + fn test_transmute() { + // Test that memory is transmuted as expected. 
+ let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; + let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; + let x: [[u8; 2]; 4] = transmute!(array_of_u8s); + assert_eq!(x, array_of_arrays); + let x: [u8; 8] = transmute!(array_of_arrays); + assert_eq!(x, array_of_u8s); + + // Test that memory is transmuted as expected when shrinking. + let x: [[u8; 2]; 3] = transmute!(#![allow(shrink)] array_of_u8s); + assert_eq!(x, [[0u8, 1], [2, 3], [4, 5]]); + + // Test that the source expression's value is forgotten rather than + // dropped. + #[derive(IntoBytes)] + #[repr(transparent)] + struct PanicOnDrop(()); + impl Drop for PanicOnDrop { + fn drop(&mut self) { + panic!("PanicOnDrop::drop"); + } + } + #[allow(clippy::let_unit_value)] + let _: () = transmute!(PanicOnDrop(())); + #[allow(clippy::let_unit_value)] + let _: () = transmute!(#![allow(shrink)] PanicOnDrop(())); + + // Test that `transmute!` is legal in a const context. + const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7]; + const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]]; + const X: [[u8; 2]; 4] = transmute!(ARRAY_OF_U8S); + assert_eq!(X, ARRAY_OF_ARRAYS); + const X_SHRINK: [[u8; 2]; 3] = transmute!(#![allow(shrink)] ARRAY_OF_U8S); + assert_eq!(X_SHRINK, [[0u8, 1], [2, 3], [4, 5]]); + + // Test that `transmute!` works with `!Immutable` types. + let x: usize = transmute!(UnsafeCell::new(1usize)); + assert_eq!(x, 1); + let x: UnsafeCell = transmute!(1usize); + assert_eq!(x.into_inner(), 1); + let x: UnsafeCell = transmute!(UnsafeCell::new(1usize)); + assert_eq!(x.into_inner(), 1); + } + + // A `Sized` type which doesn't implement `KnownLayout` (it is "not + // `KnownLayout`", or `Nkl`). + // + // This permits us to test that `transmute_ref!` and `transmute_mut!` work + // for types which are `Sized + !KnownLayout`. 
When we added support for + // slice DSTs in #1924, this new support relied on `KnownLayout`, but we + // need to make sure to remain backwards-compatible with code which uses + // these macros with types which are `!KnownLayout`. + #[derive(FromBytes, IntoBytes, Immutable, PartialEq, Eq, Debug)] + #[repr(transparent)] + struct Nkl(T); + + #[test] + fn test_transmute_ref() { + // Test that memory is transmuted as expected. + let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; + let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; + let x: &[[u8; 2]; 4] = transmute_ref!(&array_of_u8s); + assert_eq!(*x, array_of_arrays); + let x: &[u8; 8] = transmute_ref!(&array_of_arrays); + assert_eq!(*x, array_of_u8s); + + // Test that `transmute_ref!` is legal in a const context. + const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7]; + const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]]; + #[allow(clippy::redundant_static_lifetimes)] + const X: &'static [[u8; 2]; 4] = transmute_ref!(&ARRAY_OF_U8S); + assert_eq!(*X, ARRAY_OF_ARRAYS); + + // Test sized -> unsized transmutation. + let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; + let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; + let slice_of_arrays = &array_of_arrays[..]; + let x: &[[u8; 2]] = transmute_ref!(&array_of_u8s); + assert_eq!(x, slice_of_arrays); + + // Before 1.61.0, we can't define the `const fn transmute_ref` function + // that we do on and after 1.61.0. + #[cfg(no_zerocopy_generic_bounds_in_const_fn_1_61_0)] + { + // Test that `transmute_ref!` supports non-`KnownLayout` `Sized` + // types. 
+ const ARRAY_OF_NKL_U8S: Nkl<[u8; 8]> = Nkl([0u8, 1, 2, 3, 4, 5, 6, 7]); + const ARRAY_OF_NKL_ARRAYS: Nkl<[[u8; 2]; 4]> = Nkl([[0, 1], [2, 3], [4, 5], [6, 7]]); + const X_NKL: &Nkl<[[u8; 2]; 4]> = transmute_ref!(&ARRAY_OF_NKL_U8S); + assert_eq!(*X_NKL, ARRAY_OF_NKL_ARRAYS); + } + + #[cfg(not(no_zerocopy_generic_bounds_in_const_fn_1_61_0))] + { + // Call through a generic function to make sure our autoref + // specialization trick works even when types are generic. + const fn transmute_ref(t: &T) -> &U + where + T: IntoBytes + Immutable, + U: FromBytes + Immutable, + { + transmute_ref!(t) + } + + // Test that `transmute_ref!` supports non-`KnownLayout` `Sized` + // types. + const ARRAY_OF_NKL_U8S: Nkl<[u8; 8]> = Nkl([0u8, 1, 2, 3, 4, 5, 6, 7]); + const ARRAY_OF_NKL_ARRAYS: Nkl<[[u8; 2]; 4]> = Nkl([[0, 1], [2, 3], [4, 5], [6, 7]]); + const X_NKL: &Nkl<[[u8; 2]; 4]> = transmute_ref(&ARRAY_OF_NKL_U8S); + assert_eq!(*X_NKL, ARRAY_OF_NKL_ARRAYS); + } + + // Test that `transmute_ref!` works on slice DSTs in and that memory is + // transmuted as expected. + let slice_dst_of_u8s = + SliceDst::::ref_from_bytes(&[0, 1, 2, 3, 4, 5][..]).unwrap(); + let slice_dst_of_u16s = + SliceDst::::ref_from_bytes(&[0, 1, 2, 3, 4, 5][..]).unwrap(); + let x: &SliceDst = transmute_ref!(slice_dst_of_u8s); + assert_eq!(x, slice_dst_of_u16s); + + let slice_dst_of_u8s = + SliceDst::::ref_from_bytes(&[0, 1, 2, 3, 4, 5][..]).unwrap(); + let x: &[u8] = transmute_ref!(slice_dst_of_u8s); + assert_eq!(x, [0, 1, 2, 3, 4, 5]); + + let x: &[u8] = transmute_ref!(slice_dst_of_u16s); + assert_eq!(x, [0, 1, 2, 3, 4, 5]); + + let x: &[U16] = transmute_ref!(slice_dst_of_u16s); + let slice_of_u16s: &[U16] = <[U16]>::ref_from_bytes(&[0, 1, 2, 3, 4, 5][..]).unwrap(); + assert_eq!(x, slice_of_u16s); + + // Test that transmuting from a type with larger trailing slice offset + // and larger trailing slice element works. 
+ let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..]; + let slice_dst_big = SliceDst::::ref_from_bytes(bytes).unwrap(); + let slice_dst_small = SliceDst::::ref_from_bytes(bytes).unwrap(); + let x: &SliceDst = transmute_ref!(slice_dst_big); + assert_eq!(x, slice_dst_small); + + // Test that it's legal to transmute a reference while shrinking the + // lifetime (note that `X` has the lifetime `'static`). + let x: &[u8; 8] = transmute_ref!(X); + assert_eq!(*x, ARRAY_OF_U8S); + + // Test that `transmute_ref!` supports decreasing alignment. + let u = AU64(0); + let array = [0, 0, 0, 0, 0, 0, 0, 0]; + let x: &[u8; 8] = transmute_ref!(&u); + assert_eq!(*x, array); + + // Test that a mutable reference can be turned into an immutable one. + let mut x = 0u8; + #[allow(clippy::useless_transmute)] + let y: &u8 = transmute_ref!(&mut x); + assert_eq!(*y, 0); + } + + #[test] + fn test_try_transmute() { + // Test that memory is transmuted with `try_transmute` as expected. + let array_of_bools = [false, true, false, true, false, true, false, true]; + let array_of_arrays = [[0, 1], [0, 1], [0, 1], [0, 1]]; + let x: Result<[[u8; 2]; 4], _> = try_transmute!(array_of_bools); + assert_eq!(x, Ok(array_of_arrays)); + let x: Result<[bool; 8], _> = try_transmute!(array_of_arrays); + assert_eq!(x, Ok(array_of_bools)); + + // Test that `try_transmute!` works with `!Immutable` types. + let x: Result = try_transmute!(UnsafeCell::new(1usize)); + assert_eq!(x.unwrap(), 1); + let x: Result, _> = try_transmute!(1usize); + assert_eq!(x.unwrap().into_inner(), 1); + let x: Result, _> = try_transmute!(UnsafeCell::new(1usize)); + assert_eq!(x.unwrap().into_inner(), 1); + + #[derive(FromBytes, IntoBytes, Debug, PartialEq)] + #[repr(transparent)] + struct PanicOnDrop(T); + + impl Drop for PanicOnDrop { + fn drop(&mut self) { + panic!("PanicOnDrop dropped"); + } + } + + // Since `try_transmute!` semantically moves its argument on failure, + // the `PanicOnDrop` is not dropped, and thus this shouldn't panic. 
+ let x: Result = try_transmute!(PanicOnDrop(1usize)); + assert_eq!(x, Ok(1)); + + // Since `try_transmute!` semantically returns ownership of its argument + // on failure, the `PanicOnDrop` is returned rather than dropped, and + // thus this shouldn't panic. + let y: Result = try_transmute!(PanicOnDrop(2u8)); + // We have to use `map_err` instead of comparing against + // `Err(PanicOnDrop(2u8))` because the latter would create and then drop + // its `PanicOnDrop` temporary, which would cause a panic. + assert_eq!(y.as_ref().map_err(|p| &p.src.0), Err::<&bool, _>(&2u8)); + mem::forget(y); + } + + #[test] + fn test_try_transmute_ref() { + // Test that memory is transmuted with `try_transmute_ref` as expected. + let array_of_bools = &[false, true, false, true, false, true, false, true]; + let array_of_arrays = &[[0, 1], [0, 1], [0, 1], [0, 1]]; + let x: Result<&[[u8; 2]; 4], _> = try_transmute_ref!(array_of_bools); + assert_eq!(x, Ok(array_of_arrays)); + let x: Result<&[bool; 8], _> = try_transmute_ref!(array_of_arrays); + assert_eq!(x, Ok(array_of_bools)); + + // Test that it's legal to transmute a reference while shrinking the + // lifetime. + { + let x: Result<&[[u8; 2]; 4], _> = try_transmute_ref!(array_of_bools); + assert_eq!(x, Ok(array_of_arrays)); + } + + // Test that `try_transmute_ref!` supports decreasing alignment. + let u = AU64(0); + let array = [0u8, 0, 0, 0, 0, 0, 0, 0]; + let x: Result<&[u8; 8], _> = try_transmute_ref!(&u); + assert_eq!(x, Ok(&array)); + + // Test that a mutable reference can be turned into an immutable one. + let mut x = 0u8; + #[allow(clippy::useless_transmute)] + let y: Result<&u8, _> = try_transmute_ref!(&mut x); + assert_eq!(y, Ok(&0)); + + // Test that sized types work which don't implement `KnownLayout`. 
+ let array_of_nkl_u8s = Nkl([0u8, 1, 2, 3, 4, 5, 6, 7]); + let array_of_nkl_arrays = Nkl([[0, 1], [2, 3], [4, 5], [6, 7]]); + let x: Result<&Nkl<[[u8; 2]; 4]>, _> = try_transmute_ref!(&array_of_nkl_u8s); + assert_eq!(x, Ok(&array_of_nkl_arrays)); + + // Test sized -> unsized transmutation. + let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; + let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; + let slice_of_arrays = &array_of_arrays[..]; + let x: Result<&[[u8; 2]], _> = try_transmute_ref!(&array_of_u8s); + assert_eq!(x, Ok(slice_of_arrays)); + + // Test unsized -> unsized transmutation. + let slice_dst_of_u8s = + SliceDst::::ref_from_bytes(&[0, 1, 2, 3, 4, 5][..]).unwrap(); + let slice_dst_of_u16s = + SliceDst::::ref_from_bytes(&[0, 1, 2, 3, 4, 5][..]).unwrap(); + let x: Result<&SliceDst, _> = try_transmute_ref!(slice_dst_of_u8s); + assert_eq!(x, Ok(slice_dst_of_u16s)); + } + + #[test] + fn test_try_transmute_mut() { + // Test that memory is transmuted with `try_transmute_mut` as expected. + let array_of_u8s = &mut [0u8, 1, 0, 1, 0, 1, 0, 1]; + let array_of_arrays = &mut [[0u8, 1], [0, 1], [0, 1], [0, 1]]; + let x: Result<&mut [[u8; 2]; 4], _> = try_transmute_mut!(array_of_u8s); + assert_eq!(x, Ok(array_of_arrays)); + + let array_of_bools = &mut [false, true, false, true, false, true, false, true]; + let array_of_arrays = &mut [[0u8, 1], [0, 1], [0, 1], [0, 1]]; + let x: Result<&mut [bool; 8], _> = try_transmute_mut!(array_of_arrays); + assert_eq!(x, Ok(array_of_bools)); + + // Test that it's legal to transmute a reference while shrinking the + // lifetime. + let array_of_bools = &mut [false, true, false, true, false, true, false, true]; + let array_of_arrays = &mut [[0u8, 1], [0, 1], [0, 1], [0, 1]]; + { + let x: Result<&mut [bool; 8], _> = try_transmute_mut!(array_of_arrays); + assert_eq!(x, Ok(array_of_bools)); + } + + // Test that `try_transmute_mut!` supports decreasing alignment. 
+ let u = &mut AU64(0); + let array = &mut [0u8, 0, 0, 0, 0, 0, 0, 0]; + let x: Result<&mut [u8; 8], _> = try_transmute_mut!(u); + assert_eq!(x, Ok(array)); + + // Test that a mutable reference can be turned into an immutable one. + let mut x = 0u8; + #[allow(clippy::useless_transmute)] + let y: Result<&mut u8, _> = try_transmute_mut!(&mut x); + assert_eq!(y, Ok(&mut 0)); + + // Test that sized types work which don't implement `KnownLayout`. + let mut array_of_nkl_u8s = Nkl([0u8, 1, 2, 3, 4, 5, 6, 7]); + let mut array_of_nkl_arrays = Nkl([[0, 1], [2, 3], [4, 5], [6, 7]]); + let x: Result<&mut Nkl<[[u8; 2]; 4]>, _> = try_transmute_mut!(&mut array_of_nkl_u8s); + assert_eq!(x, Ok(&mut array_of_nkl_arrays)); + + // Test sized -> unsized transmutation. + let mut array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; + let mut array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; + let slice_of_arrays = &mut array_of_arrays[..]; + let x: Result<&mut [[u8; 2]], _> = try_transmute_mut!(&mut array_of_u8s); + assert_eq!(x, Ok(slice_of_arrays)); + + // Test unsized -> unsized transmutation. + let mut bytes = [0, 1, 2, 3, 4, 5, 6]; + let slice_dst_of_u8s = SliceDst::::mut_from_bytes(&mut bytes[..]).unwrap(); + let mut bytes = [0, 1, 2, 3, 4, 5, 6]; + let slice_dst_of_u16s = SliceDst::::mut_from_bytes(&mut bytes[..]).unwrap(); + let x: Result<&mut SliceDst, _> = try_transmute_mut!(slice_dst_of_u8s); + assert_eq!(x, Ok(slice_dst_of_u16s)); + } + + #[test] + fn test_transmute_mut() { + // Test that memory is transmuted as expected. + let mut array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; + let mut array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; + let x: &mut [[u8; 2]; 4] = transmute_mut!(&mut array_of_u8s); + assert_eq!(*x, array_of_arrays); + let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays); + assert_eq!(*x, array_of_u8s); + + { + // Test that it's legal to transmute a reference while shrinking the + // lifetime. 
+ let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays); + assert_eq!(*x, array_of_u8s); + } + + // Test that `transmute_mut!` supports non-`KnownLayout` types. + let mut array_of_u8s = Nkl([0u8, 1, 2, 3, 4, 5, 6, 7]); + let mut array_of_arrays = Nkl([[0, 1], [2, 3], [4, 5], [6, 7]]); + let x: &mut Nkl<[[u8; 2]; 4]> = transmute_mut!(&mut array_of_u8s); + assert_eq!(*x, array_of_arrays); + let x: &mut Nkl<[u8; 8]> = transmute_mut!(&mut array_of_arrays); + assert_eq!(*x, array_of_u8s); + + // Test that `transmute_mut!` supports decreasing alignment. + let mut u = AU64(0); + let array = [0, 0, 0, 0, 0, 0, 0, 0]; + let x: &[u8; 8] = transmute_mut!(&mut u); + assert_eq!(*x, array); + + // Test that a mutable reference can be turned into an immutable one. + let mut x = 0u8; + #[allow(clippy::useless_transmute)] + let y: &u8 = transmute_mut!(&mut x); + assert_eq!(*y, 0); + + // Test that `transmute_mut!` works on slice DSTs in and that memory is + // transmuted as expected. + let mut bytes = [0, 1, 2, 3, 4, 5, 6]; + let slice_dst_of_u8s = SliceDst::::mut_from_bytes(&mut bytes[..]).unwrap(); + let mut bytes = [0, 1, 2, 3, 4, 5, 6]; + let slice_dst_of_u16s = SliceDst::::mut_from_bytes(&mut bytes[..]).unwrap(); + let x: &mut SliceDst = transmute_mut!(slice_dst_of_u8s); + assert_eq!(x, slice_dst_of_u16s); + + // Test that `transmute_mut!` works on slices that memory is transmuted + // as expected. + let array_of_u16s: &mut [u16] = &mut [0u16, 1, 2]; + let array_of_i16s: &mut [i16] = &mut [0i16, 1, 2]; + let x: &mut [i16] = transmute_mut!(array_of_u16s); + assert_eq!(x, array_of_i16s); + + // Test that transmuting from a type with larger trailing slice offset + // and larger trailing slice element works. 
+ let mut bytes = [0, 1, 2, 3, 4, 5, 6, 7]; + let slice_dst_big = SliceDst::::mut_from_bytes(&mut bytes[..]).unwrap(); + let mut bytes = [0, 1, 2, 3, 4, 5, 6, 7]; + let slice_dst_small = SliceDst::::mut_from_bytes(&mut bytes[..]).unwrap(); + let x: &mut SliceDst = transmute_mut!(slice_dst_big); + assert_eq!(x, slice_dst_small); + + // Test sized -> unsized transmutation. + let mut array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; + let mut array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; + let slice_of_arrays = &mut array_of_arrays[..]; + let x: &mut [[u8; 2]] = transmute_mut!(&mut array_of_u8s); + assert_eq!(x, slice_of_arrays); + } + + #[test] + fn test_macros_evaluate_args_once() { + let mut ctr = 0; + #[allow(clippy::useless_transmute)] + let _: usize = transmute!({ + ctr += 1; + 0usize + }); + assert_eq!(ctr, 1); + + let mut ctr = 0; + let _: &usize = transmute_ref!({ + ctr += 1; + &0usize + }); + assert_eq!(ctr, 1); + + let mut ctr: usize = 0; + let _: &mut usize = transmute_mut!({ + ctr += 1; + &mut ctr + }); + assert_eq!(ctr, 1); + + let mut ctr = 0; + #[allow(clippy::useless_transmute)] + let _: usize = try_transmute!({ + ctr += 1; + 0usize + }) + .unwrap(); + assert_eq!(ctr, 1); + } + + #[test] + fn test_include_value() { + const AS_U32: u32 = include_value!("../testdata/include_value/data"); + assert_eq!(AS_U32, u32::from_ne_bytes([b'a', b'b', b'c', b'd'])); + const AS_I32: i32 = include_value!("../testdata/include_value/data"); + assert_eq!(AS_I32, i32::from_ne_bytes([b'a', b'b', b'c', b'd'])); + } + + #[test] + #[allow(non_camel_case_types, unreachable_pub, dead_code)] + fn test_cryptocorrosion_derive_traits() { + // Test the set of invocations added in + // https://github.com/cryptocorrosion/cryptocorrosion/pull/85 + + fn assert_impls() {} + + cryptocorrosion_derive_traits! { + #[repr(C)] + #[derive(Clone, Copy)] + pub union vec128_storage { + d: [u32; 4], + q: [u64; 2], + } + } + + assert_impls::(); + + cryptocorrosion_derive_traits! 
{ + #[repr(transparent)] + #[derive(Copy, Clone, Debug, PartialEq)] + pub struct u32x4_generic([u32; 4]); + } + + assert_impls::(); + + cryptocorrosion_derive_traits! { + #[repr(transparent)] + #[derive(Copy, Clone, Debug, PartialEq)] + pub struct u64x2_generic([u64; 2]); + } + + assert_impls::(); + + cryptocorrosion_derive_traits! { + #[repr(transparent)] + #[derive(Copy, Clone, Debug, PartialEq)] + pub struct u128x1_generic([u128; 1]); + } + + assert_impls::(); + + cryptocorrosion_derive_traits! { + #[repr(transparent)] + #[derive(Copy, Clone, Default)] + #[allow(non_camel_case_types)] + pub struct x2(pub [W; 2], PhantomData); + } + + enum NotZerocopy {} + assert_impls::>(); + + cryptocorrosion_derive_traits! { + #[repr(transparent)] + #[derive(Copy, Clone, Default)] + #[allow(non_camel_case_types)] + pub struct x4(pub [W; 4]); + } + + assert_impls::>(); + + #[cfg(feature = "simd")] + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + { + #[cfg(target_arch = "x86")] + use core::arch::x86::{__m128i, __m256i}; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::{__m128i, __m256i}; + + cryptocorrosion_derive_traits! { + #[repr(C)] + #[derive(Copy, Clone)] + pub struct X4(__m128i, __m128i, __m128i, __m128i); + } + + assert_impls::(); + + cryptocorrosion_derive_traits! { + #[repr(C)] + /// Generic wrapper for unparameterized storage of any of the + /// possible impls. Converting into and out of this type should + /// be essentially free, although it may be more aligned than a + /// particular impl requires. + #[allow(non_camel_case_types)] + #[derive(Copy, Clone)] + pub union vec128_storage { + u32x4: [u32; 4], + u64x2: [u64; 2], + u128x1: [u128; 1], + sse2: __m128i, + } + } + + assert_impls::(); + + cryptocorrosion_derive_traits! 
{ + #[repr(transparent)] + #[allow(non_camel_case_types)] + #[derive(Copy, Clone)] + pub struct vec { + x: __m128i, + s3: PhantomData, + s4: PhantomData, + ni: PhantomData, + } + } + + assert_impls::>(); + + cryptocorrosion_derive_traits! { + #[repr(transparent)] + #[derive(Copy, Clone)] + pub struct u32x4x2_avx2 { + x: __m256i, + ni: PhantomData, + } + } + + assert_impls::>(); + } + + // Make sure that our derive works for `#[repr(C)]` structs even though + // cryptocorrosion doesn't currently have any. + cryptocorrosion_derive_traits! { + #[repr(C)] + #[derive(Copy, Clone, Debug, PartialEq)] + pub struct ReprC(u8, u8, u16); + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/inner.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/inner.rs new file mode 100644 index 0000000000000000000000000000000000000000..3136149dfa6ec04503001129bd9dd3e3362ef643 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/inner.rs @@ -0,0 +1,734 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use core::{marker::PhantomData, ops::Range, ptr::NonNull}; + +pub use _def::PtrInner; + +#[allow(unused_imports)] +use crate::util::polyfills::NumExt as _; +use crate::{ + layout::{CastType, MetadataCastError}, + pointer::cast, + util::AsAddress, + AlignmentError, CastError, KnownLayout, MetadataOf, SizeError, SplitAt, +}; + +mod _def { + use super::*; + /// The inner pointer stored inside a [`Ptr`][crate::Ptr]. + /// + /// `PtrInner<'a, T>` is [covariant] in `'a` and invariant in `T`. 
+ /// + /// [covariant]: https://doc.rust-lang.org/reference/subtyping.html + #[allow(missing_debug_implementations)] + pub struct PtrInner<'a, T> + where + T: ?Sized, + { + /// # Invariants + /// + /// 0. If `ptr`'s referent is not zero sized, then `ptr` has valid + /// provenance for its referent, which is entirely contained in some + /// Rust allocation, `A`. + /// 1. If `ptr`'s referent is not zero sized, `A` is guaranteed to live + /// for at least `'a`. + /// + /// # Postconditions + /// + /// By virtue of these invariants, code may assume the following, which + /// are logical implications of the invariants: + /// - `ptr`'s referent is not larger than `isize::MAX` bytes \[1\] + /// - `ptr`'s referent does not wrap around the address space \[1\] + /// + /// \[1\] Per : + /// + /// For any allocated object with `base` address, `size`, and a set of + /// `addresses`, the following are guaranteed: + /// ... + /// - `size <= isize::MAX` + /// + /// As a consequence of these guarantees, given any address `a` within + /// the set of addresses of an allocated object: + /// ... + /// - It is guaranteed that, given `o = a - base` (i.e., the offset of + /// `a` within the allocated object), `base + o` will not wrap + /// around the address space (in other words, will not overflow + /// `usize`) + ptr: NonNull, + // SAFETY: `&'a UnsafeCell` is covariant in `'a` and invariant in `T` + // [1]. We use this construction rather than the equivalent `&mut T`, + // because our MSRV of 1.65 prohibits `&mut` types in const contexts. + // + // [1] https://doc.rust-lang.org/1.81.0/reference/subtyping.html#variance + _marker: PhantomData<&'a core::cell::UnsafeCell>, + } + + impl<'a, T: 'a + ?Sized> Copy for PtrInner<'a, T> {} + impl<'a, T: 'a + ?Sized> Clone for PtrInner<'a, T> { + #[inline(always)] + fn clone(&self) -> PtrInner<'a, T> { + // SAFETY: None of the invariants on `ptr` are affected by having + // multiple copies of a `PtrInner`. 
+ *self + } + } + + impl<'a, T: 'a + ?Sized> PtrInner<'a, T> { + /// Constructs a `Ptr` from a [`NonNull`]. + /// + /// # Safety + /// + /// The caller promises that: + /// + /// 0. If `ptr`'s referent is not zero sized, then `ptr` has valid + /// provenance for its referent, which is entirely contained in some + /// Rust allocation, `A`. + /// 1. If `ptr`'s referent is not zero sized, `A` is guaranteed to live + /// for at least `'a`. + #[inline(always)] + #[must_use] + pub const unsafe fn new(ptr: NonNull) -> PtrInner<'a, T> { + // SAFETY: The caller has promised to satisfy all safety invariants + // of `PtrInner`. + Self { ptr, _marker: PhantomData } + } + + /// Converts this `PtrInner` to a [`NonNull`]. + /// + /// Note that this method does not consume `self`. The caller should + /// watch out for `unsafe` code which uses the returned `NonNull` in a + /// way that violates the safety invariants of `self`. + #[inline(always)] + #[must_use] + pub const fn as_non_null(&self) -> NonNull { + self.ptr + } + + /// Converts this `PtrInner` to a [`*mut T`]. + /// + /// Note that this method does not consume `self`. The caller should + /// watch out for `unsafe` code which uses the returned `*mut T` in a + /// way that violates the safety invariants of `self`. + #[inline(always)] + #[must_use] + pub const fn as_ptr(&self) -> *mut T { + self.ptr.as_ptr() + } + } +} + +impl<'a, T: ?Sized> PtrInner<'a, T> { + /// Constructs a `PtrInner` from a reference. + #[inline] + pub(crate) fn from_ref(ptr: &'a T) -> Self { + let ptr = NonNull::from(ptr); + // SAFETY: + // 0. If `ptr`'s referent is not zero sized, then `ptr`, by invariant on + // `&'a T` [1], has valid provenance for its referent, which is + // entirely contained in some Rust allocation, `A`. + // 1. If `ptr`'s referent is not zero sized, then `A`, by invariant on + // `&'a T`, is guaranteed to live for at least `'a`. 
+ // + // [1] Per https://doc.rust-lang.org/1.85.0/std/primitive.reference.html#safety: + // + // For all types, `T: ?Sized`, and for all `t: &T` or `t: &mut T`, + // when such values cross an API boundary, the following invariants + // must generally be upheld: + // ... + // - if `size_of_val(t) > 0`, then `t` is dereferenceable for + // `size_of_val(t)` many bytes + // + // If `t` points at address `a`, being “dereferenceable” for N bytes + // means that the memory range `[a, a + N)` is all contained within a + // single allocated object. + unsafe { Self::new(ptr) } + } + + /// Constructs a `PtrInner` from a mutable reference. + #[inline] + pub(crate) fn from_mut(ptr: &'a mut T) -> Self { + let ptr = NonNull::from(ptr); + // SAFETY: + // 0. If `ptr`'s referent is not zero sized, then `ptr`, by invariant on + // `&'a mut T` [1], has valid provenance for its referent, which is + // entirely contained in some Rust allocation, `A`. + // 1. If `ptr`'s referent is not zero sized, then `A`, by invariant on + // `&'a mut T`, is guaranteed to live for at least `'a`. + // + // [1] Per https://doc.rust-lang.org/1.85.0/std/primitive.reference.html#safety: + // + // For all types, `T: ?Sized`, and for all `t: &T` or `t: &mut T`, + // when such values cross an API boundary, the following invariants + // must generally be upheld: + // ... + // - if `size_of_val(t) > 0`, then `t` is dereferenceable for + // `size_of_val(t)` many bytes + // + // If `t` points at address `a`, being “dereferenceable” for N bytes + // means that the memory range `[a, a + N)` is all contained within a + // single allocated object. + unsafe { Self::new(ptr) } + } + + /// # Safety + /// + /// The caller may assume that the resulting `PtrInner` addresses the subset + /// of the bytes of `self`'s referent addressed by `C::project(self)`. 
+ #[must_use] + #[inline(always)] + pub fn project>(self) -> PtrInner<'a, U> { + let projected_raw = C::project(self); + + // SAFETY: `self`'s referent lives at a `NonNull` address, and is either + // zero-sized or lives in an allocation. In either case, it does not + // wrap around the address space [1], and so none of the addresses + // contained in it or one-past-the-end of it are null. + // + // By invariant on `C: Project`, `C::project` is a provenance-preserving + // projection which preserves or shrinks the set of referent bytes, so + // `projected_raw` references a subset of `self`'s referent, and so it + // cannot be null. + // + // [1] https://doc.rust-lang.org/1.92.0/std/ptr/index.html#allocation + let projected_non_null = unsafe { NonNull::new_unchecked(projected_raw) }; + + // SAFETY: As described in the preceding safety comment, `projected_raw`, + // and thus `projected_non_null`, addresses a subset of `self`'s + // referent. Thus, `projected_non_null` either: + // - Addresses zero bytes or, + // - Addresses a subset of the referent of `self`. In this case, `self` + // has provenance for its referent, which lives in an allocation. + // Since `projected_non_null` was constructed using a sequence of + // provenance-preserving operations, it also has provenance for its + // referent and that referent lives in an allocation. By invariant on + // `self`, that allocation lives for `'a`. + unsafe { PtrInner::new(projected_non_null) } + } +} + +#[allow(clippy::needless_lifetimes)] +impl<'a, T> PtrInner<'a, T> +where + T: ?Sized + KnownLayout, +{ + /// Extracts the metadata of this `ptr`. + pub(crate) fn meta(self) -> MetadataOf { + let meta = T::pointer_to_metadata(self.as_ptr()); + // SAFETY: By invariant on `PtrInner`, `self.as_non_null()` addresses no + // more than `isize::MAX` bytes. + unsafe { MetadataOf::new_unchecked(meta) } + } + + /// Produces a `PtrInner` with the same address and provenance as `self` but + /// the given `meta`. 
+ /// + /// # Safety + /// + /// The caller promises that if `self`'s referent is not zero sized, then + /// a pointer constructed from its address with the given `meta` metadata + /// will address a subset of the allocation pointed to by `self`. + #[inline] + pub(crate) unsafe fn with_meta(self, meta: T::PointerMetadata) -> Self + where + T: KnownLayout, + { + let raw = T::raw_from_ptr_len(self.as_non_null().cast(), meta); + + // SAFETY: + // + // Lemma 0: `raw` either addresses zero bytes, or addresses a subset of + // the allocation pointed to by `self` and has the same + // provenance as `self`. Proof: `raw` is constructed using + // provenance-preserving operations, and the caller has + // promised that, if `self`'s referent is not zero-sized, the + // resulting pointer addresses a subset of the allocation + // pointed to by `self`. + // + // 0. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not + // zero sized, then `ptr` is derived from some valid Rust allocation, + // `A`. + // 1. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not + // zero sized, then `ptr` has valid provenance for `A`. + // 2. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not + // zero sized, then `ptr` addresses a byte range which is entirely + // contained in `A`. + // 3. Per Lemma 0 and by invariant on `self`, `ptr` addresses a byte + // range whose length fits in an `isize`. + // 4. Per Lemma 0 and by invariant on `self`, `ptr` addresses a byte + // range which does not wrap around the address space. + // 5. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not + // zero sized, then `A` is guaranteed to live for at least `'a`. + unsafe { PtrInner::new(raw) } + } +} + +#[allow(clippy::needless_lifetimes)] +impl<'a, T> PtrInner<'a, T> +where + T: ?Sized + KnownLayout, +{ + /// Splits `T` in two. + /// + /// # Safety + /// + /// The caller promises that: + /// - `l_len.get() <= self.meta()`. 
+ /// + /// ## (Non-)Overlap + /// + /// Given `let (left, right) = ptr.split_at(l_len)`, it is guaranteed that + /// `left` and `right` are contiguous and non-overlapping if + /// `l_len.padding_needed_for() == 0`. This is true for all `[T]`. + /// + /// If `l_len.padding_needed_for() != 0`, then the left pointer will overlap + /// the right pointer to satisfy `T`'s padding requirements. + pub(crate) unsafe fn split_at_unchecked( + self, + l_len: crate::util::MetadataOf, + ) -> (Self, PtrInner<'a, [T::Elem]>) + where + T: SplitAt, + { + let l_len = l_len.get(); + + // SAFETY: The caller promises that `l_len.get() <= self.meta()`. + // Trivially, `0 <= l_len`. + let left = unsafe { self.with_meta(l_len) }; + + let right = self.trailing_slice(); + // SAFETY: The caller promises that `l_len <= self.meta() = slf.meta()`. + // Trivially, `slf.meta() <= slf.meta()`. + let right = unsafe { right.slice_unchecked(l_len..self.meta().get()) }; + + // SAFETY: If `l_len.padding_needed_for() == 0`, then `left` and `right` + // are non-overlapping. Proof: `left` is constructed `slf` with `l_len` + // as its (exclusive) upper bound. If `l_len.padding_needed_for() == 0`, + // then `left` requires no trailing padding following its final element. + // Since `right` is constructed from `slf`'s trailing slice with `l_len` + // as its (inclusive) lower bound, no byte is referred to by both + // pointers. + // + // Conversely, `l_len.padding_needed_for() == N`, where `N + // > 0`, `left` requires `N` bytes of trailing padding following its + // final element. Since `right` is constructed from the trailing slice + // of `slf` with `l_len` as its (inclusive) lower bound, the first `N` + // bytes of `right` are aliased by `left`. + (left, right) + } + + /// Produces the trailing slice of `self`. 
+ pub(crate) fn trailing_slice(self) -> PtrInner<'a, [T::Elem]> + where + T: SplitAt, + { + let offset = crate::trailing_slice_layout::().offset; + + let bytes = self.as_non_null().cast::().as_ptr(); + + // SAFETY: + // - By invariant on `T: KnownLayout`, `T::LAYOUT` describes `T`'s + // layout. `offset` is the offset of the trailing slice within `T`, + // which is by definition in-bounds or one byte past the end of any + // `T`, regardless of metadata. By invariant on `PtrInner`, `self` + // (and thus `bytes`) points to a byte range of size `<= isize::MAX`, + // and so `offset <= isize::MAX`. Since `size_of::() == 1`, + // `offset * size_of::() <= isize::MAX`. + // - If `offset > 0`, then by invariant on `PtrInner`, `self` (and thus + // `bytes`) points to a byte range entirely contained within the same + // allocated object as `self`. As explained above, this offset results + // in a pointer to or one byte past the end of this allocated object. + let bytes = unsafe { bytes.add(offset) }; + + // SAFETY: By the preceding safety argument, `bytes` is within or one + // byte past the end of the same allocated object as `self`, which + // ensures that it is non-null. + let bytes = unsafe { NonNull::new_unchecked(bytes) }; + + let ptr = KnownLayout::raw_from_ptr_len(bytes, self.meta().get()); + + // SAFETY: + // 0. If `ptr`'s referent is not zero sized, then `ptr` is derived from + // some valid Rust allocation, `A`, because `ptr` is derived from + // the same allocated object as `self`. + // 1. If `ptr`'s referent is not zero sized, then `ptr` has valid + // provenance for `A` because `raw` is derived from the same + // allocated object as `self` via provenance-preserving operations. + // 2. If `ptr`'s referent is not zero sized, then `ptr` addresses a byte + // range which is entirely contained in `A`, by previous safety proof + // on `bytes`. + // 3. `ptr` addresses a byte range whose length fits in an `isize`, by + // consequence of #2. + // 4. 
`ptr` addresses a byte range which does not wrap around the + // address space, by consequence of #2. + // 5. If `ptr`'s referent is not zero sized, then `A` is guaranteed to + // live for at least `'a`, because `ptr` is derived from `self`. + unsafe { PtrInner::new(ptr) } + } +} + +#[allow(clippy::needless_lifetimes)] +impl<'a, T> PtrInner<'a, [T]> { + /// Creates a pointer which addresses the given `range` of self. + /// + /// # Safety + /// + /// `range` is a valid range (`start <= end`) and `end <= self.meta()`. + pub(crate) unsafe fn slice_unchecked(self, range: Range) -> Self { + let base = self.as_non_null().cast::().as_ptr(); + + // SAFETY: The caller promises that `start <= end <= self.meta()`. By + // invariant, if `self`'s referent is not zero-sized, then `self` refers + // to a byte range which is contained within a single allocation, which + // is no more than `isize::MAX` bytes long, and which does not wrap + // around the address space. Thus, this pointer arithmetic remains + // in-bounds of the same allocation, and does not wrap around the + // address space. The offset (in bytes) does not overflow `isize`. + // + // If `self`'s referent is zero-sized, then these conditions are + // trivially satisfied. + let base = unsafe { base.add(range.start) }; + + // SAFETY: The caller promises that `start <= end`, and so this will not + // underflow. + #[allow(unstable_name_collisions)] + let len = unsafe { range.end.unchecked_sub(range.start) }; + + let ptr = core::ptr::slice_from_raw_parts_mut(base, len); + + // SAFETY: By invariant, `self`'s referent is either a ZST or lives + // entirely in an allocation. `ptr` points inside of or one byte past + // the end of that referent. Thus, in either case, `ptr` is non-null. + let ptr = unsafe { NonNull::new_unchecked(ptr) }; + + // SAFETY: + // + // Lemma 0: `ptr` addresses a subset of the bytes addressed by `self`, + // and has the same provenance. 
Proof: The caller guarantees + // that `start <= end <= self.meta()`. Thus, `base` is + // in-bounds of `self`, and `base + (end - start)` is also + // in-bounds of self. Finally, `ptr` is constructed using + // provenance-preserving operations. + // + // 0. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not + // zero sized, then `ptr` has valid provenance for its referent, + // which is entirely contained in some Rust allocation, `A`. + // 1. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not + // zero sized, then `A` is guaranteed to live for at least `'a`. + unsafe { PtrInner::new(ptr) } + } + + /// Iteratively projects the elements `PtrInner` from `PtrInner<[T]>`. + pub(crate) fn iter(&self) -> impl Iterator> { + // FIXME(#429): Once `NonNull::cast` documents that it preserves + // provenance, cite those docs. + let base = self.as_non_null().cast::().as_ptr(); + (0..self.meta().get()).map(move |i| { + // FIXME(https://github.com/rust-lang/rust/issues/74265): Use + // `NonNull::get_unchecked_mut`. + + // SAFETY: If the following conditions are not satisfied + // `pointer::cast` may induce Undefined Behavior [1]: + // + // > - The computed offset, `count * size_of::()` bytes, must not + // > overflow `isize``. + // > - If the computed offset is non-zero, then `self` must be + // > derived from a pointer to some allocated object, and the + // > entire memory range between `self` and the result must be in + // > bounds of that allocated object. In particular, this range + // > must not “wrap around” the edge of the address space. + // + // [1] https://doc.rust-lang.org/std/primitive.pointer.html#method.add + // + // We satisfy both of these conditions here: + // - By invariant on `Ptr`, `self` addresses a byte range whose + // length fits in an `isize`. 
Since `elem` is contained in `self`, + // the computed offset of `elem` must fit within `isize.` + // - If the computed offset is non-zero, then this means that the + // referent is not zero-sized. In this case, `base` points to an + // allocated object (by invariant on `self`). Thus: + // - By contract, `self.meta()` accurately reflects the number of + // elements in the slice. `i` is in bounds of `c.meta()` by + // construction, and so the result of this addition cannot + // overflow past the end of the allocation referred to by `c`. + // - By invariant on `Ptr`, `self` addresses a byte range which + // does not wrap around the address space. Since `elem` is + // contained in `self`, the computed offset of `elem` must wrap + // around the address space. + // + // FIXME(#429): Once `pointer::add` documents that it preserves + // provenance, cite those docs. + let elem = unsafe { base.add(i) }; + + // SAFETY: `elem` must not be null. `base` is constructed from a + // `NonNull` pointer, and the addition that produces `elem` must not + // overflow or wrap around, so `elem >= base > 0`. + // + // FIXME(#429): Once `NonNull::new_unchecked` documents that it + // preserves provenance, cite those docs. + let elem = unsafe { NonNull::new_unchecked(elem) }; + + // SAFETY: The safety invariants of `Ptr::new` (see definition) are + // satisfied: + // 0. If `elem`'s referent is not zero sized, then `elem` has valid + // provenance for its referent, because it derived from `self` + // using a series of provenance-preserving operations, and + // because `self` has valid provenance for its referent. By the + // same argument, `elem`'s referent is entirely contained within + // the same allocated object as `self`'s referent. + // 1. If `elem`'s referent is not zero sized, then the allocation of + // `elem` is guaranteed to live for at least `'a`, because `elem` + // is entirely contained in `self`, which lives for at least `'a` + // by invariant on `Ptr`. 
+ unsafe { PtrInner::new(elem) } + }) + } +} + +impl<'a, T, const N: usize> PtrInner<'a, [T; N]> { + /// Casts this pointer-to-array into a slice. + /// + /// # Safety + /// + /// Callers may assume that the returned `PtrInner` references the same + /// address and length as `self`. + #[allow(clippy::wrong_self_convention)] + pub(crate) fn as_slice(self) -> PtrInner<'a, [T]> { + let start = self.as_non_null().cast::().as_ptr(); + let slice = core::ptr::slice_from_raw_parts_mut(start, N); + // SAFETY: `slice` is not null, because it is derived from `start` + // which is non-null. + let slice = unsafe { NonNull::new_unchecked(slice) }; + // SAFETY: Lemma: In the following safety arguments, note that `slice` + // is derived from `self` in two steps: first, by casting `self: [T; N]` + // to `start: T`, then by constructing a pointer to a slice starting at + // `start` of length `N`. As a result, `slice` references exactly the + // same allocation as `self`, if any. + // + // 0. By the above lemma, if `slice`'s referent is not zero sized, then + // `slice` has the same referent as `self`. By invariant on `self`, + // this referent is entirely contained within some allocation, `A`. + // Because `slice` was constructed using provenance-preserving + // operations, it has provenance for its entire referent. + // 1. By the above lemma, if `slice`'s referent is not zero sized, then + // `A` is guaranteed to live for at least `'a`, because it is derived + // from the same allocation as `self`, which, by invariant on + // `PtrInner`, lives for at least `'a`. + unsafe { PtrInner::new(slice) } + } +} + +impl<'a> PtrInner<'a, [u8]> { + /// Attempts to cast `self` to a `U` using the given cast type. + /// + /// If `U` is a slice DST and pointer metadata (`meta`) is provided, then + /// the cast will only succeed if it would produce an object with the given + /// metadata. 
+ /// + /// Returns `None` if the resulting `U` would be invalidly-aligned, if no + /// `U` can fit in `self`, or if the provided pointer metadata describes an + /// invalid instance of `U`. On success, returns a pointer to the + /// largest-possible `U` which fits in `self`. + /// + /// # Safety + /// + /// The caller may assume that this implementation is correct, and may rely + /// on that assumption for the soundness of their code. In particular, the + /// caller may assume that, if `try_cast_into` returns `Some((ptr, + /// remainder))`, then `ptr` and `remainder` refer to non-overlapping byte + /// ranges within `self`, and that `ptr` and `remainder` entirely cover + /// `self`. Finally: + /// - If this is a prefix cast, `ptr` has the same address as `self`. + /// - If this is a suffix cast, `remainder` has the same address as `self`. + #[inline] + pub(crate) fn try_cast_into( + self, + cast_type: CastType, + meta: Option, + ) -> Result<(PtrInner<'a, U>, PtrInner<'a, [u8]>), CastError> + where + U: 'a + ?Sized + KnownLayout, + { + // PANICS: By invariant, the byte range addressed by + // `self.as_non_null()` does not wrap around the address space. This + // implies that the sum of the address (represented as a `usize`) and + // length do not overflow `usize`, as required by + // `validate_cast_and_convert_metadata`. Thus, this call to + // `validate_cast_and_convert_metadata` will only panic if `U` is a DST + // whose trailing slice element is zero-sized. + let maybe_metadata = MetadataOf::::validate_cast_and_convert_metadata( + AsAddress::addr(self.as_ptr()), + self.meta(), + cast_type, + meta, + ); + + let (elems, split_at) = match maybe_metadata { + Ok((elems, split_at)) => (elems, split_at), + Err(MetadataCastError::Alignment) => { + // SAFETY: Since `validate_cast_and_convert_metadata` returned + // an alignment error, `U` must have an alignment requirement + // greater than one. 
+ let err = unsafe { AlignmentError::<_, U>::new_unchecked(self) }; + return Err(CastError::Alignment(err)); + } + Err(MetadataCastError::Size) => return Err(CastError::Size(SizeError::new(self))), + }; + + // SAFETY: `validate_cast_and_convert_metadata` promises to return + // `split_at <= self.meta()`. + // + // Lemma 0: `l_slice` and `r_slice` are non-overlapping. Proof: By + // contract on `PtrInner::split_at_unchecked`, the produced `PtrInner`s + // are always non-overlapping if `self` is a `[T]`; here it is a `[u8]`. + let (l_slice, r_slice) = unsafe { self.split_at_unchecked(split_at) }; + + let (target, remainder) = match cast_type { + CastType::Prefix => (l_slice, r_slice), + CastType::Suffix => (r_slice, l_slice), + }; + + let base = target.as_non_null().cast::(); + + let ptr = U::raw_from_ptr_len(base, elems.get()); + + // SAFETY: + // 0. By invariant, if `target`'s referent is not zero sized, then + // `target` has provenance valid for some Rust allocation, `A`. + // Because `ptr` is derived from `target` via provenance-preserving + // operations, `ptr` will also have provenance valid for its entire + // referent. + // 1. `validate_cast_and_convert_metadata` promises that the object + // described by `elems` and `split_at` lives at a byte range which is + // a subset of the input byte range. Thus, by invariant, if + // `target`'s referent is not zero sized, then `target` refers to an + // allocation which is guaranteed to live for at least `'a`, and thus + // so does `ptr`. 
+ Ok((unsafe { PtrInner::new(ptr) }, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::*; + + #[test] + fn test_meta() { + let arr = [1; 16]; + let dst = <[u8]>::ref_from_bytes(&arr[..]).unwrap(); + let ptr = PtrInner::from_ref(dst); + assert_eq!(ptr.meta().get(), 16); + + // SAFETY: 8 is less than 16 + let ptr = unsafe { ptr.with_meta(8) }; + + assert_eq!(ptr.meta().get(), 8); + } + + #[test] + fn test_split_at() { + fn test_split_at() { + #[derive(FromBytes, KnownLayout, SplitAt, Immutable)] + #[repr(C)] + struct SliceDst { + prefix: [u8; OFFSET], + trailing: [u8], + } + + let n: usize = BUFFER_SIZE - OFFSET; + let arr = [1; BUFFER_SIZE]; + let dst = SliceDst::::ref_from_bytes(&arr[..]).unwrap(); + let ptr = PtrInner::from_ref(dst); + for i in 0..=n { + assert_eq!(ptr.meta().get(), n); + // SAFETY: `i` is in bounds by construction. + let i = unsafe { MetadataOf::new_unchecked(i) }; + // SAFETY: `i` is in bounds by construction. + let (l, r) = unsafe { ptr.split_at_unchecked(i) }; + // SAFETY: Points to a valid value by construction. + #[allow(clippy::undocumented_unsafe_blocks, clippy::as_conversions)] + // Clippy false positive + let l_sum: usize = l + .trailing_slice() + .iter() + .map(|ptr| unsafe { core::ptr::read_unaligned(ptr.as_ptr()) } as usize) + .sum(); + // SAFETY: Points to a valid value by construction. 
+ #[allow(clippy::undocumented_unsafe_blocks, clippy::as_conversions)] + // Clippy false positive + let r_sum: usize = r + .iter() + .map(|ptr| unsafe { core::ptr::read_unaligned(ptr.as_ptr()) } as usize) + .sum(); + assert_eq!(l_sum, i.get()); + assert_eq!(r_sum, n - i.get()); + assert_eq!(l_sum + r_sum, n); + } + } + + test_split_at::<0, 16>(); + test_split_at::<1, 17>(); + test_split_at::<2, 18>(); + } + + #[test] + fn test_trailing_slice() { + fn test_trailing_slice() { + #[derive(FromBytes, KnownLayout, SplitAt, Immutable)] + #[repr(C)] + struct SliceDst { + prefix: [u8; OFFSET], + trailing: [u8], + } + + let n: usize = BUFFER_SIZE - OFFSET; + let arr = [1; BUFFER_SIZE]; + let dst = SliceDst::::ref_from_bytes(&arr[..]).unwrap(); + let ptr = PtrInner::from_ref(dst); + + assert_eq!(ptr.meta().get(), n); + let trailing = ptr.trailing_slice(); + assert_eq!(trailing.meta().get(), n); + + assert_eq!( + // SAFETY: We assume this to be sound for the sake of this test, + // which will fail, here, in miri, if the safety precondition of + // `offset_of` is not satisfied. + unsafe { + #[allow(clippy::as_conversions)] + let offset = (trailing.as_ptr() as *mut u8).offset_from(ptr.as_ptr() as *mut _); + offset + }, + isize::try_from(OFFSET).unwrap(), + ); + + // SAFETY: Points to a valid value by construction. 
+ #[allow(clippy::undocumented_unsafe_blocks, clippy::as_conversions)] + // Clippy false positive + let trailing: usize = trailing + .iter() + .map(|ptr| unsafe { core::ptr::read_unaligned(ptr.as_ptr()) } as usize) + .sum(); + + assert_eq!(trailing, n); + } + + test_trailing_slice::<0, 16>(); + test_trailing_slice::<1, 17>(); + test_trailing_slice::<2, 18>(); + } + #[test] + fn test_ptr_inner_clone() { + let mut x = 0u8; + let p = PtrInner::from_mut(&mut x); + #[allow(clippy::clone_on_copy)] + let p2 = p.clone(); + assert_eq!(p.as_non_null(), p2.as_non_null()); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/invariant.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/invariant.rs new file mode 100644 index 0000000000000000000000000000000000000000..de5d9cd295c6ab8347f66fc0dc5f9bf55d357405 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/invariant.rs @@ -0,0 +1,298 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(missing_copy_implementations, missing_debug_implementations)] + +//! The parameterized invariants of a [`Ptr`][super::Ptr]. +//! +//! Invariants are encoded as ([`Aliasing`], [`Alignment`], [`Validity`]) +//! triples implementing the [`Invariants`] trait. + +/// The invariants of a [`Ptr`][super::Ptr]. +pub trait Invariants: Sealed { + type Aliasing: Aliasing; + type Alignment: Alignment; + type Validity: Validity; +} + +impl Invariants for (A, AA, V) { + type Aliasing = A; + type Alignment = AA; + type Validity = V; +} + +/// The aliasing invariant of a [`Ptr`][super::Ptr]. 
+/// +/// All aliasing invariants must permit reading from the bytes of a pointer's +/// referent which are not covered by [`UnsafeCell`]s. +/// +/// [`UnsafeCell`]: core::cell::UnsafeCell +pub trait Aliasing: Sealed { + /// Is `Self` [`Exclusive`]? + #[doc(hidden)] + const IS_EXCLUSIVE: bool; +} + +/// The alignment invariant of a [`Ptr`][super::Ptr]. +pub trait Alignment: Sealed { + #[doc(hidden)] + #[must_use] + fn read(ptr: crate::Ptr<'_, T, I>) -> T + where + T: Copy + Read, + I: Invariants, + I::Aliasing: Reference; +} + +/// The validity invariant of a [`Ptr`][super::Ptr]. +/// +/// # Safety +/// +/// In this section, we will use `Ptr` as a shorthand for `Ptr>` for brevity. +/// +/// Each `V: Validity` defines a set of bit values which may appear in the +/// referent of a `Ptr`, denoted `S(T, V)`. Each `V: Validity`, in its +/// documentation, provides a definition of `S(T, V)` which must be valid for +/// all `T: ?Sized`. Any `V: Validity` must guarantee that this set is only a +/// function of the *bit validity* of the referent type, `T`, and not of any +/// other property of `T`. As a consequence, given `V: Validity`, `T`, and `U` +/// where `T` and `U` have the same bit validity, `S(V, T) = S(V, U)`. +/// +/// It is guaranteed that the referent of any `ptr: Ptr` is a member of +/// `S(T, V)`. Unsafe code must ensure that this guarantee will be upheld for +/// any existing `Ptr`s or any `Ptr`s that that code creates. +/// +/// An important implication of this guarantee is that it restricts what +/// transmutes are sound, where "transmute" is used in this context to refer to +/// changing the referent type or validity invariant of a `Ptr`, as either +/// change may change the set of bit values permitted to appear in the referent. 
+/// In particular, the following are necessary (but not sufficient) conditions +/// in order for a transmute from `src: Ptr` to `dst: Ptr` to be +/// sound: +/// - If `S(T, V) = S(U, W)`, then no restrictions apply; otherwise, +/// - If `dst` permits mutation of its referent (e.g. via `Exclusive` aliasing +/// or interior mutation under `Shared` aliasing), then it must hold that +/// `S(T, V) ⊇ S(U, W)` - in other words, the transmute must not expand the +/// set of allowed referent bit patterns. A violation of this requirement +/// would permit using `dst` to write `x` where `x ∈ S(U, W)` but `x ∉ S(T, +/// V)`, which would violate the guarantee that `src`'s referent may only +/// contain values in `S(T, V)`. +/// - If the referent may be mutated without going through `dst` while `dst` is +/// live (e.g. via interior mutation on a `Shared`-aliased `Ptr` or `&` +/// reference), then it must hold that `S(T, V) ⊆ S(U, W)` - in other words, +/// the transmute must not shrink the set of allowed referent bit patterns. A +/// violation of this requirement would permit using `src` or another +/// mechanism (e.g. a `&` reference used to derive `src`) to write `x` where +/// `x ∈ S(T, V)` but `x ∉ S(U, W)`, which would violate the guarantee that +/// `dst`'s referent may only contain values in `S(U, W)`. +pub unsafe trait Validity: Sealed { + const KIND: ValidityKind; +} + +pub enum ValidityKind { + Uninit, + AsInitialized, + Initialized, + Valid, +} + +/// An [`Aliasing`] invariant which is either [`Shared`] or [`Exclusive`]. +/// +/// # Safety +/// +/// Given `A: Reference`, callers may assume that either `A = Shared` or `A = +/// Exclusive`. +pub trait Reference: Aliasing + Sealed {} + +/// The `Ptr<'a, T>` adheres to the aliasing rules of a `&'a T`. 
+/// +/// The referent of a shared-aliased `Ptr` may be concurrently referenced by any +/// number of shared-aliased `Ptr` or `&T` references, or by any number of +/// `Ptr` or `&U` references as permitted by `T`'s library safety invariants, +/// and may not be concurrently referenced by any exclusively-aliased `Ptr`s or +/// `&mut` references. The referent must not be mutated, except via +/// [`UnsafeCell`]s, and only when permitted by `T`'s library safety invariants. +/// +/// [`UnsafeCell`]: core::cell::UnsafeCell +pub enum Shared {} +impl Aliasing for Shared { + const IS_EXCLUSIVE: bool = false; +} +impl Reference for Shared {} + +/// The `Ptr<'a, T>` adheres to the aliasing rules of a `&'a mut T`. +/// +/// The referent of an exclusively-aliased `Ptr` may not be concurrently +/// referenced by any other `Ptr`s or references, and may not be accessed (read +/// or written) other than via this `Ptr`. +pub enum Exclusive {} +impl Aliasing for Exclusive { + const IS_EXCLUSIVE: bool = true; +} +impl Reference for Exclusive {} + +/// It is unknown whether the pointer is aligned. +pub enum Unaligned {} + +impl Alignment for Unaligned { + #[inline(always)] + fn read(ptr: crate::Ptr<'_, T, I>) -> T + where + T: Copy + Read, + I: Invariants, + I::Aliasing: Reference, + { + (*ptr.into_unalign().as_ref()).into_inner() + } +} + +/// The referent is aligned: for `Ptr`, the referent's address is a multiple +/// of the `T`'s alignment. +pub enum Aligned {} +impl Alignment for Aligned { + #[inline(always)] + fn read(ptr: crate::Ptr<'_, T, I>) -> T + where + T: Copy + Read, + I: Invariants, + I::Aliasing: Reference, + { + *ptr.as_ref() + } +} + +/// Any bit pattern is allowed in the `Ptr`'s referent, including uninitialized +/// bytes. 
+pub enum Uninit {} +// SAFETY: `Uninit`'s validity is well-defined for all `T: ?Sized`, and is not a +// function of any property of `T` other than its bit validity (in fact, it's +// not even a property of `T`'s bit validity, but this is more than we are +// required to uphold). +unsafe impl Validity for Uninit { + const KIND: ValidityKind = ValidityKind::Uninit; +} + +/// The byte ranges initialized in `T` are also initialized in the referent of a +/// `Ptr`. +/// +/// Formally: uninitialized bytes may only be present in `Ptr`'s referent +/// where they are guaranteed to be present in `T`. This is a dynamic property: +/// if, at a particular byte offset, a valid enum discriminant is set, the +/// subsequent bytes may only have uninitialized bytes as specified by the +/// corresponding enum. +/// +/// Formally, given `len = size_of_val_raw(ptr)`, at every byte offset, `b`, in +/// the range `[0, len)`: +/// - If, in any instance `t: T` of length `len`, the byte at offset `b` in `t` +/// is initialized, then the byte at offset `b` within `*ptr` must be +/// initialized. +/// - Let `c` be the contents of the byte range `[0, b)` in `*ptr`. Let `S` be +/// the subset of valid instances of `T` of length `len` which contain `c` in +/// the offset range `[0, b)`. If, in any instance of `t: T` in `S`, the byte +/// at offset `b` in `t` is initialized, then the byte at offset `b` in `*ptr` +/// must be initialized. +/// +/// Pragmatically, this means that if `*ptr` is guaranteed to contain an enum +/// type at a particular offset, and the enum discriminant stored in `*ptr` +/// corresponds to a valid variant of that enum type, then it is guaranteed +/// that the appropriate bytes of `*ptr` are initialized as defined by that +/// variant's bit validity (although note that the variant may contain another +/// enum type, in which case the same rules apply depending on the state of +/// its discriminant, and so on recursively). 
+pub enum AsInitialized {} +// SAFETY: `AsInitialized`'s validity is well-defined for all `T: ?Sized`, and +// is not a function of any property of `T` other than its bit validity. +unsafe impl Validity for AsInitialized { + const KIND: ValidityKind = ValidityKind::AsInitialized; +} + +/// The byte ranges in the referent are fully initialized. In other words, if +/// the referent is `N` bytes long, then it contains a bit-valid `[u8; N]`. +pub enum Initialized {} +// SAFETY: `Initialized`'s validity is well-defined for all `T: ?Sized`, and is +// not a function of any property of `T` other than its bit validity (in fact, +// it's not even a property of `T`'s bit validity, but this is more than we are +// required to uphold). +unsafe impl Validity for Initialized { + const KIND: ValidityKind = ValidityKind::Initialized; +} + +/// The referent of a `Ptr` is valid for `T`, upholding bit validity and any +/// library safety invariants. +pub enum Valid {} +// SAFETY: `Valid`'s validity is well-defined for all `T: ?Sized`, and is not a +// function of any property of `T` other than its bit validity. +unsafe impl Validity for Valid { + const KIND: ValidityKind = ValidityKind::Valid; +} + +/// # Safety +/// +/// `DT: CastableFrom` is sound if `SV = DV = Uninit` or `SV = DV = +/// Initialized`. +pub unsafe trait CastableFrom {} + +// SAFETY: `SV = DV = Uninit`. +unsafe impl CastableFrom for DT {} +// SAFETY: `SV = DV = Initialized`. +unsafe impl CastableFrom for DT {} + +/// [`Ptr`](crate::Ptr) referents that permit unsynchronized read operations. +/// +/// `T: Read` implies that a pointer to `T` with aliasing `A` permits +/// unsynchronized read operations. This can be because `A` is [`Exclusive`] or +/// because `T` does not permit interior mutation. 
+/// +/// # Safety +/// +/// `T: Read` if either of the following conditions holds: +/// - `A` is [`Exclusive`] +/// - `T` implements [`Immutable`](crate::Immutable) +/// +/// As a consequence, if `T: Read`, then any `Ptr` is +/// permitted to perform unsynchronized reads from its referent. +pub trait Read {} + +impl Read for T {} +impl Read for T {} + +/// Unsynchronized reads are permitted because only one live [`Ptr`](crate::Ptr) +/// or reference may exist to the referent bytes at a time. +#[derive(Copy, Clone, Debug)] +#[doc(hidden)] +pub enum BecauseExclusive {} + +/// Unsynchronized reads are permitted because no live [`Ptr`](crate::Ptr)s or +/// references permit interior mutation. +#[derive(Copy, Clone, Debug)] +#[doc(hidden)] +pub enum BecauseImmutable {} + +use sealed::Sealed; +mod sealed { + use super::*; + + pub trait Sealed {} + + impl Sealed for Shared {} + impl Sealed for Exclusive {} + + impl Sealed for Unaligned {} + impl Sealed for Aligned {} + + impl Sealed for Uninit {} + impl Sealed for AsInitialized {} + impl Sealed for Initialized {} + impl Sealed for Valid {} + + impl Sealed for (A, AA, V) {} + + impl Sealed for BecauseImmutable {} + impl Sealed for BecauseExclusive {} +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a2df03f78b8952b6de5643ff9e8ff034412e0be --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/mod.rs @@ -0,0 +1,408 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +//! Abstractions over raw pointers. 
+ +mod inner; +#[doc(hidden)] +pub mod invariant; +mod ptr; +mod transmute; + +#[doc(hidden)] +pub use {inner::PtrInner, transmute::*}; +#[doc(hidden)] +pub use { + invariant::{BecauseExclusive, BecauseImmutable, Read}, + ptr::*, +}; + +use crate::wrappers::ReadOnly; + +/// A shorthand for a maybe-valid, maybe-aligned reference. Used as the argument +/// to [`TryFromBytes::is_bit_valid`]. +/// +/// [`TryFromBytes::is_bit_valid`]: crate::TryFromBytes::is_bit_valid +pub type Maybe<'a, T, Alignment = invariant::Unaligned> = + Ptr<'a, ReadOnly, (invariant::Shared, Alignment, invariant::Initialized)>; + +/// Checks if the referent is zeroed. +pub(crate) fn is_zeroed(ptr: Ptr<'_, T, I>) -> bool +where + T: crate::Immutable + crate::KnownLayout, + I: invariant::Invariants, + I::Aliasing: invariant::Reference, +{ + ptr.as_bytes().as_ref().iter().all(|&byte| byte == 0) +} + +#[doc(hidden)] +pub mod cast { + use core::{marker::PhantomData, mem}; + + use crate::{ + layout::{SizeInfo, TrailingSliceLayout}, + HasField, KnownLayout, PtrInner, + }; + + /// A pointer cast or projection. + /// + /// # Safety + /// + /// The implementation of `project` must satisfy its safety post-condition. + pub unsafe trait Project { + /// Projects a pointer from `Src` to `Dst`. + /// + /// Users should generally not call `project` directly, and instead + /// should use high-level APIs like [`PtrInner::project`] or + /// [`Ptr::project`]. + /// + /// [`Ptr::project`]: crate::pointer::Ptr::project + /// + /// # Safety + /// + /// The returned pointer refers to a non-strict subset of the bytes of + /// `src`'s referent, and has the same provenance as `src`. + fn project(src: PtrInner<'_, Src>) -> *mut Dst; + } + + /// A [`Project`] which preserves the address of the referent – a pointer + /// cast. + /// + /// # Safety + /// + /// A `Cast` projection must preserve the address of the referent. It may + /// shrink the set of referent bytes, and it may change the referent's type. 
+ pub unsafe trait Cast: Project {} + + /// A [`Cast`] which does not shrink the set of referent bytes. + /// + /// # Safety + /// + /// A `CastExact` projection must preserve the set of referent bytes. + pub unsafe trait CastExact: Cast {} + + /// A no-op pointer cast. + #[derive(Default, Copy, Clone)] + #[allow(missing_debug_implementations)] + pub struct IdCast; + + // SAFETY: `project` returns its argument unchanged, and so it is a + // provenance-preserving projection which preserves the set of referent + // bytes. + unsafe impl Project for IdCast { + #[inline(always)] + fn project(src: PtrInner<'_, T>) -> *mut T { + src.as_ptr() + } + } + + // SAFETY: The `Project::project` impl preserves referent address. + unsafe impl Cast for IdCast {} + + // SAFETY: The `Project::project` impl preserves referent size. + unsafe impl CastExact for IdCast {} + + /// A pointer cast which preserves or shrinks the set of referent bytes of + /// a statically-sized referent. + /// + /// # Safety + /// + /// The implementation of [`Project`] uses a compile-time assertion to + /// guarantee that `Dst` is no larger than `Src`. Thus, `CastSized` has a + /// sound implementation of [`Project`] for all `Src` and `Dst` – the caller + /// may pass any `Src` and `Dst` without being responsible for soundness. + #[allow(missing_debug_implementations, missing_copy_implementations)] + pub enum CastSized {} + + // SAFETY: By the `static_assert!`, `Dst` is no larger than `Src`, + // and so all casts preserve or shrink the set of referent bytes. All + // operations preserve provenance. + unsafe impl Project for CastSized { + #[inline(always)] + fn project(src: PtrInner<'_, Src>) -> *mut Dst { + static_assert!(Src, Dst => mem::size_of::() >= mem::size_of::()); + src.as_ptr().cast::() + } + } + + // SAFETY: The `Project::project` impl preserves referent address. 
+ unsafe impl Cast for CastSized {} + + /// A pointer cast which preserves the set of referent bytes of a + /// statically-sized referent. + /// + /// # Safety + /// + /// The implementation of [`Project`] uses a compile-time assertion to + /// guarantee that `Dst` has the same size as `Src`. Thus, `CastSizedExact` + /// has a sound implementation of [`Project`] for all `Src` and `Dst` – the + /// caller may pass any `Src` and `Dst` without being responsible for + /// soundness. + #[allow(missing_debug_implementations, missing_copy_implementations)] + pub enum CastSizedExact {} + + // SAFETY: By the `static_assert!`, `Dst` has the same size as `Src`, + // and so all casts preserve the set of referent bytes. All operations + // preserve provenance. + unsafe impl Project for CastSizedExact { + #[inline(always)] + fn project(src: PtrInner<'_, Src>) -> *mut Dst { + static_assert!(Src, Dst => mem::size_of::() == mem::size_of::()); + src.as_ptr().cast::() + } + } + + // SAFETY: The `Project::project_raw` impl preserves referent address. + unsafe impl Cast for CastSizedExact {} + + // SAFETY: By the `static_assert!`, `Project::project_raw` impl preserves + // referent size. + unsafe impl CastExact for CastSizedExact {} + + /// A pointer cast which preserves or shrinks the set of referent bytes of + /// a dynamically-sized referent. + /// + /// # Safety + /// + /// The implementation of [`Project`] uses a compile-time assertion to + /// guarantee that the cast preserves the set of referent bytes. Thus, + /// `CastUnsized` has a sound implementation of [`Project`] for all `Src` + /// and `Dst` – the caller may pass any `Src` and `Dst` without being + /// responsible for soundness. 
+ #[allow(missing_debug_implementations, missing_copy_implementations)] + pub enum CastUnsized {} + + // SAFETY: By the `static_assert!`, `Src` and `Dst` are either: + // - Both sized and equal in size + // - Both slice DSTs with the same trailing slice offset and element size + // and with align_of::() == align_of::(). These ensure that any + // given pointer metadata encodes the same size for both `Src` and `Dst` + // (note that the alignment is required as it affects the amount of + // trailing padding). Thus, `project` preserves the set of referent bytes. + unsafe impl Project for CastUnsized + where + Src: ?Sized + KnownLayout, + Dst: ?Sized + KnownLayout, + { + #[inline(always)] + fn project(src: PtrInner<'_, Src>) -> *mut Dst { + // FIXME: Do we want this to support shrinking casts as well? If so, + // we'll need to remove the `CastExact` impl. + static_assert!(Src: ?Sized + KnownLayout, Dst: ?Sized + KnownLayout => { + let src = ::LAYOUT; + let dst = ::LAYOUT; + match (src.size_info, dst.size_info) { + (SizeInfo::Sized { size: src_size }, SizeInfo::Sized { size: dst_size }) => src_size == dst_size, + ( + SizeInfo::SliceDst(TrailingSliceLayout { offset: src_offset, elem_size: src_elem_size }), + SizeInfo::SliceDst(TrailingSliceLayout { offset: dst_offset, elem_size: dst_elem_size }) + ) => src.align.get() == dst.align.get() && src_offset == dst_offset && src_elem_size == dst_elem_size, + _ => false, + } + }); + + let metadata = Src::pointer_to_metadata(src.as_ptr()); + Dst::raw_from_ptr_len(src.as_non_null().cast::(), metadata).as_ptr() + } + } + + // SAFETY: The `Project::project` impl preserves referent address. + unsafe impl Cast for CastUnsized + where + Src: ?Sized + KnownLayout, + Dst: ?Sized + KnownLayout, + { + } + + // SAFETY: By the `static_assert!` in `Project::project`, `Src` and `Dst` + // are either: + // - Both sized and equal in size + // - Both slice DSTs with the same alignment, trailing slice offset, and + // element size. 
These ensure that any given pointer metadata encodes the + // same size for both `Src` and `Dst` (note that the alignment is required + // as it affects the amount of trailing padding). + unsafe impl CastExact for CastUnsized + where + Src: ?Sized + KnownLayout, + Dst: ?Sized + KnownLayout, + { + } + + /// A field projection + /// + /// A `Projection` is a [`Project`] which implements projection by + /// delegating to an implementation of [`HasField::project`]. + #[allow(missing_debug_implementations, missing_copy_implementations)] + pub struct Projection { + _never: core::convert::Infallible, + _phantom: PhantomData, + } + + // SAFETY: `HasField::project` has the same safety post-conditions as + // `Project::project`. + unsafe impl Project + for Projection + where + T: HasField, + { + #[inline(always)] + fn project(src: PtrInner<'_, T>) -> *mut T::Type { + T::project(src) + } + } + + // SAFETY: All `repr(C)` union fields exist at offset 0 within the union [1], + // and so any union projection is actually a cast (ie, preserves address). + // + // [1] Per + // https://doc.rust-lang.org/1.92.0/reference/type-layout.html#reprc-unions, + // it's not *technically* guaranteed that non-maximally-sized fields + // are at offset 0, but it's clear that this is the intention of `repr(C)` + // unions. It says: + // + // > A union declared with `#[repr(C)]` will have the same size and + // > alignment as an equivalent C union declaration in the C language for + // > the target platform. + // + // Note that this only mentions size and alignment, not layout. However, + // C unions *do* guarantee that all fields start at offset 0. 
[2] + // + // This is also reinforced by + // https://doc.rust-lang.org/1.92.0/reference/items/unions.html#r-items.union.fields.offset: + // + // > Fields might have a non-zero offset (except when the C + // > representation is used); in that case the bits starting at the + // > offset of the fields are read + // + // [2] Per https://port70.net/~nsz/c/c11/n1570.html#6.7.2.1p16: + // + // > The size of a union is sufficient to contain the largest of its + // > members. The value of at most one of the members can be stored in a + // > union object at any time. A pointer to a union object, suitably + // > converted, points to each of its members (or if a member is a + // > bit-field, then to the unit in which it resides), and vice versa. + // + // FIXME(https://github.com/rust-lang/unsafe-code-guidelines/issues/595): + // Cite the documentation once it's updated. + unsafe impl Cast + for Projection + where + T: HasField, + { + } + + /// A transitive sequence of projections. + /// + /// Given `TU: Project` and `UV: Project`, `TransitiveProject<_, TU, UV>` is + /// a [`Project`] which projects by applying `TU` followed by `UV`. + /// + /// If `TU: Cast` and `UV: Cast`, then `TransitiveProject<_, TU, UV>: Cast`. + #[allow(missing_debug_implementations)] + pub struct TransitiveProject { + _never: core::convert::Infallible, + _projections: PhantomData<(TU, UV)>, + // On our MSRV (1.56), the debuginfo for a tuple containing both an + // uninhabited type and a DST causes an ICE. We split `U` from `TU` and + // `UV` to avoid this situation. + _u: PhantomData, + } + + // SAFETY: Since `TU::project` and `UV::project` are each + // provenance-preserving operations which preserve or shrink the set of + // referent bytes, so is their composition. 
+ unsafe impl Project for TransitiveProject + where + T: ?Sized, + U: ?Sized, + V: ?Sized, + TU: Project, + UV: Project, + { + #[inline(always)] + fn project(t: PtrInner<'_, T>) -> *mut V { + t.project::<_, TU>().project::<_, UV>().as_ptr() + } + } + + // SAFETY: Since the `Project::project` impl delegates to `TU::project` and + // `UV::project`, and since `TU` and `UV` are `Cast`, the `Project::project` + // impl preserves the address of the referent. + unsafe impl Cast for TransitiveProject + where + T: ?Sized, + U: ?Sized, + V: ?Sized, + TU: Cast, + UV: Cast, + { + } + + // SAFETY: Since the `Project::project` impl delegates to `TU::project` and + // `UV::project`, and since `TU` and `UV` are `CastExact`, the `Project::project` + // impl preserves the set of referent bytes. + unsafe impl CastExact for TransitiveProject + where + T: ?Sized, + U: ?Sized, + V: ?Sized, + TU: CastExact, + UV: CastExact, + { + } + + /// A cast from `T` to `[u8]`. + #[allow(missing_copy_implementations, missing_debug_implementations)] + pub struct AsBytesCast; + + // SAFETY: `project` constructs a pointer with the same address as `src` + // and with a referent of the same size as `*src`. It does this using + // provenance-preserving operations. + // + // FIXME(https://github.com/rust-lang/unsafe-code-guidelines/issues/594): + // Technically, this proof assumes that `*src` is contiguous (the same is + // true of other proofs in this codebase). Is this guaranteed anywhere? + unsafe impl Project for AsBytesCast { + #[inline(always)] + fn project(src: PtrInner<'_, T>) -> *mut [u8] { + let bytes = match T::size_of_val_raw(src.as_non_null()) { + Some(bytes) => bytes, + // SAFETY: `KnownLayout::size_of_val_raw` promises to always + // return `Some` so long as the resulting size fits in a + // `usize`. By invariant on `PtrInner`, `src` refers to a range + // of bytes whose size fits in an `isize`, which implies that it + // also fits in a `usize`. 
+ None => unsafe { core::hint::unreachable_unchecked() }, + }; + + core::ptr::slice_from_raw_parts_mut(src.as_ptr().cast::(), bytes) + } + } + + // SAFETY: The `Project::project` impl preserves referent address. + unsafe impl Cast for AsBytesCast {} + + // SAFETY: The `Project::project` impl preserves the set of referent bytes. + unsafe impl CastExact for AsBytesCast {} + + /// A cast from any type to `()`. + #[allow(missing_copy_implementations, missing_debug_implementations)] + pub struct CastToUnit; + + // SAFETY: The `project` implementation projects to a subset of its + // argument's referent using provenance-preserving operations. + unsafe impl Project for CastToUnit { + #[inline(always)] + fn project(src: PtrInner<'_, T>) -> *mut () { + src.as_ptr().cast::<()>() + } + } + + // SAFETY: The `project` implementation preserves referent address. + unsafe impl Cast for CastToUnit {} +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/ptr.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/ptr.rs new file mode 100644 index 0000000000000000000000000000000000000000..83c116747b0f49509be20f751341df006dc03eb9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/ptr.rs @@ -0,0 +1,1528 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use core::{ + fmt::{Debug, Formatter}, + marker::PhantomData, +}; + +use crate::{ + pointer::{ + inner::PtrInner, + invariant::*, + transmute::{MutationCompatible, SizeEq, TransmuteFromPtr}, + }, + AlignmentError, CastError, CastType, KnownLayout, SizeError, TryFromBytes, ValidityError, +}; + +/// Module used to gate access to [`Ptr`]'s fields. 
+mod def { + #[cfg(doc)] + use super::super::invariant; + use super::*; + + /// A raw pointer with more restrictions. + /// + /// `Ptr` is similar to [`NonNull`], but it is more restrictive in the + /// following ways (note that these requirements only hold of non-zero-sized + /// referents): + /// - It must derive from a valid allocation. + /// - It must reference a byte range which is contained inside the + /// allocation from which it derives. + /// - As a consequence, the byte range it references must have a size + /// which does not overflow `isize`. + /// + /// Depending on how `Ptr` is parameterized, it may have additional + /// invariants: + /// - `ptr` conforms to the aliasing invariant of + /// [`I::Aliasing`](invariant::Aliasing). + /// - `ptr` conforms to the alignment invariant of + /// [`I::Alignment`](invariant::Alignment). + /// - `ptr` conforms to the validity invariant of + /// [`I::Validity`](invariant::Validity). + /// + /// `Ptr<'a, T>` is [covariant] in `'a` and invariant in `T`. + /// + /// [`NonNull`]: core::ptr::NonNull + /// [covariant]: https://doc.rust-lang.org/reference/subtyping.html + pub struct Ptr<'a, T, I> + where + T: ?Sized, + I: Invariants, + { + /// # Invariants + /// + /// 0. `ptr` conforms to the aliasing invariant of + /// [`I::Aliasing`](invariant::Aliasing). + /// 1. `ptr` conforms to the alignment invariant of + /// [`I::Alignment`](invariant::Alignment). + /// 2. `ptr` conforms to the validity invariant of + /// [`I::Validity`](invariant::Validity). + // SAFETY: `PtrInner<'a, T>` is covariant in `'a` and invariant in `T`. + ptr: PtrInner<'a, T>, + _invariants: PhantomData, + } + + impl<'a, T, I> Ptr<'a, T, I> + where + T: 'a + ?Sized, + I: Invariants, + { + /// Constructs a new `Ptr` from a [`PtrInner`]. + /// + /// # Safety + /// + /// The caller promises that: + /// + /// 0. `ptr` conforms to the aliasing invariant of + /// [`I::Aliasing`](invariant::Aliasing). + /// 1. 
`ptr` conforms to the alignment invariant of + /// [`I::Alignment`](invariant::Alignment). + /// 2. `ptr` conforms to the validity invariant of + /// [`I::Validity`](invariant::Validity). + pub(crate) unsafe fn from_inner(ptr: PtrInner<'a, T>) -> Ptr<'a, T, I> { + // SAFETY: The caller has promised to satisfy all safety invariants + // of `Ptr`. + Self { ptr, _invariants: PhantomData } + } + + /// Converts this `Ptr` to a [`PtrInner`]. + /// + /// Note that this method does not consume `self`. The caller should + /// watch out for `unsafe` code which uses the returned value in a way + /// that violates the safety invariants of `self`. + pub(crate) fn as_inner(&self) -> PtrInner<'a, T> { + self.ptr + } + } +} + +#[allow(unreachable_pub)] // This is a false positive on our MSRV toolchain. +pub use def::Ptr; + +/// External trait implementations on [`Ptr`]. +mod _external { + use super::*; + + /// SAFETY: Shared pointers are safely `Copy`. `Ptr`'s other invariants + /// (besides aliasing) are unaffected by the number of references that exist + /// to `Ptr`'s referent. The notable cases are: + /// - Alignment is a property of the referent type (`T`) and the address, + /// both of which are unchanged + /// - Let `S(T, V)` be the set of bit values permitted to appear in the + /// referent of a `Ptr>`. Since this copy + /// does not change `I::Validity` or `T`, `S(T, I::Validity)` is also + /// unchanged. + /// + /// We are required to guarantee that the referents of the original `Ptr` + /// and of the copy (which, of course, are actually the same since they + /// live in the same byte address range) both remain in the set `S(T, + /// I::Validity)`. Since this invariant holds on the original `Ptr`, it + /// cannot be violated by the original `Ptr`, and thus the original `Ptr` + /// cannot be used to violate this invariant on the copy. The inverse + /// holds as well. 
+ impl<'a, T, I> Copy for Ptr<'a, T, I> + where + T: 'a + ?Sized, + I: Invariants, + { + } + + /// SAFETY: See the safety comment on `Copy`. + impl<'a, T, I> Clone for Ptr<'a, T, I> + where + T: 'a + ?Sized, + I: Invariants, + { + #[inline] + fn clone(&self) -> Self { + *self + } + } + + impl<'a, T, I> Debug for Ptr<'a, T, I> + where + T: 'a + ?Sized, + I: Invariants, + { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + self.as_inner().as_non_null().fmt(f) + } + } +} + +/// Methods for converting to and from `Ptr` and Rust's safe reference types. +mod _conversions { + use super::*; + use crate::pointer::cast::{CastExact, CastSized, IdCast}; + + /// `&'a T` → `Ptr<'a, T>` + impl<'a, T> Ptr<'a, T, (Shared, Aligned, Valid)> + where + T: 'a + ?Sized, + { + /// Constructs a `Ptr` from a shared reference. + #[doc(hidden)] + #[inline(always)] + pub fn from_ref(ptr: &'a T) -> Self { + let inner = PtrInner::from_ref(ptr); + // SAFETY: + // 0. `ptr`, by invariant on `&'a T`, conforms to the aliasing + // invariant of `Shared`. + // 1. `ptr`, by invariant on `&'a T`, conforms to the alignment + // invariant of `Aligned`. + // 2. `ptr`'s referent, by invariant on `&'a T`, is a bit-valid `T`. + // This satisfies the requirement that a `Ptr` + // point to a bit-valid `T`. Even if `T` permits interior + // mutation, this invariant guarantees that the returned `Ptr` + // can only ever be used to modify the referent to store + // bit-valid `T`s, which ensures that the returned `Ptr` cannot + // be used to violate the soundness of the original `ptr: &'a T` + // or of any other references that may exist to the same + // referent. + unsafe { Self::from_inner(inner) } + } + } + + /// `&'a mut T` → `Ptr<'a, T>` + impl<'a, T> Ptr<'a, T, (Exclusive, Aligned, Valid)> + where + T: 'a + ?Sized, + { + /// Constructs a `Ptr` from an exclusive reference. 
+ #[doc(hidden)] + #[inline(always)] + pub fn from_mut(ptr: &'a mut T) -> Self { + let inner = PtrInner::from_mut(ptr); + // SAFETY: + // 0. `ptr`, by invariant on `&'a mut T`, conforms to the aliasing + // invariant of `Exclusive`. + // 1. `ptr`, by invariant on `&'a mut T`, conforms to the alignment + // invariant of `Aligned`. + // 2. `ptr`'s referent, by invariant on `&'a mut T`, is a bit-valid + // `T`. This satisfies the requirement that a `Ptr` point to a bit-valid `T`. This invariant guarantees + // that the returned `Ptr` can only ever be used to modify the + // referent to store bit-valid `T`s, which ensures that the + // returned `Ptr` cannot be used to violate the soundness of the + // original `ptr: &'a mut T`. + unsafe { Self::from_inner(inner) } + } + } + + /// `Ptr<'a, T>` → `&'a T` + impl<'a, T, I> Ptr<'a, T, I> + where + T: 'a + ?Sized, + I: Invariants, + I::Aliasing: Reference, + { + /// Converts `self` to a shared reference. + // This consumes `self`, not `&self`, because `self` is, logically, a + // pointer. For `I::Aliasing = invariant::Shared`, `Self: Copy`, and so + // this doesn't prevent the caller from still using the pointer after + // calling `as_ref`. + #[allow(clippy::wrong_self_convention)] + pub(crate) fn as_ref(self) -> &'a T { + let raw = self.as_inner().as_non_null(); + // SAFETY: `self` satisfies the `Aligned` invariant, so we know that + // `raw` is validly-aligned for `T`. + #[cfg(miri)] + unsafe { + crate::util::miri_promise_symbolic_alignment( + raw.as_ptr().cast(), + core::mem::align_of_val_raw(raw.as_ptr()), + ); + } + // SAFETY: This invocation of `NonNull::as_ref` satisfies its + // documented safety preconditions: + // + // 1. The pointer is properly aligned. This is ensured by-contract + // on `Ptr`, because the `I::Alignment` is `Aligned`. + // + // 2. 
If the pointer's referent is not zero-sized, then the pointer + // must be “dereferenceable” in the sense defined in the module + // documentation; i.e.: + // + // > The memory range of the given size starting at the pointer + // > must all be within the bounds of a single allocated object. + // > [2] + // + // This is ensured by contract on all `PtrInner`s. + // + // 3. The pointer must point to a validly-initialized instance of + // `T`. This is ensured by-contract on `Ptr`, because the + // `I::Validity` is `Valid`. + // + // 4. You must enforce Rust’s aliasing rules. This is ensured by + // contract on `Ptr`, because `I::Aliasing: Reference`. Either it + // is `Shared` or `Exclusive`. If it is `Shared`, other + // references may not mutate the referent outside of + // `UnsafeCell`s. + // + // [1]: https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.as_ref + // [2]: https://doc.rust-lang.org/std/ptr/index.html#safety + unsafe { raw.as_ref() } + } + } + + impl<'a, T, I> Ptr<'a, T, I> + where + T: 'a + ?Sized, + I: Invariants, + I::Aliasing: Reference, + { + /// Reborrows `self`, producing another `Ptr`. + /// + /// Since `self` is borrowed mutably, this prevents any methods from + /// being called on `self` as long as the returned `Ptr` exists. + #[doc(hidden)] + #[inline] + #[allow(clippy::needless_lifetimes)] // Allows us to name the lifetime in the safety comment below. + pub fn reborrow<'b>(&'b mut self) -> Ptr<'b, T, I> + where + 'a: 'b, + { + // SAFETY: The following all hold by invariant on `self`, and thus + // hold of `ptr = self.as_inner()`: + // 0. SEE BELOW. + // 1. `ptr` conforms to the alignment invariant of + // [`I::Alignment`](invariant::Alignment). + // 2. `ptr` conforms to the validity invariant of + // [`I::Validity`](invariant::Validity). `self` and the returned + // `Ptr` permit the same bit values in their referents since they + // have the same referent type (`T`) and the same validity + // (`I::Validity`). 
Thus, regardless of what mutation is + // permitted (`Exclusive` aliasing or `Shared`-aliased interior + // mutation), neither can be used to write a value to the + // referent which violates the other's validity invariant. + // + // For aliasing (0 above), since `I::Aliasing: Reference`, + // there are two cases for `I::Aliasing`: + // - For `invariant::Shared`: `'a` outlives `'b`, and so the + // returned `Ptr` does not permit accessing the referent any + // longer than is possible via `self`. For shared aliasing, it is + // sound for multiple `Ptr`s to exist simultaneously which + // reference the same memory, so creating a new one is not + // problematic. + // - For `invariant::Exclusive`: Since `self` is `&'b mut` and we + // return a `Ptr` with lifetime `'b`, `self` is inaccessible to + // the caller for the lifetime `'b` - in other words, `self` is + // inaccessible to the caller as long as the returned `Ptr` + // exists. Since `self` is an exclusive `Ptr`, no other live + // references or `Ptr`s may exist which refer to the same memory + // while `self` is live. Thus, as long as the returned `Ptr` + // exists, no other references or `Ptr`s which refer to the same + // memory may be live. + unsafe { Ptr::from_inner(self.as_inner()) } + } + + /// Reborrows `self` as shared, producing another `Ptr` with `Shared` + /// aliasing. + /// + /// Since `self` is borrowed mutably, this prevents any methods from + /// being called on `self` as long as the returned `Ptr` exists. + #[doc(hidden)] + #[inline] + #[allow(clippy::needless_lifetimes)] // Allows us to name the lifetime in the safety comment below. + pub fn reborrow_shared<'b>(&'b mut self) -> Ptr<'b, T, (Shared, I::Alignment, I::Validity)> + where + 'a: 'b, + { + // SAFETY: The following all hold by invariant on `self`, and thus + // hold of `ptr = self.as_inner()`: + // 0. SEE BELOW. + // 1. `ptr` conforms to the alignment invariant of + // [`I::Alignment`](invariant::Alignment). + // 2. 
`ptr` conforms to the validity invariant of + // [`I::Validity`](invariant::Validity). `self` and the returned + // `Ptr` permit the same bit values in their referents since they + // have the same referent type (`T`) and the same validity + // (`I::Validity`). Thus, regardless of what mutation is + // permitted (`Exclusive` aliasing or `Shared`-aliased interior + // mutation), neither can be used to write a value to the + // referent which violates the other's validity invariant. + // + // For aliasing (0 above), since `I::Aliasing: Reference`, + // there are two cases for `I::Aliasing`: + // - For `invariant::Shared`: `'a` outlives `'b`, and so the + // returned `Ptr` does not permit accessing the referent any + // longer than is possible via `self`. For shared aliasing, it is + // sound for multiple `Ptr`s to exist simultaneously which + // reference the same memory, so creating a new one is not + // problematic. + // - For `invariant::Exclusive`: Since `self` is `&'b mut` and we + // return a `Ptr` with lifetime `'b`, `self` is inaccessible to + // the caller for the lifetime `'b` - in other words, `self` is + // inaccessible to the caller as long as the returned `Ptr` + // exists. Since `self` is an exclusive `Ptr`, no other live + // references or `Ptr`s may exist which refer to the same memory + // while `self` is live. Thus, as long as the returned `Ptr` + // exists, no other references or `Ptr`s which refer to the same + // memory may be live. + unsafe { Ptr::from_inner(self.as_inner()) } + } + } + + /// `Ptr<'a, T>` → `&'a mut T` + impl<'a, T> Ptr<'a, T, (Exclusive, Aligned, Valid)> + where + T: 'a + ?Sized, + { + /// Converts `self` to a mutable reference. + #[allow(clippy::wrong_self_convention)] + pub(crate) fn as_mut(self) -> &'a mut T { + let mut raw = self.as_inner().as_non_null(); + // SAFETY: `self` satisfies the `Aligned` invariant, so we know that + // `raw` is validly-aligned for `T`. 
+ #[cfg(miri)] + unsafe { + crate::util::miri_promise_symbolic_alignment( + raw.as_ptr().cast(), + core::mem::align_of_val_raw(raw.as_ptr()), + ); + } + // SAFETY: This invocation of `NonNull::as_mut` satisfies its + // documented safety preconditions: + // + // 1. The pointer is properly aligned. This is ensured by-contract + // on `Ptr`, because the `ALIGNMENT_INVARIANT` is `Aligned`. + // + // 2. If the pointer's referent is not zero-sized, then the pointer + // must be “dereferenceable” in the sense defined in the module + // documentation; i.e.: + // + // > The memory range of the given size starting at the pointer + // > must all be within the bounds of a single allocated object. + // > [2] + // + // This is ensured by contract on all `PtrInner`s. + // + // 3. The pointer must point to a validly-initialized instance of + // `T`. This is ensured by-contract on `Ptr`, because the + // validity invariant is `Valid`. + // + // 4. You must enforce Rust’s aliasing rules. This is ensured by + // contract on `Ptr`, because the `ALIASING_INVARIANT` is + // `Exclusive`. + // + // [1]: https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.as_mut + // [2]: https://doc.rust-lang.org/std/ptr/index.html#safety + unsafe { raw.as_mut() } + } + } + + /// `Ptr<'a, T>` → `Ptr<'a, U>` + impl<'a, T: ?Sized, I> Ptr<'a, T, I> + where + I: Invariants, + { + #[must_use] + #[inline(always)] + pub fn transmute(self) -> Ptr<'a, U, (I::Aliasing, Unaligned, V)> + where + V: Validity, + U: TransmuteFromPtr>::CastFrom, R> + + SizeEq + + ?Sized, + { + self.transmute_with::>::CastFrom, R>() + } + + pub(crate) fn transmute_with(self) -> Ptr<'a, U, (I::Aliasing, Unaligned, V)> + where + V: Validity, + U: TransmuteFromPtr + ?Sized, + C: CastExact, + { + // SAFETY: + // - By `C: CastExact`, `C` preserves referent address, and so we + // don't need to consider projections in the following safety + // arguments. 
+ // - If aliasing is `Shared`, then by `U: TransmuteFromPtr`, at + // least one of the following holds: + // - `T: Immutable` and `U: Immutable`, in which case it is + // trivially sound for shared code to operate on a `&T` and `&U` + // at the same time, as neither can perform interior mutation + // - It is directly guaranteed that it is sound for shared code to + // operate on these references simultaneously + // - By `U: TransmuteFromPtr`, it + // is sound to perform this transmute using `C`. + unsafe { self.project_transmute_unchecked::<_, _, C>() } + } + + #[doc(hidden)] + #[inline(always)] + #[must_use] + pub fn recall_validity(self) -> Ptr<'a, T, (I::Aliasing, I::Alignment, V)> + where + V: Validity, + T: TransmuteFromPtr, + { + let ptr = self.transmute_with::(); + // SAFETY: `self` and `ptr` have the same address and referent type. + // Therefore, if `self` satisfies `I::Alignment`, then so does + // `ptr`. + unsafe { ptr.assume_alignment::() } + } + + /// Projects and/or transmutes to a different (unsized) referent type + /// without checking interior mutability. + /// + /// Callers should prefer [`cast`] or [`project`] where possible. + /// + /// [`cast`]: Ptr::cast + /// [`project`]: Ptr::project + /// + /// # Safety + /// + /// The caller promises that: + /// - If `I::Aliasing` is [`Shared`], it must not be possible for safe + /// code, operating on a `&T` and `&U`, with the referents of `self` + /// and `self.project_transmute_unchecked()`, respectively, to cause + /// undefined behavior. + /// - It is sound to project and/or transmute a pointer of type `T` with + /// aliasing `I::Aliasing` and validity `I::Validity` to a pointer of + /// type `U` with aliasing `I::Aliasing` and validity `V`. 
This is a + /// subtle soundness requirement that is a function of `T`, `U`, + /// `I::Aliasing`, `I::Validity`, and `V`, and may depend upon the + /// presence, absence, or specific location of `UnsafeCell`s in `T` + /// and/or `U`, and on whether interior mutation is ever permitted via + /// those `UnsafeCell`s. See [`Validity`] for more details. + #[doc(hidden)] + #[inline(always)] + #[must_use] + pub unsafe fn project_transmute_unchecked( + self, + ) -> Ptr<'a, U, (I::Aliasing, Unaligned, V)> + where + V: Validity, + P: crate::pointer::cast::Project, + { + let ptr = self.as_inner().project::<_, P>(); + + // SAFETY: + // + // The following safety arguments rely on the fact that `P: Project` + // guarantees that `P` is a referent-preserving or -shrinking + // projection. Thus, `ptr` addresses a subset of the bytes of + // `*self`, and so certain properties that hold of `*self` also hold + // of `*ptr`. + // + // 0. `ptr` conforms to the aliasing invariant of `I::Aliasing`: + // - `Exclusive`: `self` is the only `Ptr` or reference which is + // permitted to read or modify the referent for the lifetime + // `'a`. Since we consume `self` by value, the returned pointer + // remains the only `Ptr` or reference which is permitted to + // read or modify the referent for the lifetime `'a`. + // - `Shared`: Since `self` has aliasing `Shared`, we know that + // no other code may mutate the referent during the lifetime + // `'a`, except via `UnsafeCell`s, and except as permitted by + // `T`'s library safety invariants. The caller promises that + // any safe operations which can be permitted on a `&T` and a + // `&U` simultaneously must be sound. Thus, no operations on a + // `&U` could violate `&T`'s library safety invariants, and + // vice-versa. Since any mutation via shared references outside + // of `UnsafeCell`s is unsound, this must be impossible using + // `&T` and `&U`. + // - `Inaccessible`: There are no restrictions we need to uphold. + // 1. 
`ptr` trivially satisfies the alignment invariant `Unaligned`. + // 2. The caller promises that the returned pointer satisfies the + // validity invariant `V` with respect to its referent type, `U`. + unsafe { Ptr::from_inner(ptr) } + } + } + + /// `Ptr<'a, T, (_, _, _)>` → `Ptr<'a, Unalign, (_, Aligned, _)>` + impl<'a, T, I> Ptr<'a, T, I> + where + I: Invariants, + { + /// Converts a `Ptr` an unaligned `T` into a `Ptr` to an aligned + /// `Unalign`. + pub(crate) fn into_unalign( + self, + ) -> Ptr<'a, crate::Unalign, (I::Aliasing, Aligned, I::Validity)> { + // FIXME(#1359): This should be a `transmute_with` call. + // Unfortunately, to avoid blanket impl conflicts, we only implement + // `TransmuteFrom` for `Unalign` (and vice versa) specifically + // for `Valid` validity, not for all validity types. + + // SAFETY: + // - By `CastSized: Cast`, `CastSized` preserves referent address, + // and so we don't need to consider projections in the following + // safety arguments. + // - Since `Unalign` has the same layout as `T`, the returned + // pointer refers to `UnsafeCell`s at the same locations as + // `self`. + // - `Unalign` promises to have the same bit validity as `T`. By + // invariant on `Validity`, the set of bit patterns allowed in the + // referent of a `Ptr` is only a function of the + // validity of `X` and of `V`. Thus, the set of bit patterns + // allowed in the referent of a `Ptr` is + // the same as the set of bit patterns allowed in the referent of + // a `Ptr, (_, _, I::Validity)>`. As a result, `self` + // and the returned `Ptr` permit the same set of bit patterns in + // their referents, and so neither can be used to violate the + // validity of the other. + let ptr = unsafe { self.project_transmute_unchecked::<_, _, CastSized>() }; + ptr.bikeshed_recall_aligned() + } + } + + impl<'a, T, I> Ptr<'a, T, I> + where + T: ?Sized, + I: Invariants, + I::Aliasing: Reference, + { + /// Reads the referent. 
+ #[must_use] + #[inline(always)] + pub fn read(self) -> T + where + T: Copy, + T: Read, + { + ::read(self) + } + + /// Views the value as an aligned reference. + /// + /// This is only available if `T` is [`Unaligned`]. + #[must_use] + #[inline] + pub fn unaligned_as_ref(self) -> &'a T + where + T: crate::Unaligned, + { + self.bikeshed_recall_aligned().as_ref() + } + } +} + +/// State transitions between invariants. +mod _transitions { + use super::*; + use crate::{ + pointer::{cast::IdCast, transmute::TryTransmuteFromPtr}, + ReadOnly, + }; + + impl<'a, T, I> Ptr<'a, T, I> + where + T: 'a + ?Sized, + I: Invariants, + { + /// Assumes that `self` satisfies the invariants `H`. + /// + /// # Safety + /// + /// The caller promises that `self` satisfies the invariants `H`. + unsafe fn assume_invariants(self) -> Ptr<'a, T, H> { + // SAFETY: The caller has promised to satisfy all parameterized + // invariants of `Ptr`. `Ptr`'s other invariants are satisfied + // by-contract by the source `Ptr`. + unsafe { Ptr::from_inner(self.as_inner()) } + } + + /// Helps the type system unify two distinct invariant types which are + /// actually the same. + pub(crate) fn unify_invariants< + H: Invariants, + >( + self, + ) -> Ptr<'a, T, H> { + // SAFETY: The associated type bounds on `H` ensure that the + // invariants are unchanged. + unsafe { self.assume_invariants::() } + } + + /// Assumes that `self`'s referent is validly-aligned for `T` if + /// required by `A`. + /// + /// # Safety + /// + /// The caller promises that `self`'s referent conforms to the alignment + /// invariant of `T` if required by `A`. + #[inline] + pub(crate) unsafe fn assume_alignment( + self, + ) -> Ptr<'a, T, (I::Aliasing, A, I::Validity)> { + // SAFETY: The caller promises that `self`'s referent is + // well-aligned for `T` if required by `A` . + unsafe { self.assume_invariants() } + } + + /// Checks the `self`'s alignment at runtime, returning an aligned `Ptr` + /// on success. 
+ pub(crate) fn try_into_aligned( + self, + ) -> Result, AlignmentError> + where + T: Sized, + { + if let Err(err) = + crate::util::validate_aligned_to::<_, T>(self.as_inner().as_non_null()) + { + return Err(err.with_src(self)); + } + + // SAFETY: We just checked the alignment. + Ok(unsafe { self.assume_alignment::() }) + } + + /// Recalls that `self`'s referent is validly-aligned for `T`. + #[inline] + // FIXME(#859): Reconsider the name of this method before making it + // public. + pub(crate) fn bikeshed_recall_aligned( + self, + ) -> Ptr<'a, T, (I::Aliasing, Aligned, I::Validity)> + where + T: crate::Unaligned, + { + // SAFETY: The bound `T: Unaligned` ensures that `T` has no + // non-trivial alignment requirement. + unsafe { self.assume_alignment::() } + } + + /// Assumes that `self`'s referent conforms to the validity requirement + /// of `V`. + /// + /// # Safety + /// + /// The caller promises that `self`'s referent conforms to the validity + /// requirement of `V`. + #[doc(hidden)] + #[must_use] + #[inline] + pub unsafe fn assume_validity( + self, + ) -> Ptr<'a, T, (I::Aliasing, I::Alignment, V)> { + // SAFETY: The caller promises that `self`'s referent conforms to + // the validity requirement of `V`. + unsafe { self.assume_invariants() } + } + + /// A shorthand for `self.assume_validity()`. + /// + /// # Safety + /// + /// The caller promises to uphold the safety preconditions of + /// `self.assume_validity()`. + #[doc(hidden)] + #[must_use] + #[inline] + pub unsafe fn assume_initialized( + self, + ) -> Ptr<'a, T, (I::Aliasing, I::Alignment, Initialized)> { + // SAFETY: The caller has promised to uphold the safety + // preconditions. + unsafe { self.assume_validity::() } + } + + /// A shorthand for `self.assume_validity()`. + /// + /// # Safety + /// + /// The caller promises to uphold the safety preconditions of + /// `self.assume_validity()`. 
+ #[doc(hidden)] + #[must_use] + #[inline] + pub unsafe fn assume_valid(self) -> Ptr<'a, T, (I::Aliasing, I::Alignment, Valid)> { + // SAFETY: The caller has promised to uphold the safety + // preconditions. + unsafe { self.assume_validity::() } + } + + /// Checks that `self`'s referent is validly initialized for `T`, + /// returning a `Ptr` with `Valid` on success. + /// + /// # Panics + /// + /// This method will panic if + /// [`T::is_bit_valid`][TryFromBytes::is_bit_valid] panics. + /// + /// # Safety + /// + /// On error, unsafe code may rely on this method's returned + /// `ValidityError` containing `self`. + #[inline] + pub(crate) fn try_into_valid( + mut self, + ) -> Result, ValidityError> + where + T: TryFromBytes + + Read + + TryTransmuteFromPtr, + ReadOnly: Read, + I::Aliasing: Reference, + I: Invariants, + { + // This call may panic. If that happens, it doesn't cause any + // soundness issues, as we have not generated any invalid state + // which we need to fix before returning. + if T::is_bit_valid(self.reborrow().transmute::<_, _, _>().reborrow_shared()) { + // SAFETY: If `T::is_bit_valid`, code may assume that `self` + // contains a bit-valid instance of `T`. By `T: + // TryTransmuteFromPtr`, so + // long as `self`'s referent conforms to the `Valid` validity + // for `T` (which we just confirmed), then this transmute is + // sound. + Ok(unsafe { self.assume_valid() }) + } else { + Err(ValidityError::new(self)) + } + } + + /// Forgets that `self`'s referent is validly-aligned for `T`. + #[doc(hidden)] + #[must_use] + #[inline] + pub fn forget_aligned(self) -> Ptr<'a, T, (I::Aliasing, Unaligned, I::Validity)> { + // SAFETY: `Unaligned` is less restrictive than `Aligned`. + unsafe { self.assume_invariants() } + } + } +} + +/// Casts of the referent type. 
+pub(crate) use _casts::TryWithError; +mod _casts { + use core::cell::UnsafeCell; + + use super::*; + use crate::{ + pointer::cast::{AsBytesCast, Cast}, + HasTag, ProjectField, + }; + + impl<'a, T, I> Ptr<'a, T, I> + where + T: 'a + ?Sized, + I: Invariants, + { + /// Casts to a different referent type without checking interior + /// mutability. + /// + /// Callers should prefer [`cast`][Ptr::cast] where possible. + /// + /// # Safety + /// + /// If `I::Aliasing` is [`Shared`], it must not be possible for safe + /// code, operating on a `&T` and `&U` with the same referent + /// simultaneously, to cause undefined behavior. + #[doc(hidden)] + #[inline(always)] + #[must_use] + pub unsafe fn cast_unchecked>( + self, + ) -> Ptr<'a, U, (I::Aliasing, Unaligned, I::Validity)> + where + U: 'a + CastableFrom + ?Sized, + { + // SAFETY: + // - By `C: Cast`, `C` preserves the address of the referent. + // - If `I::Aliasing` is [`Shared`], the caller promises that it + // is not possible for safe code, operating on a `&T` and `&U` + // with the same referent simultaneously, to cause undefined + // behavior. + // - By `U: CastableFrom`, + // `I::Validity` is either `Uninit` or `Initialized`. In both + // cases, the bit validity `I::Validity` has the same semantics + // regardless of referent type. In other words, the set of allowed + // referent values for `Ptr` and `Ptr` are identical. As a consequence, neither + // `self` nor the returned `Ptr` can be used to write values which + // are invalid for the other. + unsafe { self.project_transmute_unchecked::<_, _, C>() } + } + + /// Casts to a different referent type. 
+ #[doc(hidden)] + #[inline(always)] + #[must_use] + pub fn cast(self) -> Ptr<'a, U, (I::Aliasing, Unaligned, I::Validity)> + where + T: MutationCompatible, + U: 'a + ?Sized + CastableFrom, + C: Cast, + { + // SAFETY: Because `T: MutationCompatible`, one + // of the following holds: + // - `T: Read` and `U: Read`, in which + // case one of the following holds: + // - `I::Aliasing` is `Exclusive` + // - `T` and `U` are both `Immutable` + // - It is sound for safe code to operate on `&T` and `&U` with the + // same referent simultaneously. + unsafe { self.cast_unchecked::<_, C>() } + } + + #[inline(always)] + pub fn project( + mut self, + ) -> Result, T::Error> + where + T: ProjectField, + I::Aliasing: Reference, + { + use crate::pointer::cast::Projection; + match T::is_projectable(self.reborrow().project_tag()) { + Ok(()) => { + let inner = self.as_inner(); + let projected = inner.project::<_, Projection>(); + // SAFETY: By `T: ProjectField`, + // for `self: Ptr<'_, T, I>` such that `T::is_projectable` + // (which we've verified in this match arm), + // `T::project(self.as_inner())` conforms to + // `T::Invariants`. The `projected` pointer satisfies these + // invariants because it is produced by way of an + // abstraction that is equivalent to + // `T::project(ptr.as_inner())`: by invariant on + // `PtrInner::project`, `projected` is guaranteed to address + // the subset of the bytes of `inner`'s referent addressed + // by `Projection::project(inner)`, and by invariant on + // `Projection`, `Projection::project` is implemented by + // delegating to an implementation of `HasField::project`. + Ok(unsafe { Ptr::from_inner(projected) }) + } + Err(err) => Err(err), + } + } + + #[must_use] + #[inline(always)] + pub(crate) fn project_tag(self) -> Ptr<'a, T::Tag, I> + where + T: HasTag, + { + // SAFETY: By invariant on `Self::ProjectToTag`, this is a sound + // projection. 
+ let tag = unsafe { self.project_transmute_unchecked::<_, _, T::ProjectToTag>() }; + // SAFETY: By invariant on `Self::ProjectToTag`, the projected + // pointer has the same alignment as `ptr`. + let tag = unsafe { tag.assume_alignment() }; + tag.unify_invariants() + } + + /// Attempts to transform the pointer, restoring the original on + /// failure. + /// + /// # Safety + /// + /// If `I::Aliasing != Shared`, then if `f` returns `Err(err)`, no copy + /// of `f`'s argument must exist outside of `err`. + #[inline(always)] + pub(crate) unsafe fn try_with_unchecked( + self, + f: F, + ) -> Result, E::Mapped> + where + U: 'a + ?Sized, + J: Invariants, + E: TryWithError, + F: FnOnce(Ptr<'a, T, I>) -> Result, E>, + { + let old_inner = self.as_inner(); + #[rustfmt::skip] + let res = f(self).map_err(#[inline(always)] move |err: E| { + err.map(#[inline(always)] |src| { + drop(src); + + // SAFETY: + // 0. Aliasing is either `Shared` or `Exclusive`: + // - If aliasing is `Shared`, then it cannot violate + // aliasing make another copy of this pointer (in fact, + // using `I::Aliasing = Shared`, we could have just + // cloned `self`). + // - If aliasing is `Exclusive`, then `f` is not allowed + // to make another copy of `self`. In `map_err`, we are + // consuming the only value in the returned `Result`. + // By invariant on `E: TryWithError`, that `err: + // E` only contains a single `Self` and no other + // non-ZST fields which could be `Ptr`s or references + // to `self`'s referent. By the same invariant, `map` + // consumes this single `Self` and passes it to this + // closure. Since `self` was, by invariant on + // `Exclusive`, the only `Ptr` or reference live for + // `'a` with this referent, and since we `drop(src)` + // above, there are no copies left, and so we are + // creating the only copy. + // 1. `self` conforms to `I::Aliasing` by invariant on + // `Ptr`, and `old_inner` has the same address, so it + // does too. + // 2. 
`f` could not have violated `self`'s validity without + // itself being unsound. Assuming that `f` is sound, the + // referent of `self` is still valid for `T`. + unsafe { Ptr::from_inner(old_inner) } + }) + }); + res + } + + /// Attempts to transform the pointer, restoring the original on + /// failure. + pub(crate) fn try_with(self, f: F) -> Result, E::Mapped> + where + U: 'a + ?Sized, + J: Invariants, + E: TryWithError, + F: FnOnce(Ptr<'a, T, I>) -> Result, E>, + I: Invariants, + { + // SAFETY: `I::Aliasing = Shared`, so the safety condition does not + // apply. + unsafe { self.try_with_unchecked(f) } + } + } + + /// # Safety + /// + /// `Self` only contains a single `Self::Inner`, and `Self::Mapped` only + /// contains a single `MappedInner`. Other than that, `Self` and + /// `Self::Mapped` contain no non-ZST fields. + /// + /// `map` must pass ownership of `self`'s sole `Self::Inner` to `f`. + pub(crate) unsafe trait TryWithError { + type Inner; + type Mapped; + fn map MappedInner>(self, f: F) -> Self::Mapped; + } + + impl<'a, T, I> Ptr<'a, T, I> + where + T: 'a + KnownLayout + ?Sized, + I: Invariants, + { + /// Casts this pointer-to-initialized into a pointer-to-bytes. + #[allow(clippy::wrong_self_convention)] + #[must_use] + #[inline] + pub fn as_bytes(self) -> Ptr<'a, [u8], (I::Aliasing, Aligned, Valid)> + where + [u8]: TransmuteFromPtr, + { + self.transmute_with::<[u8], Valid, AsBytesCast, _>().bikeshed_recall_aligned() + } + } + + impl<'a, T, I, const N: usize> Ptr<'a, [T; N], I> + where + T: 'a, + I: Invariants, + { + /// Casts this pointer-to-array into a slice. + #[allow(clippy::wrong_self_convention)] + pub(crate) fn as_slice(self) -> Ptr<'a, [T], I> { + let slice = self.as_inner().as_slice(); + // SAFETY: Note that, by post-condition on `PtrInner::as_slice`, + // `slice` refers to the same byte range as `self.as_inner()`. + // + // 0. Thus, `slice` conforms to the aliasing invariant of + // `I::Aliasing` because `self` does. + // 1. 
By the above lemma, `slice` conforms to the alignment + // invariant of `I::Alignment` because `self` does. + // 2. Since `[T; N]` and `[T]` have the same bit validity [1][2], + // and since `self` and the returned `Ptr` have the same validity + // invariant, neither `self` nor the returned `Ptr` can be used + // to write a value to the referent which violates the other's + // validity invariant. + // + // [1] Per https://doc.rust-lang.org/1.81.0/reference/type-layout.html#array-layout: + // + // An array of `[T; N]` has a size of `size_of::() * N` and the + // same alignment of `T`. Arrays are laid out so that the + // zero-based `nth` element of the array is offset from the start + // of the array by `n * size_of::()` bytes. + // + // ... + // + // Slices have the same layout as the section of the array they + // slice. + // + // [2] Per https://doc.rust-lang.org/1.81.0/reference/types/array.html#array-types: + // + // All elements of arrays are always initialized + unsafe { Ptr::from_inner(slice) } + } + } + + /// For caller convenience, these methods are generic over alignment + /// invariant. In practice, the referent is always well-aligned, because the + /// alignment of `[u8]` is 1. + impl<'a, I> Ptr<'a, [u8], I> + where + I: Invariants, + { + /// Attempts to cast `self` to a `U` using the given cast type. + /// + /// If `U` is a slice DST and pointer metadata (`meta`) is provided, + /// then the cast will only succeed if it would produce an object with + /// the given metadata. + /// + /// Returns `None` if the resulting `U` would be invalidly-aligned, if + /// no `U` can fit in `self`, or if the provided pointer metadata + /// describes an invalid instance of `U`. On success, returns a pointer + /// to the largest-possible `U` which fits in `self`. + /// + /// # Safety + /// + /// The caller may assume that this implementation is correct, and may + /// rely on that assumption for the soundness of their code. 
In + /// particular, the caller may assume that, if `try_cast_into` returns + /// `Some((ptr, remainder))`, then `ptr` and `remainder` refer to + /// non-overlapping byte ranges within `self`, and that `ptr` and + /// `remainder` entirely cover `self`. Finally: + /// - If this is a prefix cast, `ptr` has the same address as `self`. + /// - If this is a suffix cast, `remainder` has the same address as + /// `self`. + #[inline(always)] + pub(crate) fn try_cast_into( + self, + cast_type: CastType, + meta: Option, + ) -> Result< + (Ptr<'a, U, (I::Aliasing, Aligned, Initialized)>, Ptr<'a, [u8], I>), + CastError, + > + where + I::Aliasing: Reference, + U: 'a + ?Sized + KnownLayout + Read, + { + let (inner, remainder) = + self.as_inner().try_cast_into(cast_type, meta).map_err(|err| { + err.map_src(|inner| + // SAFETY: `PtrInner::try_cast_into` promises to return its + // original argument on error, which was originally produced + // by `self.as_inner()`, which is guaranteed to satisfy + // `Ptr`'s invariants. + unsafe { Ptr::from_inner(inner) }) + })?; + + // SAFETY: + // 0. Since `U: Read`, either: + // - `I::Aliasing` is `Exclusive`, in which case both `src` and + // `ptr` conform to `Exclusive` + // - `I::Aliasing` is `Shared` and `U` is `Immutable` (we already + // know that `[u8]: Immutable`). In this case, neither `U` nor + // `[u8]` permit mutation, and so `Shared` aliasing is + // satisfied. + // 1. `ptr` conforms to the alignment invariant of `Aligned` because + // it is derived from `try_cast_into`, which promises that the + // object described by `target` is validly aligned for `U`. + // 2. By trait bound, `self` - and thus `target` - is a bit-valid + // `[u8]`. `Ptr<[u8], (_, _, Valid)>` and `Ptr<_, (_, _, + // Initialized)>` have the same bit validity, and so neither + // `self` nor `res` can be used to write a value to the referent + // which violates the other's validity invariant. + let res = unsafe { Ptr::from_inner(inner) }; + + // SAFETY: + // 0. 
`self` and `remainder` both have the type `[u8]`. Thus, they + // have `UnsafeCell`s at the same locations. Type casting does + // not affect aliasing. + // 1. `[u8]` has no alignment requirement. + // 2. `self` has validity `Valid` and has type `[u8]`. Since + // `remainder` references a subset of `self`'s referent, it is + // also a bit-valid `[u8]`. Thus, neither `self` nor `remainder` + // can be used to write a value to the referent which violates + // the other's validity invariant. + let remainder = unsafe { Ptr::from_inner(remainder) }; + + Ok((res, remainder)) + } + + /// Attempts to cast `self` into a `U`, failing if all of the bytes of + /// `self` cannot be treated as a `U`. + /// + /// In particular, this method fails if `self` is not validly-aligned + /// for `U` or if `self`'s size is not a valid size for `U`. + /// + /// # Safety + /// + /// On success, the caller may assume that the returned pointer + /// references the same byte range as `self`. + #[allow(unused)] + #[inline(always)] + pub(crate) fn try_cast_into_no_leftover( + self, + meta: Option, + ) -> Result, CastError> + where + I::Aliasing: Reference, + U: 'a + ?Sized + KnownLayout + Read, + [u8]: Read, + { + // SAFETY: The provided closure returns the only copy of `slf`. + unsafe { + self.try_with_unchecked(|slf| match slf.try_cast_into(CastType::Prefix, meta) { + Ok((slf, remainder)) => { + if remainder.len() == 0 { + Ok(slf) + } else { + Err(CastError::Size(SizeError::<_, U>::new(()))) + } + } + Err(err) => Err(err.map_src(|_slf| ())), + }) + } + } + } + + impl<'a, T, I> Ptr<'a, UnsafeCell, I> + where + T: 'a + ?Sized, + I: Invariants, + { + /// Converts this `Ptr` into a pointer to the underlying data. + /// + /// This call borrows the `UnsafeCell` mutably (at compile-time) which + /// guarantees that we possess the only reference. + /// + /// This is like [`UnsafeCell::get_mut`], but for `Ptr`. 
+ /// + /// [`UnsafeCell::get_mut`]: core::cell::UnsafeCell::get_mut + #[must_use] + #[inline(always)] + pub fn get_mut(self) -> Ptr<'a, T, I> { + // SAFETY: As described below, `UnsafeCell` has the same size + // as `T: ?Sized` (same static size or same DST layout). Thus, + // `*const UnsafeCell as *const T` is a size-preserving cast. + define_cast!(unsafe { Cast = UnsafeCell => T }); + + // SAFETY: + // - Aliasing is `Exclusive`, and so we are not required to promise + // anything about the locations of `UnsafeCell`s. + // - `UnsafeCell` has the same bit validity as `T` [1]. + // Technically the term "representation" doesn't guarantee this, + // but the subsequent sentence in the documentation makes it clear + // that this is the intention. + // + // By invariant on `Validity`, since `T` and `UnsafeCell` have + // the same bit validity, then the set of values which may appear + // in the referent of a `Ptr` is the same as the set + // which may appear in the referent of a `Ptr, (_, + // _, V)>`. Thus, neither `self` nor `ptr` may be used to write a + // value to the referent which would violate the other's validity + // invariant. + // + // [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout: + // + // `UnsafeCell` has the same in-memory representation as its + // inner type `T`. A consequence of this guarantee is that it is + // possible to convert between `T` and `UnsafeCell`. + let ptr = unsafe { self.project_transmute_unchecked::<_, _, Cast>() }; + + // SAFETY: `UnsafeCell` has the same alignment as `T` [1], + // and so if `self` is guaranteed to be aligned, then so is the + // returned `Ptr`. + // + // [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout: + // + // `UnsafeCell` has the same in-memory representation as + // its inner type `T`. A consequence of this guarantee is that + // it is possible to convert between `T` and `UnsafeCell`. 
+ let ptr = unsafe { ptr.assume_alignment::() }; + ptr.unify_invariants() + } + } +} + +/// Projections through the referent. +mod _project { + use super::*; + + impl<'a, T, I> Ptr<'a, [T], I> + where + T: 'a, + I: Invariants, + I::Aliasing: Reference, + { + /// Iteratively projects the elements `Ptr` from `Ptr<[T]>`. + pub(crate) fn iter(&self) -> impl Iterator> { + // SAFETY: + // 0. `elem` conforms to the aliasing invariant of `I::Aliasing` + // because projection does not impact the aliasing invariant. + // 1. `elem`, conditionally, conforms to the validity invariant of + // `I::Alignment`. If `elem` is projected from data well-aligned + // for `[T]`, `elem` will be valid for `T`. + // 2. `elem` conforms to the validity invariant of `I::Validity`. + // Per https://doc.rust-lang.org/1.81.0/reference/type-layout.html#array-layout: + // + // Slices have the same layout as the section of the array they + // slice. + // + // Arrays are laid out so that the zero-based `nth` element of + // the array is offset from the start of the array by `n * + // size_of::()` bytes. Thus, `elem` addresses a valid `T` + // within the slice. Since `self` satisfies `I::Validity`, `elem` + // also satisfies `I::Validity`. + self.as_inner().iter().map(|elem| unsafe { Ptr::from_inner(elem) }) + } + } + + #[allow(clippy::needless_lifetimes)] + impl<'a, T, I> Ptr<'a, T, I> + where + T: 'a + ?Sized + KnownLayout, + I: Invariants, + { + /// The number of slice elements in the object referenced by `self`. + pub(crate) fn len(&self) -> usize { + self.as_inner().meta().get() + } + } +} + +#[cfg(test)] +mod tests { + use core::mem::{self, MaybeUninit}; + + use super::*; + #[allow(unused)] // Needed on our MSRV, but considered unused on later toolchains. 
+ use crate::util::AsAddress; + use crate::{pointer::BecauseImmutable, util::testutil::AU64, FromBytes, Immutable}; + + mod test_ptr_try_cast_into_soundness { + use super::*; + + // This test is designed so that if `Ptr::try_cast_into_xxx` are + // buggy, it will manifest as unsoundness that Miri can detect. + + // - If `size_of::() == 0`, `N == 4` + // - Else, `N == 4 * size_of::()` + // + // Each test will be run for each metadata in `metas`. + fn test(metas: I) + where + T: ?Sized + KnownLayout + Immutable + FromBytes, + I: IntoIterator> + Clone, + { + let mut bytes = [MaybeUninit::::uninit(); N]; + let initialized = [MaybeUninit::new(0u8); N]; + for start in 0..=bytes.len() { + for end in start..=bytes.len() { + // Set all bytes to uninitialized other than those in + // the range we're going to pass to `try_cast_from`. + // This allows Miri to detect out-of-bounds reads + // because they read uninitialized memory. Without this, + // some out-of-bounds reads would still be in-bounds of + // `bytes`, and so might spuriously be accepted. + bytes = [MaybeUninit::::uninit(); N]; + let bytes = &mut bytes[start..end]; + // Initialize only the byte range we're going to pass to + // `try_cast_from`. + bytes.copy_from_slice(&initialized[start..end]); + + let bytes = { + let bytes: *const [MaybeUninit] = bytes; + #[allow(clippy::as_conversions)] + let bytes = bytes as *const [u8]; + // SAFETY: We just initialized these bytes to valid + // `u8`s. + unsafe { &*bytes } + }; + + // SAFETY: The bytes in `slf` must be initialized. + unsafe fn validate_and_get_len< + T: ?Sized + KnownLayout + FromBytes + Immutable, + >( + slf: Ptr<'_, T, (Shared, Aligned, Initialized)>, + ) -> usize { + let t = slf.recall_validity().as_ref(); + + let bytes = { + let len = mem::size_of_val(t); + let t: *const T = t; + // SAFETY: + // - We know `t`'s bytes are all initialized + // because we just read it from `slf`, which + // points to an initialized range of bytes. 
If + // there's a bug and this doesn't hold, then + // that's exactly what we're hoping Miri will + // catch! + // - Since `T: FromBytes`, `T` doesn't contain + // any `UnsafeCell`s, so it's okay for `t: T` + // and a `&[u8]` to the same memory to be + // alive concurrently. + unsafe { core::slice::from_raw_parts(t.cast::(), len) } + }; + + // This assertion ensures that `t`'s bytes are read + // and compared to another value, which in turn + // ensures that Miri gets a chance to notice if any + // of `t`'s bytes are uninitialized, which they + // shouldn't be (see the comment above). + assert_eq!(bytes, vec![0u8; bytes.len()]); + + mem::size_of_val(t) + } + + for meta in metas.clone().into_iter() { + for cast_type in [CastType::Prefix, CastType::Suffix] { + if let Ok((slf, remaining)) = Ptr::from_ref(bytes) + .try_cast_into::(cast_type, meta) + { + // SAFETY: All bytes in `bytes` have been + // initialized. + let len = unsafe { validate_and_get_len(slf) }; + assert_eq!(remaining.len(), bytes.len() - len); + #[allow(unstable_name_collisions)] + let bytes_addr = bytes.as_ptr().addr(); + #[allow(unstable_name_collisions)] + let remaining_addr = remaining.as_inner().as_ptr().addr(); + match cast_type { + CastType::Prefix => { + assert_eq!(remaining_addr, bytes_addr + len) + } + CastType::Suffix => assert_eq!(remaining_addr, bytes_addr), + } + + if let Some(want) = meta { + let got = + KnownLayout::pointer_to_metadata(slf.as_inner().as_ptr()); + assert_eq!(got, want); + } + } + } + + if let Ok(slf) = Ptr::from_ref(bytes) + .try_cast_into_no_leftover::(meta) + { + // SAFETY: All bytes in `bytes` have been + // initialized. 
+ let len = unsafe { validate_and_get_len(slf) }; + assert_eq!(len, bytes.len()); + + if let Some(want) = meta { + let got = KnownLayout::pointer_to_metadata(slf.as_inner().as_ptr()); + assert_eq!(got, want); + } + } + } + } + } + } + + #[derive(FromBytes, KnownLayout, Immutable)] + #[repr(C)] + struct SliceDst { + a: u8, + trailing: [T], + } + + // Each test case becomes its own `#[test]` function. We do this because + // this test in particular takes far, far longer to execute under Miri + // than all of our other tests combined. Previously, we had these + // execute sequentially in a single test function. We run Miri tests in + // parallel in CI, but this test being sequential meant that most of + // that parallelism was wasted, as all other tests would finish in a + // fraction of the total execution time, leaving this test to execute on + // a single thread for the remainder of the test. By putting each test + // case in its own function, we permit better use of available + // parallelism. + macro_rules! test { + ($test_name:ident: $ty:ty) => { + #[test] + #[allow(non_snake_case)] + fn $test_name() { + const S: usize = core::mem::size_of::<$ty>(); + const N: usize = if S == 0 { 4 } else { S * 4 }; + test::<$ty, _, N>([None]); + + // If `$ty` is a ZST, then we can't pass `None` as the + // pointer metadata, or else computing the correct trailing + // slice length will panic. + if S == 0 { + test::<[$ty], _, N>([Some(0), Some(1), Some(2), Some(3)]); + test::, _, N>([Some(0), Some(1), Some(2), Some(3)]); + } else { + test::<[$ty], _, N>([None, Some(0), Some(1), Some(2), Some(3)]); + test::, _, N>([None, Some(0), Some(1), Some(2), Some(3)]); + } + } + }; + ($ty:ident) => { + test!($ty: $ty); + }; + ($($ty:ident),*) => { $(test!($ty);)* } + } + + test!(empty_tuple: ()); + test!(u8, u16, u32, u64, u128, usize, AU64); + test!(i8, i16, i32, i64, i128, isize); + test!(f32, f64); + } + + #[test] + fn test_try_cast_into_explicit_count() { + macro_rules! 
test { + ($ty:ty, $bytes:expr, $elems:expr, $expect:expr) => {{ + let bytes = [0u8; $bytes]; + let ptr = Ptr::from_ref(&bytes[..]); + let res = + ptr.try_cast_into::<$ty, BecauseImmutable>(CastType::Prefix, Some($elems)); + if let Some(expect) = $expect { + let (ptr, _) = res.unwrap(); + assert_eq!(KnownLayout::pointer_to_metadata(ptr.as_inner().as_ptr()), expect); + } else { + let _ = res.unwrap_err(); + } + }}; + } + + #[derive(KnownLayout, Immutable)] + #[repr(C)] + struct ZstDst { + u: [u8; 8], + slc: [()], + } + + test!(ZstDst, 8, 0, Some(0)); + test!(ZstDst, 7, 0, None); + + test!(ZstDst, 8, usize::MAX, Some(usize::MAX)); + test!(ZstDst, 7, usize::MAX, None); + + #[derive(KnownLayout, Immutable)] + #[repr(C)] + struct Dst { + u: [u8; 8], + slc: [u8], + } + + test!(Dst, 8, 0, Some(0)); + test!(Dst, 7, 0, None); + + test!(Dst, 9, 1, Some(1)); + test!(Dst, 8, 1, None); + + // If we didn't properly check for overflow, this would cause the + // metadata to overflow to 0, and thus the cast would spuriously + // succeed. 
+ test!(Dst, 8, usize::MAX - 8 + 1, None); + } + + #[test] + fn test_try_cast_into_no_leftover_restores_original_slice() { + let bytes = [0u8; 4]; + let ptr = Ptr::from_ref(&bytes[..]); + let res = ptr.try_cast_into_no_leftover::<[u8; 2], BecauseImmutable>(None); + match res { + Ok(_) => panic!("should have failed due to leftover bytes"), + Err(CastError::Size(e)) => { + assert_eq!(e.into_src().len(), 4, "Should return original slice length"); + } + Err(e) => panic!("wrong error type: {:?}", e), + } + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/transmute.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/transmute.rs new file mode 100644 index 0000000000000000000000000000000000000000..2a5097e10b0a2cd365f39277f03749749bcc18e0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/pointer/transmute.rs @@ -0,0 +1,519 @@ +// Copyright 2025 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use core::{ + cell::{Cell, UnsafeCell}, + mem::{ManuallyDrop, MaybeUninit}, + num::Wrapping, +}; + +use crate::{ + pointer::{ + cast::{self, CastExact, CastSizedExact}, + invariant::*, + }, + FromBytes, Immutable, IntoBytes, Unalign, +}; + +/// Transmutations which are sound to attempt, conditional on validating the bit +/// validity of the destination type. +/// +/// If a `Ptr` transmutation is `TryTransmuteFromPtr`, then it is sound to +/// perform that transmutation so long as some additional mechanism is used to +/// validate that the referent is bit-valid for the destination type. That +/// validation mechanism could be a type bound (such as `TransmuteFrom`) or a +/// runtime validity check. 
+/// +/// # Safety +/// +/// ## Post-conditions +/// +/// Given `Dst: TryTransmuteFromPtr`, callers may assume +/// the following: +/// +/// Given `src: Ptr<'a, Src, (A, _, SV)>`, if the referent of `src` is +/// `DV`-valid for `Dst`, then it is sound to transmute `src` into `dst: Ptr<'a, +/// Dst, (A, Unaligned, DV)>` using `C`. +/// +/// ## Pre-conditions +/// +/// Given `src: Ptr` and `dst: Ptr`, +/// `Dst: TryTransmuteFromPtr` is sound if all of the +/// following hold: +/// - Forwards transmutation: Either of the following hold: +/// - So long as `dst` is active, no mutation of `dst`'s referent is allowed +/// except via `dst` itself +/// - The set of `DV`-valid referents of `dst` is a superset of the set of +/// `SV`-valid referents of `src` (NOTE: this condition effectively bans +/// shrinking or overwriting transmutes, which cannot satisfy this +/// condition) +/// - Reverse transmutation: Either of the following hold: +/// - `dst` does not permit mutation of its referent +/// - The set of `DV`-valid referents of `dst` is a subset of the set of +/// `SV`-valid referents of `src` (NOTE: this condition effectively bans +/// shrinking or overwriting transmutes, which cannot satisfy this +/// condition) +/// - No safe code, given access to `src` and `dst`, can cause undefined +/// behavior: Any of the following hold: +/// - `A` is `Exclusive` +/// - `Src: Immutable` and `Dst: Immutable` +/// - It is sound for shared code to operate on a `&Src` and `&Dst` which +/// reference the same byte range at the same time +/// +/// ## Proof +/// +/// Given: +/// - `src: Ptr<'a, Src, (A, _, SV)>` +/// - `src`'s referent is `DV`-valid for `Dst` +/// +/// We are trying to prove that it is sound to perform a cast from `src` to a +/// `dst: Ptr<'a, Dst, (A, Unaligned, DV)>` using `C`. We need to prove that +/// such a cast does not violate any of `src`'s invariants, and that it +/// satisfies all invariants of the destination `Ptr` type. 
+/// +/// First, by `C: CastExact`, `src`'s address is unchanged, so it still satisfies +/// its alignment. Since `dst`'s alignment is `Unaligned`, it trivially satisfies +/// its alignment. +/// +/// Second, aliasing is either `Exclusive` or `Shared`: +/// - If it is `Exclusive`, then both `src` and `dst` satisfy `Exclusive` +/// aliasing trivially: since `src` and `dst` have the same lifetime, `src` is +/// inaccessible so long as `dst` is alive, and no other live `Ptr`s or +/// references may reference the same referent. +/// - If it is `Shared`, then either: +/// - `Src: Immutable` and `Dst: Immutable`, and so neither `src` nor `dst` +/// permit interior mutation. +/// - It is explicitly sound for safe code to operate on a `&Src` and a `&Dst` +/// pointing to the same byte range at the same time. +/// +/// Third, `src`'s validity is satisfied. By invariant, `src`'s referent began +/// as an `SV`-valid `Src`. It is guaranteed to remain so, as either of the +/// following hold: +/// - `dst` does not permit mutation of its referent. +/// - The set of `DV`-valid referents of `dst` is a subset of the set of +/// `SV`-valid referents of `src`. Thus, any value written via `dst` is +/// guaranteed to be an `SV`-valid referent of `src`. +/// +/// Fourth, `dst`'s validity is satisfied. It is a given of this proof that the +/// referent is `DV`-valid for `Dst`. It is guaranteed to remain so, as either +/// of the following hold: +/// - So long as `dst` is active, no mutation of the referent is allowed except +/// via `dst` itself. +/// - The set of `DV`-valid referents of `dst` is a superset of the set of +/// `SV`-valid referents of `src`. Thus, any value written via `src` is +/// guaranteed to be a `DV`-valid referent of `dst`. 
+pub unsafe trait TryTransmuteFromPtr< + Src: ?Sized, + A: Aliasing, + SV: Validity, + DV: Validity, + C: CastExact, + R, +> +{ +} + +#[allow(missing_copy_implementations, missing_debug_implementations)] +pub enum BecauseMutationCompatible {} + +// SAFETY: +// - Forwards transmutation: By `Dst: MutationCompatible`, we +// know that at least one of the following holds: +// - So long as `dst: Ptr` is active, no mutation of its referent is +// allowed except via `dst` itself if either of the following hold: +// - Aliasing is `Exclusive`, in which case, so long as the `Dst` `Ptr` +// exists, no mutation is permitted except via that `Ptr` +// - Aliasing is `Shared`, `Src: Immutable`, and `Dst: Immutable`, in which +// case no mutation is possible via either `Ptr` +// - Since the underlying cast is size-preserving, `dst` addresses the same +// referent as `src`. By `Dst: TransmuteFrom`, the set of +// `DV`-valid referents of `dst` is a superset of the set of `SV`-valid +// referents of `src`. +// - Reverse transmutation: Since the underlying cast is size-preserving, `dst` +// addresses the same referent as `src`. By `Src: TransmuteFrom`, +// the set of `DV`-valid referents of `src` is a subset of the set of +// `SV`-valid referents of `dst`. +// - No safe code, given access to `src` and `dst`, can cause undefined +// behavior: By `Dst: MutationCompatible`, at least one of +// the following holds: +// - `A` is `Exclusive` +// - `Src: Immutable` and `Dst: Immutable` +// - `Dst: InvariantsEq`, which guarantees that `Src` and `Dst` have the +// same invariants, and permit interior mutation on the same byte ranges +unsafe impl + TryTransmuteFromPtr for Dst +where + A: Aliasing, + SV: Validity, + DV: Validity, + Src: TransmuteFrom + ?Sized, + Dst: MutationCompatible + ?Sized, + C: CastExact, +{ +} + +// SAFETY: +// - Forwards transmutation: Since aliasing is `Shared` and `Src: Immutable`, +// `src` does not permit mutation of its referent. 
+// - Reverse transmutation: Since aliasing is `Shared` and `Dst: Immutable`, +// `dst` does not permit mutation of its referent. +// - No safe code, given access to `src` and `dst`, can cause undefined +// behavior: `Src: Immutable` and `Dst: Immutable` +unsafe impl TryTransmuteFromPtr + for Dst +where + SV: Validity, + DV: Validity, + Src: Immutable + ?Sized, + Dst: Immutable + ?Sized, + C: CastExact, +{ +} + +/// Denotes that `src: Ptr` and `dst: Ptr`, +/// referencing the same referent at the same time, cannot be used by safe code +/// to break library safety invariants of `Src` or `Self`. +/// +/// # Safety +/// +/// At least one of the following must hold: +/// - `Src: Read` and `Self: Read` +/// - `Self: InvariantsEq`, and, for some `V`: +/// - `Dst: TransmuteFrom` +/// - `Src: TransmuteFrom` +pub unsafe trait MutationCompatible {} + +#[allow(missing_copy_implementations, missing_debug_implementations)] +pub enum BecauseRead {} + +// SAFETY: `Src: Read` and `Dst: Read`. +unsafe impl + MutationCompatible for Dst +where + Src: Read, + Dst: Read, +{ +} + +/// Denotes that two types have the same invariants. +/// +/// # Safety +/// +/// It is sound for safe code to operate on a `&T` and a `&Self` pointing to the +/// same referent at the same time - no such safe code can cause undefined +/// behavior. +pub unsafe trait InvariantsEq {} + +// SAFETY: Trivially sound to have multiple `&T` pointing to the same referent. +unsafe impl InvariantsEq for T {} + +// SAFETY: `Dst: InvariantsEq + TransmuteFrom`, and `Src: +// TransmuteFrom`. +unsafe impl + MutationCompatible for Dst +where + Src: TransmuteFrom, + Dst: TransmuteFrom + InvariantsEq, +{ +} + +#[allow(missing_debug_implementations, missing_copy_implementations)] +#[doc(hidden)] +pub enum BecauseInvariantsEq {} + +macro_rules! unsafe_impl_invariants_eq { + ($tyvar:ident => $t:ty, $u:ty) => {{ + crate::util::macros::__unsafe(); + // SAFETY: The caller promises that this is sound. 
+ unsafe impl<$tyvar> InvariantsEq<$t> for $u {} + // SAFETY: The caller promises that this is sound. + unsafe impl<$tyvar> InvariantsEq<$u> for $t {} + }}; +} + +impl_transitive_transmute_from!(T => MaybeUninit => T => Wrapping); +impl_transitive_transmute_from!(T => Wrapping => T => MaybeUninit); + +// SAFETY: `ManuallyDrop` has the same size and bit validity as `T` [1], and +// implements `Deref` [2]. Thus, it is already possible for safe +// code to obtain a `&T` and a `&ManuallyDrop` to the same referent at the +// same time. +// +// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html: +// +// `ManuallyDrop` is guaranteed to have the same layout and bit +// validity as `T` +// +// [2] https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html#impl-Deref-for-ManuallyDrop%3CT%3E +unsafe impl InvariantsEq for ManuallyDrop {} +// SAFETY: See previous safety comment. +unsafe impl InvariantsEq> for T {} + +/// Transmutations which are always sound. +/// +/// `TransmuteFromPtr` is a shorthand for [`TryTransmuteFromPtr`] and +/// [`TransmuteFrom`]. +/// +/// # Safety +/// +/// `Dst: TransmuteFromPtr` is equivalent to `Dst: +/// TryTransmuteFromPtr + TransmuteFrom`. +pub unsafe trait TransmuteFromPtr< + Src: ?Sized, + A: Aliasing, + SV: Validity, + DV: Validity, + C: CastExact, + R, +>: TryTransmuteFromPtr + TransmuteFrom +{ +} + +// SAFETY: The `where` bounds are equivalent to the safety invariant on +// `TransmuteFromPtr`. +unsafe impl< + Src: ?Sized, + Dst: ?Sized, + A: Aliasing, + SV: Validity, + DV: Validity, + C: CastExact, + R, + > TransmuteFromPtr for Dst +where + Dst: TransmuteFrom + TryTransmuteFromPtr, +{ +} + +/// Denotes that any `SV`-valid `Src` may soundly be transmuted into a +/// `DV`-valid `Self`. 
+/// +/// # Safety +/// +/// Given `src: Ptr` and `dst: Ptr`, if the +/// referents of `src` and `dst` are the same size, then the set of bit patterns +/// allowed to appear in `src`'s referent must be a subset of the set allowed to +/// appear in `dst`'s referent. +/// +/// If the referents are not the same size, then `Dst: TransmuteFrom` conveys no safety guarantee. +pub unsafe trait TransmuteFrom {} + +/// Carries the ability to perform a size-preserving cast or conversion from a +/// raw pointer to `Src` to a raw pointer to `Self`. +/// +/// The cast/conversion is carried by the associated [`CastFrom`] type, and +/// may be a no-op cast (without updating pointer metadata) or a conversion +/// which updates pointer metadata. +/// +/// # Safety +/// +/// `SizeEq` on its own conveys no safety guarantee. Any safety guarantees come +/// from the safety invariants on the associated [`CastFrom`] type, specifically +/// the [`CastExact`] bound. +/// +/// [`CastFrom`]: SizeEq::CastFrom +/// [`CastExact`]: CastExact +pub trait SizeEq { + type CastFrom: CastExact; +} + +impl SizeEq for T { + type CastFrom = cast::IdCast; +} + +// SAFETY: Since `Src: IntoBytes`, the set of valid `Src`'s is the set of +// initialized bit patterns, which is exactly the set allowed in the referent of +// any `Initialized` `Ptr`. +unsafe impl TransmuteFrom for Dst +where + Src: IntoBytes + ?Sized, + Dst: ?Sized, +{ +} + +// SAFETY: Since `Dst: FromBytes`, any initialized bit pattern may appear in the +// referent of a `Ptr`. This is exactly equal to the set of +// bit patterns which may appear in the referent of any `Initialized` `Ptr`. +unsafe impl TransmuteFrom for Dst +where + Src: ?Sized, + Dst: FromBytes + ?Sized, +{ +} + +// FIXME(#2354): This seems like a smell - the soundness of this bound has +// nothing to do with `Src` or `Dst` - we're basically just saying `[u8; N]` is +// transmutable into `[u8; N]`. 
+ +// SAFETY: The set of allowed bit patterns in the referent of any `Initialized` +// `Ptr` is the same regardless of referent type. +unsafe impl TransmuteFrom for Dst +where + Src: ?Sized, + Dst: ?Sized, +{ +} + +// FIXME(#2354): This seems like a smell - the soundness of this bound has +// nothing to do with `Dst` - we're basically just saying that any type is +// transmutable into `MaybeUninit<[u8; N]>`. + +// SAFETY: A `Dst` with validity `Uninit` permits any byte sequence, and +// therefore can be transmuted from any value. +unsafe impl TransmuteFrom for Dst +where + Src: ?Sized, + Dst: ?Sized, + V: Validity, +{ +} + +// SAFETY: +// - `ManuallyDrop` has the same size as `T` [1] +// - `ManuallyDrop` has the same validity as `T` [1] +// +// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html: +// +// `ManuallyDrop` is guaranteed to have the same layout and bit validity as +// `T` +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { unsafe_impl_for_transparent_wrapper!(pub T: ?Sized => ManuallyDrop) }; + +// SAFETY: +// - `Unalign` promises to have the same size as `T`. +// - `Unalign` promises to have the same validity as `T`. +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { unsafe_impl_for_transparent_wrapper!(pub T => Unalign) }; +// SAFETY: `Unalign` promises to have the same size and validity as `T`. +// Given `u: &Unalign`, it is already possible to obtain `let t = +// u.try_deref().unwrap()`. Because `Unalign` has the same size as `T`, the +// returned `&T` must point to the same referent as `u`, and thus it must be +// sound for these two references to exist at the same time since it's already +// possible for safe code to get into this state. +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { unsafe_impl_invariants_eq!(T => T, Unalign) }; + +// SAFETY: +// - `Wrapping` has the same size as `T` [1]. +// - `Wrapping` has only one field, which is `pub` [2]. 
We are also +// guaranteed per that `Wrapping` has the same layout as `T` [1]. The only +// way for both of these to be true simultaneously is for `Wrapping` to +// have the same bit validity as `T`. In particular, in order to change the +// bit validity, one of the following would need to happen: +// - `Wrapping` could change its `repr`, but this would violate the layout +// guarantee. +// - `Wrapping` could add or change its fields, but this would be a +// stability-breaking change. +// +// [1] Per https://doc.rust-lang.org/1.85.0/core/num/struct.Wrapping.html#layout-1: +// +// `Wrapping` is guaranteed to have the same layout and ABI as `T`. +// +// [2] Definition from https://doc.rust-lang.org/1.85.0/core/num/struct.Wrapping.html: +// +// ``` +// #[repr(transparent)] +// pub struct Wrapping(pub T); +// ``` +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { unsafe_impl_for_transparent_wrapper!(pub T => Wrapping) }; + +// SAFETY: By the preceding safety proof, `Wrapping` and `T` have the same +// layout and bit validity. Since a `Wrapping`'s `T` field is `pub`, given +// `w: &Wrapping`, it's possible to do `let t = &w.t`, which means that it's +// already possible for safe code to obtain a `&Wrapping` and a `&T` pointing +// to the same referent at the same time. Thus, this must be sound. +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { unsafe_impl_invariants_eq!(T => T, Wrapping) }; + +// SAFETY: +// - `UnsafeCell` has the same size as `T` [1]. +// - Per [1], `UnsafeCell` has the same bit validity as `T`. Technically the +// term "representation" doesn't guarantee this, but the subsequent sentence +// in the documentation makes it clear that this is the intention. +// +// [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout: +// +// `UnsafeCell` has the same in-memory representation as its inner type +// `T`. 
A consequence of this guarantee is that it is possible to convert +// between `T` and `UnsafeCell`. +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { unsafe_impl_for_transparent_wrapper!(pub T: ?Sized => UnsafeCell) }; + +// SAFETY: +// - `Cell` has the same size as `T` [1]. +// - Per [1], `Cell` has the same bit validity as `T`. Technically the term +// "representation" doesn't guarantee this, but it does promise to have the +// "same memory layout and caveats as `UnsafeCell`." The `UnsafeCell` docs +// [2] make it clear that bit validity is the intention even if that phrase +// isn't used. +// +// [1] Per https://doc.rust-lang.org/1.85.0/std/cell/struct.Cell.html#memory-layout: +// +// `Cell` has the same memory layout and caveats as `UnsafeCell`. In +// particular, this means that `Cell` has the same in-memory representation +// as its inner type `T`. +// +// [2] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout: +// +// `UnsafeCell` has the same in-memory representation as its inner type +// `T`. A consequence of this guarantee is that it is possible to convert +// between `T` and `UnsafeCell`. +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { unsafe_impl_for_transparent_wrapper!(pub T: ?Sized => Cell) }; + +impl_transitive_transmute_from!(T: ?Sized => Cell => T => UnsafeCell); +impl_transitive_transmute_from!(T: ?Sized => UnsafeCell => T => Cell); + +// SAFETY: `MaybeUninit` has no validity requirements. 
Currently this is not +// explicitly guaranteed, but it's obvious from `MaybeUninit`'s documentation +// that this is the intention: +// https://doc.rust-lang.org/1.85.0/core/mem/union.MaybeUninit.html +unsafe impl TransmuteFrom for MaybeUninit {} + +impl SizeEq for MaybeUninit { + type CastFrom = CastSizedExact; +} + +impl SizeEq> for T { + type CastFrom = CastSizedExact; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::pointer::cast::Project as _; + + fn test_size_eq>(mut src: Src) { + let _: *mut Dst = + >::CastFrom::project(crate::pointer::PtrInner::from_mut(&mut src)); + } + + #[test] + fn test_transmute_coverage() { + // SizeEq for MaybeUninit + test_size_eq::>(0u8); + + // SizeEq> for T + test_size_eq::, u8>(MaybeUninit::::new(0)); + + // Transitive: MaybeUninit -> Wrapping + // T => MaybeUninit => T => Wrapping + test_size_eq::>(0u8); + + // T => Wrapping => T => MaybeUninit + test_size_eq::, MaybeUninit>(Wrapping(0u8)); + + // T: ?Sized => Cell => T => UnsafeCell + test_size_eq::, UnsafeCell>(Cell::new(0u8)); + + // T: ?Sized => UnsafeCell => T => Cell + test_size_eq::, Cell>(UnsafeCell::new(0u8)); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/ref.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/ref.rs new file mode 100644 index 0000000000000000000000000000000000000000..6354442b63005d03248bab7a1e476d7670cde2f1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/ref.rs @@ -0,0 +1,1354 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+use super::*; +use crate::pointer::{ + BecauseInvariantsEq, BecauseMutationCompatible, MutationCompatible, TransmuteFromPtr, +}; + +mod def { + use core::marker::PhantomData; + + use crate::{ + ByteSlice, ByteSliceMut, CloneableByteSlice, CopyableByteSlice, IntoByteSlice, + IntoByteSliceMut, + }; + + /// A typed reference derived from a byte slice. + /// + /// A `Ref` is a reference to a `T` which is stored in a byte slice, `B`. + /// Unlike a native reference (`&T` or `&mut T`), `Ref` has the same + /// mutability as the byte slice it was constructed from (`B`). + /// + /// # Examples + /// + /// `Ref` can be used to treat a sequence of bytes as a structured type, and + /// to read and write the fields of that type as if the byte slice reference + /// were simply a reference to that type. + /// + /// ```rust + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable, Unaligned)] + /// #[repr(C)] + /// struct UdpHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable, Unaligned)] + /// #[repr(C, packed)] + /// struct UdpPacket { + /// header: UdpHeader, + /// body: [u8], + /// } + /// + /// impl UdpPacket { + /// pub fn parse(bytes: B) -> Option> { + /// Ref::from_bytes(bytes).ok() + /// } + /// } + /// ``` + pub struct Ref( + // INVARIANTS: The referent (via `.deref`, `.deref_mut`, `.into`) byte + // slice is aligned to `T`'s alignment and its size corresponds to a + // valid size for `T`. + B, + PhantomData, + ); + + impl Ref { + /// Constructs a new `Ref`. + /// + /// # Safety + /// + /// `bytes` dereferences (via [`deref`], [`deref_mut`], and [`into`]) to + /// a byte slice which is aligned to `T`'s alignment and whose size is a + /// valid size for `T`. 
+ /// + /// [`deref`]: core::ops::Deref::deref + /// [`deref_mut`]: core::ops::DerefMut::deref_mut + /// [`into`]: core::convert::Into::into + pub(crate) unsafe fn new_unchecked(bytes: B) -> Ref { + // INVARIANTS: The caller has promised that `bytes`'s referent is + // validly-aligned and has a valid size. + Ref(bytes, PhantomData) + } + } + + impl Ref { + /// Access the byte slice as a [`ByteSlice`]. + /// + /// # Safety + /// + /// The caller promises not to call methods on the returned + /// [`ByteSlice`] other than `ByteSlice` methods (for example, via + /// `Any::downcast_ref`). + /// + /// `as_byte_slice` promises to return a `ByteSlice` whose referent is + /// validly-aligned for `T` and has a valid size for `T`. + pub(crate) unsafe fn as_byte_slice(&self) -> &impl ByteSlice { + // INVARIANTS: The caller promises not to call methods other than + // those on `ByteSlice`. Since `B: ByteSlice`, dereference stability + // guarantees that calling `ByteSlice` methods will not change the + // address or length of `self.0`'s referent. + // + // SAFETY: By invariant on `self.0`, the alignment and size + // post-conditions are upheld. + &self.0 + } + } + + impl Ref { + /// Access the byte slice as a [`ByteSliceMut`]. + /// + /// # Safety + /// + /// The caller promises not to call methods on the returned + /// [`ByteSliceMut`] other than `ByteSliceMut` methods (for example, via + /// `Any::downcast_mut`). + /// + /// `as_byte_slice` promises to return a `ByteSlice` whose referent is + /// validly-aligned for `T` and has a valid size for `T`. + pub(crate) unsafe fn as_byte_slice_mut(&mut self) -> &mut impl ByteSliceMut { + // INVARIANTS: The caller promises not to call methods other than + // those on `ByteSliceMut`. Since `B: ByteSlice`, dereference + // stability guarantees that calling `ByteSlice` methods will not + // change the address or length of `self.0`'s referent. 
+ // + // SAFETY: By invariant on `self.0`, the alignment and size + // post-conditions are upheld. + &mut self.0 + } + } + + impl<'a, B: IntoByteSlice<'a>, T: ?Sized> Ref { + /// Access the byte slice as an [`IntoByteSlice`]. + /// + /// # Safety + /// + /// The caller promises not to call methods on the returned + /// [`IntoByteSlice`] other than `IntoByteSlice` methods (for example, + /// via `Any::downcast_ref`). + /// + /// `as_byte_slice` promises to return a `ByteSlice` whose referent is + /// validly-aligned for `T` and has a valid size for `T`. + pub(crate) unsafe fn into_byte_slice(self) -> impl IntoByteSlice<'a> { + // INVARIANTS: The caller promises not to call methods other than + // those on `IntoByteSlice`. Since `B: ByteSlice`, dereference + // stability guarantees that calling `ByteSlice` methods will not + // change the address or length of `self.0`'s referent. + // + // SAFETY: By invariant on `self.0`, the alignment and size + // post-conditions are upheld. + self.0 + } + } + + impl<'a, B: IntoByteSliceMut<'a>, T: ?Sized> Ref { + /// Access the byte slice as an [`IntoByteSliceMut`]. + /// + /// # Safety + /// + /// The caller promises not to call methods on the returned + /// [`IntoByteSliceMut`] other than `IntoByteSliceMut` methods (for + /// example, via `Any::downcast_mut`). + /// + /// `as_byte_slice` promises to return a `ByteSlice` whose referent is + /// validly-aligned for `T` and has a valid size for `T`. + pub(crate) unsafe fn into_byte_slice_mut(self) -> impl IntoByteSliceMut<'a> { + // INVARIANTS: The caller promises not to call methods other than + // those on `IntoByteSliceMut`. Since `B: ByteSlice`, dereference + // stability guarantees that calling `ByteSlice` methods will not + // change the address or length of `self.0`'s referent. + // + // SAFETY: By invariant on `self.0`, the alignment and size + // post-conditions are upheld. 
+ self.0 + } + } + + impl Clone for Ref { + #[inline] + fn clone(&self) -> Ref { + // INVARIANTS: Since `B: CloneableByteSlice`, `self.0.clone()` has + // the same address and length as `self.0`. Since `self.0` upholds + // the field invariants, so does `self.0.clone()`. + Ref(self.0.clone(), PhantomData) + } + } + + // INVARIANTS: Since `B: CopyableByteSlice`, the copied `Ref`'s `.0` has the + // same address and length as the original `Ref`'s `.0`. Since the original + // upholds the field invariants, so does the copy. + impl Copy for Ref {} +} + +#[allow(unreachable_pub)] // This is a false positive on our MSRV toolchain. +pub use def::Ref; + +use crate::pointer::{ + invariant::{Aligned, BecauseExclusive, Initialized, Unaligned, Valid}, + BecauseRead, PtrInner, +}; + +impl Ref +where + B: ByteSlice, +{ + #[must_use = "has no side effects"] + pub(crate) fn sized_from(bytes: B) -> Result, CastError> { + if bytes.len() != mem::size_of::() { + return Err(SizeError::new(bytes).into()); + } + if let Err(err) = util::validate_aligned_to::<_, T>(bytes.deref()) { + return Err(err.with_src(bytes).into()); + } + + // SAFETY: We just validated size and alignment. + Ok(unsafe { Ref::new_unchecked(bytes) }) + } +} + +impl Ref +where + B: SplitByteSlice, +{ + #[must_use = "has no side effects"] + pub(crate) fn sized_from_prefix(bytes: B) -> Result<(Ref, B), CastError> { + if bytes.len() < mem::size_of::() { + return Err(SizeError::new(bytes).into()); + } + if let Err(err) = util::validate_aligned_to::<_, T>(bytes.deref()) { + return Err(err.with_src(bytes).into()); + } + let (bytes, suffix) = + bytes.split_at(mem::size_of::()).map_err(|b| SizeError::new(b).into())?; + // SAFETY: We just validated alignment and that `bytes` is at least as + // large as `T`. `bytes.split_at(mem::size_of::())?` ensures that the + // new `bytes` is exactly the size of `T`. 
By safety postcondition on + // `SplitByteSlice::split_at` we can rely on `split_at` to produce the + // correct `bytes` and `suffix`. + let r = unsafe { Ref::new_unchecked(bytes) }; + Ok((r, suffix)) + } + + #[must_use = "has no side effects"] + pub(crate) fn sized_from_suffix(bytes: B) -> Result<(B, Ref), CastError> { + let bytes_len = bytes.len(); + let split_at = if let Some(split_at) = bytes_len.checked_sub(mem::size_of::()) { + split_at + } else { + return Err(SizeError::new(bytes).into()); + }; + let (prefix, bytes) = bytes.split_at(split_at).map_err(|b| SizeError::new(b).into())?; + if let Err(err) = util::validate_aligned_to::<_, T>(bytes.deref()) { + return Err(err.with_src(bytes).into()); + } + // SAFETY: Since `split_at` is defined as `bytes_len - size_of::()`, + // the `bytes` which results from `let (prefix, bytes) = + // bytes.split_at(split_at)?` has length `size_of::()`. After + // constructing `bytes`, we validate that it has the proper alignment. + // By safety postcondition on `SplitByteSlice::split_at` we can rely on + // `split_at` to produce the correct `prefix` and `bytes`. + let r = unsafe { Ref::new_unchecked(bytes) }; + Ok((prefix, r)) + } +} + +impl Ref +where + B: ByteSlice, + T: KnownLayout + Immutable + ?Sized, +{ + /// Constructs a `Ref` from a byte slice. + /// + /// If the length of `source` is not a [valid size of `T`][valid-size], or + /// if `source` is not appropriately aligned for `T`, this returns `Err`. If + /// [`T: Unaligned`][t-unaligned], you can [infallibly discard the alignment + /// error][size-error-from]. + /// + /// `T` may be a sized type, a slice, or a [slice DST][slice-dst]. 
+ /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [t-unaligned]: crate::Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = Ref::<_, ZSTy>::from_bytes(&b"UU"[..]); // ⚠ Compile Error! + /// ``` + #[must_use = "has no side effects"] + #[inline] + pub fn from_bytes(source: B) -> Result, CastError> { + static_assert_dst_is_not_zst!(T); + if let Err(e) = + Ptr::from_ref(source.deref()).try_cast_into_no_leftover::(None) + { + return Err(e.with_src(()).with_src(source)); + } + // SAFETY: `try_cast_into_no_leftover` validates size and alignment. + Ok(unsafe { Ref::new_unchecked(source) }) + } +} + +impl Ref +where + B: SplitByteSlice, + T: KnownLayout + Immutable + ?Sized, +{ + /// Constructs a `Ref` from the prefix of a byte slice. + /// + /// This method computes the [largest possible size of `T`][valid-size] that + /// can fit in the leading bytes of `source`, then attempts to return both a + /// `Ref` to those bytes, and a reference to the remaining bytes. If there + /// are insufficient bytes, or if `source` is not appropriately aligned, + /// this returns `Err`. If [`T: Unaligned`][t-unaligned], you can + /// [infallibly discard the alignment error][size-error-from]. + /// + /// `T` may be a sized type, a slice, or a [slice DST][slice-dst]. 
+ /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [t-unaligned]: crate::Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = Ref::<_, ZSTy>::from_prefix(&b"UU"[..]); // ⚠ Compile Error! + /// ``` + #[must_use = "has no side effects"] + #[inline] + pub fn from_prefix(source: B) -> Result<(Ref, B), CastError> { + static_assert_dst_is_not_zst!(T); + let remainder = match Ptr::from_ref(source.deref()) + .try_cast_into::(CastType::Prefix, None) + { + Ok((_, remainder)) => remainder, + Err(e) => { + return Err(e.with_src(()).with_src(source)); + } + }; + + // SAFETY: `remainder` is constructed as a subset of `source`, and so it + // cannot have a larger size than `source`. Both of their `len` methods + // measure bytes (`source` deref's to `[u8]`, and `remainder` is a + // `Ptr<[u8]>`), so `source.len() >= remainder.len()`. Thus, this cannot + // underflow. + #[allow(unstable_name_collisions)] + let split_at = unsafe { source.len().unchecked_sub(remainder.len()) }; + let (bytes, suffix) = source.split_at(split_at).map_err(|b| SizeError::new(b).into())?; + // SAFETY: `try_cast_into` validates size and alignment, and returns a + // `split_at` that indicates how many bytes of `source` correspond to a + // valid `T`. By safety postcondition on `SplitByteSlice::split_at` we + // can rely on `split_at` to produce the correct `source` and `suffix`. 
+ let r = unsafe { Ref::new_unchecked(bytes) }; + Ok((r, suffix)) + } + + /// Constructs a `Ref` from the suffix of a byte slice. + /// + /// This method computes the [largest possible size of `T`][valid-size] that + /// can fit in the trailing bytes of `source`, then attempts to return both + /// a `Ref` to those bytes, and a reference to the preceding bytes. If there + /// are insufficient bytes, or if that suffix of `source` is not + /// appropriately aligned, this returns `Err`. If [`T: + /// Unaligned`][t-unaligned], you can [infallibly discard the alignment + /// error][size-error-from]. + /// + /// `T` may be a sized type, a slice, or a [slice DST][slice-dst]. + /// + /// [valid-size]: crate::KnownLayout#what-is-a-valid-size + /// [t-unaligned]: crate::Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// [slice-dst]: KnownLayout#dynamically-sized-types + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = Ref::<_, ZSTy>::from_suffix(&b"UU"[..]); // ⚠ Compile Error! 
+ /// ``` + #[must_use = "has no side effects"] + #[inline] + pub fn from_suffix(source: B) -> Result<(B, Ref), CastError> { + static_assert_dst_is_not_zst!(T); + let remainder = match Ptr::from_ref(source.deref()) + .try_cast_into::(CastType::Suffix, None) + { + Ok((_, remainder)) => remainder, + Err(e) => { + let e = e.with_src(()); + return Err(e.with_src(source)); + } + }; + + let split_at = remainder.len(); + let (prefix, bytes) = source.split_at(split_at).map_err(|b| SizeError::new(b).into())?; + // SAFETY: `try_cast_into` validates size and alignment, and returns a + // `split_at` that indicates how many bytes of `source` correspond to a + // valid `T`. By safety postcondition on `SplitByteSlice::split_at` we + // can rely on `split_at` to produce the correct `prefix` and `bytes`. + let r = unsafe { Ref::new_unchecked(bytes) }; + Ok((prefix, r)) + } +} + +impl Ref +where + B: ByteSlice, + T: KnownLayout + Immutable + ?Sized, +{ + /// Constructs a `Ref` from the given bytes with DST length equal to `count` + /// without copying. + /// + /// This method attempts to return a `Ref` to the prefix of `source` + /// interpreted as a `T` with `count` trailing elements, and a reference to + /// the remaining bytes. If the length of `source` is not equal to the size + /// of `Self` with `count` elements, or if `source` is not appropriately + /// aligned, this returns `Err`. If [`T: Unaligned`][t-unaligned], you can + /// [infallibly discard the alignment error][size-error-from]. + /// + /// [t-unaligned]: crate::Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. 
Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = Ref::<_, ZSTy>::from_bytes_with_elems(&b"UU"[..], 42); // ⚠ Compile Error! + /// ``` + #[inline] + pub fn from_bytes_with_elems(source: B, count: usize) -> Result, CastError> { + static_assert_dst_is_not_zst!(T); + let expected_len = match T::size_for_metadata(count) { + Some(len) => len, + None => return Err(SizeError::new(source).into()), + }; + if source.len() != expected_len { + return Err(SizeError::new(source).into()); + } + Self::from_bytes(source) + } +} + +impl Ref +where + B: SplitByteSlice, + T: KnownLayout + Immutable + ?Sized, +{ + /// Constructs a `Ref` from the prefix of the given bytes with DST + /// length equal to `count` without copying. + /// + /// This method attempts to return a `Ref` to the prefix of `source` + /// interpreted as a `T` with `count` trailing elements, and a reference to + /// the remaining bytes. If there are insufficient bytes, or if `source` is + /// not appropriately aligned, this returns `Err`. If [`T: + /// Unaligned`][t-unaligned], you can [infallibly discard the alignment + /// error][size-error-from]. + /// + /// [t-unaligned]: crate::Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. 
Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = Ref::<_, ZSTy>::from_prefix_with_elems(&b"UU"[..], 42); // ⚠ Compile Error! + /// ``` + #[inline] + pub fn from_prefix_with_elems( + source: B, + count: usize, + ) -> Result<(Ref, B), CastError> { + static_assert_dst_is_not_zst!(T); + let expected_len = match T::size_for_metadata(count) { + Some(len) => len, + None => return Err(SizeError::new(source).into()), + }; + let (prefix, bytes) = source.split_at(expected_len).map_err(SizeError::new)?; + Self::from_bytes(prefix).map(move |l| (l, bytes)) + } + + /// Constructs a `Ref` from the suffix of the given bytes with DST length + /// equal to `count` without copying. + /// + /// This method attempts to return a `Ref` to the suffix of `source` + /// interpreted as a `T` with `count` trailing elements, and a reference to + /// the preceding bytes. If there are insufficient bytes, or if that suffix + /// of `source` is not appropriately aligned, this returns `Err`. If [`T: + /// Unaligned`][t-unaligned], you can [infallibly discard the alignment + /// error][size-error-from]. + /// + /// [t-unaligned]: crate::Unaligned + /// [size-error-from]: error/struct.SizeError.html#method.from-1 + /// + /// # Compile-Time Assertions + /// + /// This method cannot yet be used on unsized types whose dynamically-sized + /// component is zero-sized. 
Attempting to use this method on such types + /// results in a compile-time assertion error; e.g.: + /// + /// ```compile_fail,E0080 + /// use zerocopy::*; + /// # use zerocopy_derive::*; + /// + /// #[derive(Immutable, KnownLayout)] + /// #[repr(C)] + /// struct ZSTy { + /// leading_sized: u16, + /// trailing_dst: [()], + /// } + /// + /// let _ = Ref::<_, ZSTy>::from_suffix_with_elems(&b"UU"[..], 42); // ⚠ Compile Error! + /// ``` + #[inline] + pub fn from_suffix_with_elems( + source: B, + count: usize, + ) -> Result<(B, Ref), CastError> { + static_assert_dst_is_not_zst!(T); + let expected_len = match T::size_for_metadata(count) { + Some(len) => len, + None => return Err(SizeError::new(source).into()), + }; + let split_at = if let Some(split_at) = source.len().checked_sub(expected_len) { + split_at + } else { + return Err(SizeError::new(source).into()); + }; + // SAFETY: The preceding `source.len().checked_sub(expected_len)` + // guarantees that `split_at` is in-bounds. + let (bytes, suffix) = unsafe { source.split_at_unchecked(split_at) }; + Self::from_bytes(suffix).map(move |l| (bytes, l)) + } +} + +impl<'a, B, T> Ref +where + B: 'a + IntoByteSlice<'a>, + T: FromBytes + KnownLayout + Immutable + ?Sized, +{ + /// Converts this `Ref` into a reference. + /// + /// `into_ref` consumes the `Ref`, and returns a reference to `T`. + /// + /// Note: this is an associated function, which means that you have to call + /// it as `Ref::into_ref(r)` instead of `r.into_ref()`. This is so that + /// there is no conflict with a method on the inner type. + #[must_use = "has no side effects"] + #[inline(always)] + pub fn into_ref(r: Self) -> &'a T { + // Presumably unreachable, since we've guarded each constructor of `Ref`. + static_assert_dst_is_not_zst!(T); + + // SAFETY: We don't call any methods on `b` other than those provided by + // `IntoByteSlice`. + let b = unsafe { r.into_byte_slice() }; + let b = b.into_byte_slice(); + + if let crate::layout::SizeInfo::Sized { .. 
} = T::LAYOUT.size_info { + let ptr = Ptr::from_ref(b); + // SAFETY: We just checked that `T: Sized`. By invariant on `r`, + // `b`'s size is equal to `size_of::()`. + let ptr = unsafe { cast_for_sized::(ptr) }; + + // SAFETY: None of the preceding transformations modifies the + // address of the pointer, and by invariant on `r`, we know that it + // is validly-aligned. + let ptr = unsafe { ptr.assume_alignment::() }; + return ptr.as_ref(); + } + + // PANICS: By post-condition on `into_byte_slice`, `b`'s size and + // alignment are valid for `T`. By post-condition, `b.into_byte_slice()` + // produces a byte slice with identical address and length to that + // produced by `b.deref()`. + let ptr = Ptr::from_ref(b.into_byte_slice()) + .try_cast_into_no_leftover::(None) + .expect("zerocopy internal error: into_ref should be infallible"); + let ptr = ptr.recall_validity(); + ptr.as_ref() + } +} + +impl<'a, B, T> Ref +where + B: 'a + IntoByteSliceMut<'a>, + T: FromBytes + IntoBytes + KnownLayout + ?Sized, +{ + /// Converts this `Ref` into a mutable reference. + /// + /// `into_mut` consumes the `Ref`, and returns a mutable reference to `T`. + /// + /// Note: this is an associated function, which means that you have to call + /// it as `Ref::into_mut(r)` instead of `r.into_mut()`. This is so that + /// there is no conflict with a method on the inner type. + #[must_use = "has no side effects"] + #[inline(always)] + pub fn into_mut(r: Self) -> &'a mut T { + // Presumably unreachable, since we've guarded each constructor of `Ref`. + static_assert_dst_is_not_zst!(T); + + // SAFETY: We don't call any methods on `b` other than those provided by + // `IntoByteSliceMut`. + let b = unsafe { r.into_byte_slice_mut() }; + let b = b.into_byte_slice_mut(); + + if let crate::layout::SizeInfo::Sized { .. } = T::LAYOUT.size_info { + let ptr = Ptr::from_mut(b); + // SAFETY: We just checked that `T: Sized`. By invariant on `r`, + // `b`'s size is equal to `size_of::()`. 
+ let ptr = unsafe { + cast_for_sized::< + T, + _, + (BecauseRead, BecauseExclusive), + (BecauseMutationCompatible, BecauseInvariantsEq), + >(ptr) + }; + + // SAFETY: None of the preceding transformations modifies the + // address of the pointer, and by invariant on `r`, we know that it + // is validly-aligned. + let ptr = unsafe { ptr.assume_alignment::() }; + return ptr.as_mut(); + } + + // PANICS: By post-condition on `into_byte_slice_mut`, `b`'s size and + // alignment are valid for `T`. By post-condition, + // `b.into_byte_slice_mut()` produces a byte slice with identical + // address and length to that produced by `b.deref_mut()`. + let ptr = Ptr::from_mut(b.into_byte_slice_mut()) + .try_cast_into_no_leftover::(None) + .expect("zerocopy internal error: into_ref should be infallible"); + let ptr = ptr.recall_validity::<_, (_, (_, _))>(); + ptr.as_mut() + } +} + +impl Ref +where + B: ByteSlice, + T: ?Sized, +{ + /// Gets the underlying bytes. + /// + /// Note: this is an associated function, which means that you have to call + /// it as `Ref::bytes(r)` instead of `r.bytes()`. This is so that there is + /// no conflict with a method on the inner type. + #[inline] + pub fn bytes(r: &Self) -> &[u8] { + // SAFETY: We don't call any methods on `b` other than those provided by + // `ByteSlice`. + unsafe { r.as_byte_slice().deref() } + } +} + +impl Ref +where + B: ByteSliceMut, + T: ?Sized, +{ + /// Gets the underlying bytes mutably. + /// + /// Note: this is an associated function, which means that you have to call + /// it as `Ref::bytes_mut(r)` instead of `r.bytes_mut()`. This is so that + /// there is no conflict with a method on the inner type. + #[inline] + pub fn bytes_mut(r: &mut Self) -> &mut [u8] { + // SAFETY: We don't call any methods on `b` other than those provided by + // `ByteSliceMut`. + unsafe { r.as_byte_slice_mut().deref_mut() } + } +} + +impl Ref +where + B: ByteSlice, + T: FromBytes, +{ + /// Reads a copy of `T`. 
+ /// + /// Note: this is an associated function, which means that you have to call + /// it as `Ref::read(r)` instead of `r.read()`. This is so that there is no + /// conflict with a method on the inner type. + #[must_use = "has no side effects"] + #[inline] + pub fn read(r: &Self) -> T { + // SAFETY: We don't call any methods on `b` other than those provided by + // `ByteSlice`. + let b = unsafe { r.as_byte_slice() }; + + // SAFETY: By postcondition on `as_byte_slice`, we know that `b` is a + // valid size and alignment for `T`. By safety invariant on `ByteSlice`, + // we know that this is preserved via `.deref()`. Because `T: + // FromBytes`, it is sound to interpret these bytes as a `T`. + unsafe { ptr::read(b.deref().as_ptr().cast::()) } + } +} + +impl Ref +where + B: ByteSliceMut, + T: IntoBytes, +{ + /// Writes the bytes of `t` and then forgets `t`. + /// + /// Note: this is an associated function, which means that you have to call + /// it as `Ref::write(r, t)` instead of `r.write(t)`. This is so that there + /// is no conflict with a method on the inner type. + #[inline] + pub fn write(r: &mut Self, t: T) { + // SAFETY: We don't call any methods on `b` other than those provided by + // `ByteSliceMut`. + let b = unsafe { r.as_byte_slice_mut() }; + + // SAFETY: By postcondition on `as_byte_slice_mut`, we know that `b` is + // a valid size and alignment for `T`. By safety invariant on + // `ByteSlice`, we know that this is preserved via `.deref()`. Writing + // `t` to the buffer will allow all of the bytes of `t` to be accessed + // as a `[u8]`, but because `T: IntoBytes`, we know that this is sound. + unsafe { ptr::write(b.deref_mut().as_mut_ptr().cast::(), t) } + } +} + +impl Deref for Ref +where + B: ByteSlice, + T: FromBytes + KnownLayout + Immutable + ?Sized, +{ + type Target = T; + #[inline] + fn deref(&self) -> &T { + // Presumably unreachable, since we've guarded each constructor of `Ref`. 
+ static_assert_dst_is_not_zst!(T); + + // SAFETY: We don't call any methods on `b` other than those provided by + // `ByteSlice`. + let b = unsafe { self.as_byte_slice() }; + let b = b.deref(); + + if let crate::layout::SizeInfo::Sized { .. } = T::LAYOUT.size_info { + let ptr = Ptr::from_ref(b); + // SAFETY: We just checked that `T: Sized`. By invariant on `r`, + // `b`'s size is equal to `size_of::()`. + let ptr = unsafe { cast_for_sized::(ptr) }; + + // SAFETY: None of the preceding transformations modifies the + // address of the pointer, and by invariant on `r`, we know that it + // is validly-aligned. + let ptr = unsafe { ptr.assume_alignment::() }; + return ptr.as_ref(); + } + + // PANICS: By postcondition on `as_byte_slice`, `b`'s size and alignment + // are valid for `T`, and by invariant on `ByteSlice`, these are + // preserved through `.deref()`, so this `unwrap` will not panic. + let ptr = Ptr::from_ref(b) + .try_cast_into_no_leftover::(None) + .expect("zerocopy internal error: Deref::deref should be infallible"); + let ptr = ptr.recall_validity(); + ptr.as_ref() + } +} + +impl DerefMut for Ref +where + B: ByteSliceMut, + // FIXME(#251): We can't remove `Immutable` here because it's required by + // the impl of `Deref`, which is a super-trait of `DerefMut`. Maybe we can + // add a separate inherent method for this? + T: FromBytes + IntoBytes + KnownLayout + Immutable + ?Sized, +{ + #[inline] + fn deref_mut(&mut self) -> &mut T { + // Presumably unreachable, since we've guarded each constructor of `Ref`. + static_assert_dst_is_not_zst!(T); + + // SAFETY: We don't call any methods on `b` other than those provided by + // `ByteSliceMut`. + let b = unsafe { self.as_byte_slice_mut() }; + let b = b.deref_mut(); + + if let crate::layout::SizeInfo::Sized { .. } = T::LAYOUT.size_info { + let ptr = Ptr::from_mut(b); + // SAFETY: We just checked that `T: Sized`. By invariant on `r`, + // `b`'s size is equal to `size_of::()`. 
+ let ptr = unsafe { + cast_for_sized::< + T, + _, + (BecauseRead, BecauseExclusive), + (BecauseMutationCompatible, BecauseInvariantsEq), + >(ptr) + }; + + // SAFETY: None of the preceding transformations modifies the + // address of the pointer, and by invariant on `r`, we know that it + // is validly-aligned. + let ptr = unsafe { ptr.assume_alignment::() }; + return ptr.as_mut(); + } + + // PANICS: By postcondition on `as_byte_slice_mut`, `b`'s size and + // alignment are valid for `T`, and by invariant on `ByteSlice`, these + // are preserved through `.deref_mut()`, so this `unwrap` will not + // panic. + let ptr = Ptr::from_mut(b) + .try_cast_into_no_leftover::(None) + .expect("zerocopy internal error: DerefMut::deref_mut should be infallible"); + let ptr = ptr.recall_validity::<_, (_, (_, BecauseExclusive))>(); + ptr.as_mut() + } +} + +impl Display for Ref +where + B: ByteSlice, + T: FromBytes + Display + KnownLayout + Immutable + ?Sized, +{ + #[inline] + fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { + let inner: &T = self; + inner.fmt(fmt) + } +} + +impl Debug for Ref +where + B: ByteSlice, + T: FromBytes + Debug + KnownLayout + Immutable + ?Sized, +{ + #[inline] + fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { + let inner: &T = self; + fmt.debug_tuple("Ref").field(&inner).finish() + } +} + +impl Eq for Ref +where + B: ByteSlice, + T: FromBytes + Eq + KnownLayout + Immutable + ?Sized, +{ +} + +impl PartialEq for Ref +where + B: ByteSlice, + T: FromBytes + PartialEq + KnownLayout + Immutable + ?Sized, +{ + #[inline] + fn eq(&self, other: &Self) -> bool { + self.deref().eq(other.deref()) + } +} + +impl Ord for Ref +where + B: ByteSlice, + T: FromBytes + Ord + KnownLayout + Immutable + ?Sized, +{ + #[inline] + fn cmp(&self, other: &Self) -> Ordering { + let inner: &T = self; + let other_inner: &T = other; + inner.cmp(other_inner) + } +} + +impl PartialOrd for Ref +where + B: ByteSlice, + T: FromBytes + PartialOrd + KnownLayout + Immutable + 
?Sized, +{ + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + let inner: &T = self; + let other_inner: &T = other; + inner.partial_cmp(other_inner) + } +} + +/// # Safety +/// +/// `T: Sized` and `ptr`'s referent must have size `size_of::()`. +#[inline(always)] +unsafe fn cast_for_sized<'a, T, A, R, S>( + ptr: Ptr<'a, [u8], (A, Aligned, Valid)>, +) -> Ptr<'a, T, (A, Unaligned, Valid)> +where + T: FromBytes + KnownLayout + ?Sized, + A: crate::invariant::Aliasing, + [u8]: MutationCompatible, + T: TransmuteFromPtr, +{ + use crate::pointer::cast::{Cast, Project}; + + enum CastForSized {} + + // SAFETY: `CastForSized` is only used below with the input `ptr`, which the + // caller promises has size `size_of::()`. Thus, the referent produced in + // this cast has the same size as `ptr`'s referent. All operations preserve + // provenance. + unsafe impl Project<[u8], T> for CastForSized { + #[inline(always)] + fn project(src: PtrInner<'_, [u8]>) -> *mut T { + T::raw_from_ptr_len( + src.as_non_null().cast(), + ::from_elem_count(0), + ) + .as_ptr() + } + } + + // SAFETY: The `Project::project` impl preserves referent address. + unsafe impl Cast<[u8], T> for CastForSized {} + + ptr.recall_validity::() + .cast::<_, CastForSized, _>() + .recall_validity::() +} + +#[cfg(test)] +#[allow(clippy::assertions_on_result_states)] +mod tests { + use core::convert::TryInto as _; + + use super::*; + use crate::util::testutil::*; + + #[test] + fn test_mut_slice_into_ref() { + // Prior to #1260/#1299, calling `into_ref` on a `&mut [u8]`-backed + // `Ref` was not supported. + let mut buf = [0u8]; + let r = Ref::<&mut [u8], u8>::from_bytes(&mut buf).unwrap(); + assert_eq!(Ref::into_ref(r), &0); + } + + #[test] + fn test_address() { + // Test that the `Deref` and `DerefMut` implementations return a + // reference which points to the right region of memory. 
+ + let buf = [0]; + let r = Ref::<_, u8>::from_bytes(&buf[..]).unwrap(); + let buf_ptr = buf.as_ptr(); + let deref_ptr: *const u8 = r.deref(); + assert_eq!(buf_ptr, deref_ptr); + + let buf = [0]; + let r = Ref::<_, [u8]>::from_bytes(&buf[..]).unwrap(); + let buf_ptr = buf.as_ptr(); + let deref_ptr = r.deref().as_ptr(); + assert_eq!(buf_ptr, deref_ptr); + } + + // Verify that values written to a `Ref` are properly shared between the + // typed and untyped representations, that reads via `deref` and `read` + // behave the same, and that writes via `deref_mut` and `write` behave the + // same. + fn test_new_helper(mut r: Ref<&mut [u8], AU64>) { + // assert that the value starts at 0 + assert_eq!(*r, AU64(0)); + assert_eq!(Ref::read(&r), AU64(0)); + + // Assert that values written to the typed value are reflected in the + // byte slice. + const VAL1: AU64 = AU64(0xFF00FF00FF00FF00); + *r = VAL1; + assert_eq!(Ref::bytes(&r), &VAL1.to_bytes()); + *r = AU64(0); + Ref::write(&mut r, VAL1); + assert_eq!(Ref::bytes(&r), &VAL1.to_bytes()); + + // Assert that values written to the byte slice are reflected in the + // typed value. + const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1` + Ref::bytes_mut(&mut r).copy_from_slice(&VAL2.to_bytes()[..]); + assert_eq!(*r, VAL2); + assert_eq!(Ref::read(&r), VAL2); + } + + // Verify that values written to a `Ref` are properly shared between the + // typed and untyped representations; pass a value with `typed_len` `AU64`s + // backed by an array of `typed_len * 8` bytes. + fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) { + // Assert that the value starts out zeroed. + assert_eq!(&*r, vec![AU64(0); typed_len].as_slice()); + + // Check the backing storage is the exact same slice. 
+ let untyped_len = typed_len * 8; + assert_eq!(Ref::bytes(&r).len(), untyped_len); + assert_eq!(Ref::bytes(&r).as_ptr(), r.as_ptr().cast::()); + + // Assert that values written to the typed value are reflected in the + // byte slice. + const VAL1: AU64 = AU64(0xFF00FF00FF00FF00); + for typed in &mut *r { + *typed = VAL1; + } + assert_eq!(Ref::bytes(&r), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice()); + + // Assert that values written to the byte slice are reflected in the + // typed value. + const VAL2: AU64 = AU64(!VAL1.0); // different from VAL1 + Ref::bytes_mut(&mut r).copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len)); + assert!(r.iter().copied().all(|x| x == VAL2)); + } + + #[test] + fn test_new_aligned_sized() { + // Test that a properly-aligned, properly-sized buffer works for new, + // new_from_prefix, and new_from_suffix, and that new_from_prefix and + // new_from_suffix return empty slices. Test that a properly-aligned + // buffer whose length is a multiple of the element size works for + // new_slice. + + // A buffer with an alignment of 8. + let mut buf = Align::<[u8; 8], AU64>::default(); + // `buf.t` should be aligned to 8, so this should always succeed. + test_new_helper(Ref::<_, AU64>::from_bytes(&mut buf.t[..]).unwrap()); + { + // In a block so that `r` and `suffix` don't live too long. + buf.set_default(); + let (r, suffix) = Ref::<_, AU64>::from_prefix(&mut buf.t[..]).unwrap(); + assert!(suffix.is_empty()); + test_new_helper(r); + } + { + buf.set_default(); + let (prefix, r) = Ref::<_, AU64>::from_suffix(&mut buf.t[..]).unwrap(); + assert!(prefix.is_empty()); + test_new_helper(r); + } + + // A buffer with alignment 8 and length 24. We choose this length very + // intentionally: if we instead used length 16, then the prefix and + // suffix lengths would be identical. In the past, we used length 16, + // which resulted in this test failing to discover the bug uncovered in + // #506. 
+ let mut buf = Align::<[u8; 24], AU64>::default(); + // `buf.t` should be aligned to 8 and have a length which is a multiple + // of `size_of::()`, so this should always succeed. + test_new_helper_slice(Ref::<_, [AU64]>::from_bytes(&mut buf.t[..]).unwrap(), 3); + buf.set_default(); + let r = Ref::<_, [AU64]>::from_bytes_with_elems(&mut buf.t[..], 3).unwrap(); + test_new_helper_slice(r, 3); + + let ascending: [u8; 24] = (0..24).collect::>().try_into().unwrap(); + // 16 ascending bytes followed by 8 zeros. + let mut ascending_prefix = ascending; + ascending_prefix[16..].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]); + // 8 zeros followed by 16 ascending bytes. + let mut ascending_suffix = ascending; + ascending_suffix[..8].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]); + { + buf.t = ascending_suffix; + let (r, suffix) = Ref::<_, [AU64]>::from_prefix_with_elems(&mut buf.t[..], 1).unwrap(); + assert_eq!(suffix, &ascending[8..]); + test_new_helper_slice(r, 1); + } + { + buf.t = ascending_prefix; + let (prefix, r) = Ref::<_, [AU64]>::from_suffix_with_elems(&mut buf.t[..], 1).unwrap(); + assert_eq!(prefix, &ascending[..16]); + test_new_helper_slice(r, 1); + } + } + + #[test] + fn test_new_oversized() { + // Test that a properly-aligned, overly-sized buffer works for + // `new_from_prefix` and `new_from_suffix`, and that they return the + // remainder and prefix of the slice respectively. + + let mut buf = Align::<[u8; 16], AU64>::default(); + { + // In a block so that `r` and `suffix` don't live too long. `buf.t` + // should be aligned to 8, so this should always succeed. + let (r, suffix) = Ref::<_, AU64>::from_prefix(&mut buf.t[..]).unwrap(); + assert_eq!(suffix.len(), 8); + test_new_helper(r); + } + { + buf.set_default(); + // `buf.t` should be aligned to 8, so this should always succeed. 
+ let (prefix, r) = Ref::<_, AU64>::from_suffix(&mut buf.t[..]).unwrap(); + assert_eq!(prefix.len(), 8); + test_new_helper(r); + } + } + + #[test] + #[allow(clippy::cognitive_complexity)] + fn test_new_error() { + // Fail because the buffer is too large. + + // A buffer with an alignment of 8. + let buf = Align::<[u8; 16], AU64>::default(); + // `buf.t` should be aligned to 8, so only the length check should fail. + assert!(Ref::<_, AU64>::from_bytes(&buf.t[..]).is_err()); + + // Fail because the buffer is too small. + + // A buffer with an alignment of 8. + let buf = Align::<[u8; 4], AU64>::default(); + // `buf.t` should be aligned to 8, so only the length check should fail. + assert!(Ref::<_, AU64>::from_bytes(&buf.t[..]).is_err()); + assert!(Ref::<_, AU64>::from_prefix(&buf.t[..]).is_err()); + assert!(Ref::<_, AU64>::from_suffix(&buf.t[..]).is_err()); + + // Fail because the length is not a multiple of the element size. + + let buf = Align::<[u8; 12], AU64>::default(); + // `buf.t` has length 12, but element size is 8. + assert!(Ref::<_, [AU64]>::from_bytes(&buf.t[..]).is_err()); + + // Fail because the buffer is too short. + let buf = Align::<[u8; 12], AU64>::default(); + // `buf.t` has length 12, but the element size is 8 (and we're expecting + // two of them). For each function, we test with a length that would + // cause the size to overflow `usize`, and with a normal length that + // will fail thanks to the buffer being too short; these are different + // error paths, and while the error types are the same, the distinction + // shows up in code coverage metrics. 
+ let n = (usize::MAX / mem::size_of::()) + 1; + assert!(Ref::<_, [AU64]>::from_bytes_with_elems(&buf.t[..], n).is_err()); + assert!(Ref::<_, [AU64]>::from_bytes_with_elems(&buf.t[..], 2).is_err()); + assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[..], n).is_err()); + assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[..], 2).is_err()); + assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[..], n).is_err()); + assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[..], 2).is_err()); + + // Fail because the alignment is insufficient. + + // A buffer with an alignment of 8. An odd buffer size is chosen so that + // the last byte of the buffer has odd alignment. + let buf = Align::<[u8; 13], AU64>::default(); + // Slicing from 1, we get a buffer with size 12 (so the length check + // should succeed) but an alignment of only 1, which is insufficient. + assert!(Ref::<_, AU64>::from_bytes(&buf.t[1..]).is_err()); + assert!(Ref::<_, AU64>::from_prefix(&buf.t[1..]).is_err()); + assert!(Ref::<_, [AU64]>::from_bytes(&buf.t[1..]).is_err()); + assert!(Ref::<_, [AU64]>::from_bytes_with_elems(&buf.t[1..], 1).is_err()); + assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[1..], 1).is_err()); + assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[1..], 1).is_err()); + // Slicing is unnecessary here because `new_from_suffix` uses the suffix + // of the slice, which has odd alignment. + assert!(Ref::<_, AU64>::from_suffix(&buf.t[..]).is_err()); + + // Fail due to arithmetic overflow. 
+ + let buf = Align::<[u8; 16], AU64>::default(); + let unreasonable_len = usize::MAX / mem::size_of::() + 1; + assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[..], unreasonable_len).is_err()); + assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[..], unreasonable_len).is_err()); + } + + #[test] + #[allow(unstable_name_collisions)] + #[allow(clippy::as_conversions)] + fn test_into_ref_mut() { + #[allow(unused)] + use crate::util::AsAddress as _; + + let mut buf = Align::<[u8; 8], u64>::default(); + let r = Ref::<_, u64>::from_bytes(&buf.t[..]).unwrap(); + let rf = Ref::into_ref(r); + assert_eq!(rf, &0u64); + let buf_addr = (&buf.t as *const [u8; 8]).addr(); + assert_eq!((rf as *const u64).addr(), buf_addr); + + let r = Ref::<_, u64>::from_bytes(&mut buf.t[..]).unwrap(); + let rf = Ref::into_mut(r); + assert_eq!(rf, &mut 0u64); + assert_eq!((rf as *mut u64).addr(), buf_addr); + + *rf = u64::MAX; + assert_eq!(buf.t, [0xFF; 8]); + } + + #[test] + fn test_display_debug() { + let buf = Align::<[u8; 8], u64>::default(); + let r = Ref::<_, u64>::from_bytes(&buf.t[..]).unwrap(); + assert_eq!(format!("{}", r), "0"); + assert_eq!(format!("{:?}", r), "Ref(0)"); + + let buf = Align::<[u8; 8], u64>::default(); + let r = Ref::<_, [u64]>::from_bytes(&buf.t[..]).unwrap(); + assert_eq!(format!("{:?}", r), "Ref([0])"); + } + + #[test] + fn test_eq() { + let buf1 = 0_u64; + let r1 = Ref::<_, u64>::from_bytes(buf1.as_bytes()).unwrap(); + let buf2 = 0_u64; + let r2 = Ref::<_, u64>::from_bytes(buf2.as_bytes()).unwrap(); + assert_eq!(r1, r2); + } + + #[test] + fn test_ne() { + let buf1 = 0_u64; + let r1 = Ref::<_, u64>::from_bytes(buf1.as_bytes()).unwrap(); + let buf2 = 1_u64; + let r2 = Ref::<_, u64>::from_bytes(buf2.as_bytes()).unwrap(); + assert_ne!(r1, r2); + } + + #[test] + fn test_ord() { + let buf1 = 0_u64; + let r1 = Ref::<_, u64>::from_bytes(buf1.as_bytes()).unwrap(); + let buf2 = 1_u64; + let r2 = Ref::<_, u64>::from_bytes(buf2.as_bytes()).unwrap(); + assert!(r1 < 
r2); + assert_eq!(PartialOrd::partial_cmp(&r1, &r2), Some(Ordering::Less)); + assert_eq!(Ord::cmp(&r1, &r2), Ordering::Less); + } +} + +#[cfg(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS))] +mod benches { + use test::{self, Bencher}; + + use super::*; + use crate::util::testutil::*; + + #[bench] + fn bench_from_bytes_sized(b: &mut Bencher) { + let buf = Align::<[u8; 8], AU64>::default(); + // `buf.t` should be aligned to 8, so this should always succeed. + let bytes = &buf.t[..]; + b.iter(|| test::black_box(Ref::<_, AU64>::from_bytes(test::black_box(bytes)).unwrap())); + } + + #[bench] + fn bench_into_ref_sized(b: &mut Bencher) { + let buf = Align::<[u8; 8], AU64>::default(); + let bytes = &buf.t[..]; + let r = Ref::<_, AU64>::from_bytes(bytes).unwrap(); + b.iter(|| test::black_box(Ref::into_ref(test::black_box(r)))); + } + + #[bench] + fn bench_into_mut_sized(b: &mut Bencher) { + let mut buf = Align::<[u8; 8], AU64>::default(); + let buf = &mut buf.t[..]; + let _ = Ref::<_, AU64>::from_bytes(&mut *buf).unwrap(); + b.iter(move || { + // SAFETY: The preceding `from_bytes` succeeded, and so we know that + // `buf` is validly-aligned and has the correct length. + let r = unsafe { Ref::<&mut [u8], AU64>::new_unchecked(&mut *buf) }; + test::black_box(Ref::into_mut(test::black_box(r))); + }); + } + + #[bench] + fn bench_deref_sized(b: &mut Bencher) { + let buf = Align::<[u8; 8], AU64>::default(); + let bytes = &buf.t[..]; + let r = Ref::<_, AU64>::from_bytes(bytes).unwrap(); + b.iter(|| { + let temp = test::black_box(r); + test::black_box(temp.deref()); + }); + } + + #[bench] + fn bench_deref_mut_sized(b: &mut Bencher) { + let mut buf = Align::<[u8; 8], AU64>::default(); + let buf = &mut buf.t[..]; + let _ = Ref::<_, AU64>::from_bytes(&mut *buf).unwrap(); + b.iter(|| { + // SAFETY: The preceding `from_bytes` succeeded, and so we know that + // `buf` is validly-aligned and has the correct length. 
+ let r = unsafe { Ref::<&mut [u8], AU64>::new_unchecked(&mut *buf) }; + let mut temp = test::black_box(r); + test::black_box(temp.deref_mut()); + }); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/split_at.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/split_at.rs new file mode 100644 index 0000000000000000000000000000000000000000..3ba4dd124d04eba89a977b38b5d1bad1c2ac1d3b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/split_at.rs @@ -0,0 +1,966 @@ +// Copyright 2025 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use super::*; +use crate::pointer::invariant::{Aligned, Exclusive, Invariants, Shared, Valid}; + +/// Types that can be split in two. +/// +/// This trait generalizes Rust's existing support for splitting slices to +/// support slices and slice-based dynamically-sized types ("slice DSTs"). +/// +/// # Implementation +/// +/// **Do not implement this trait yourself!** Instead, use +/// [`#[derive(SplitAt)]`][derive]; e.g.: +/// +/// ``` +/// # use zerocopy_derive::{SplitAt, KnownLayout}; +/// #[derive(SplitAt, KnownLayout)] +/// #[repr(C)] +/// struct MyStruct { +/// # /* +/// ..., +/// # */ +/// // `SplitAt` types must have at least one field. +/// field: T, +/// } +/// ``` +/// +/// This derive performs a sophisticated, compile-time safety analysis to +/// determine whether a type is `SplitAt`. +/// +/// # Safety +/// +/// This trait does not convey any safety guarantees to code outside this crate. +/// +/// You must not rely on the `#[doc(hidden)]` internals of `SplitAt`. 
Future +/// releases of zerocopy may make backwards-breaking changes to these items, +/// including changes that only affect soundness, which may cause code which +/// uses those items to silently become unsound. +/// +#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::SplitAt")] +#[cfg_attr( + not(feature = "derive"), + doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.SplitAt.html"), +)] +#[cfg_attr( + not(no_zerocopy_diagnostic_on_unimplemented_1_78_0), + diagnostic::on_unimplemented(note = "Consider adding `#[derive(SplitAt)]` to `{Self}`") +)] +// # Safety +// +// The trailing slice is well-aligned for its element type. `Self` is `[T]`, or +// a `repr(C)` or `repr(transparent)` slice DST. +pub unsafe trait SplitAt: KnownLayout { + /// The element type of the trailing slice. + type Elem; + + #[doc(hidden)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; + + /// Unsafely splits `self` in two. + /// + /// # Safety + /// + /// The caller promises that `l_len` is not greater than the length of + /// `self`'s trailing slice. + #[inline] + #[must_use] + unsafe fn split_at_unchecked(&self, l_len: usize) -> Split<&Self> { + // SAFETY: By precondition on the caller, `l_len <= self.len()`. + unsafe { Split::<&Self>::new(self, l_len) } + } + + /// Attempts to split `self` in two. + /// + /// Returns `None` if `l_len` is greater than the length of `self`'s + /// trailing slice. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::{SplitAt, FromBytes}; + /// # use zerocopy_derive::*; + /// + /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct Packet { + /// length: u8, + /// body: [u8], + /// } + /// + /// // These bytes encode a `Packet`. 
+ /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let packet = Packet::ref_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]); + /// + /// // Attempt to split `packet` at `length`. + /// let split = packet.split_at(packet.length as usize).unwrap(); + /// + /// // Use the `Immutable` bound on `Packet` to prove that it's okay to + /// // return concurrent references to `packet` and `rest`. + /// let (packet, rest) = split.via_immutable(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4]); + /// assert_eq!(rest, [5, 6, 7, 8, 9]); + /// ``` + #[inline] + #[must_use = "has no side effects"] + fn split_at(&self, l_len: usize) -> Option> { + MetadataOf::new_in_bounds(self, l_len).map( + #[inline(always)] + |l_len| { + // SAFETY: We have ensured that `l_len <= self.len()` (by + // post-condition on `MetadataOf::new_in_bounds`) + unsafe { Split::new(self, l_len.get()) } + }, + ) + } + + /// Unsafely splits `self` in two. + /// + /// # Safety + /// + /// The caller promises that `l_len` is not greater than the length of + /// `self`'s trailing slice. + #[inline] + #[must_use] + unsafe fn split_at_mut_unchecked(&mut self, l_len: usize) -> Split<&mut Self> { + // SAFETY: By precondition on the caller, `l_len <= self.len()`. + unsafe { Split::<&mut Self>::new(self, l_len) } + } + + /// Attempts to split `self` in two. + /// + /// Returns `None` if `l_len` is greater than the length of `self`'s + /// trailing slice, or if the given `l_len` would result in [the trailing + /// padding](KnownLayout#slice-dst-layout) of the left portion overlapping + /// the right portion. 
+ /// + /// + /// # Examples + /// + /// ``` + /// use zerocopy::{SplitAt, FromBytes}; + /// # use zerocopy_derive::*; + /// + /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes)] + /// #[repr(C)] + /// struct Packet { + /// length: u8, + /// body: B, + /// } + /// + /// // These bytes encode a `Packet`. + /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]); + /// + /// { + /// // Attempt to split `packet` at `length`. + /// let split = packet.split_at_mut(packet.length as usize).unwrap(); + /// + /// // Use the `IntoBytes` bound on `Packet` to prove that it's okay to + /// // return concurrent references to `packet` and `rest`. + /// let (packet, rest) = split.via_into_bytes(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4]); + /// assert_eq!(rest, [5, 6, 7, 8, 9]); + /// + /// rest.fill(0); + /// } + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]); + /// ``` + #[inline] + fn split_at_mut(&mut self, l_len: usize) -> Option> { + MetadataOf::new_in_bounds(self, l_len).map( + #[inline(always)] + |l_len| { + // SAFETY: We have ensured that `l_len <= self.len()` (by + // post-condition on `MetadataOf::new_in_bounds`) + unsafe { Split::new(self, l_len.get()) } + }, + ) + } +} + +// SAFETY: `[T]`'s trailing slice is `[T]`, which is trivially aligned. +unsafe impl SplitAt for [T] { + type Elem = T; + + #[inline] + #[allow(dead_code)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized, + { + } +} + +/// A `T` that has been split into two possibly-overlapping parts. +/// +/// For some dynamically sized types, the padding that appears after the +/// trailing slice field [is a dynamic function of the trailing slice +/// length](KnownLayout#slice-dst-layout). 
If `T` is split at a length that +/// requires trailing padding, the trailing padding of the left part of the +/// split `T` will overlap the right part. If `T` is a mutable reference or +/// permits interior mutation, you must ensure that the left and right parts do +/// not overlap. You can do this at zero-cost using using +/// [`Self::via_immutable`], [`Self::via_into_bytes`], or +/// [`Self::via_unaligned`], or with a dynamic check by using +/// [`Self::via_runtime_check`]. +#[derive(Debug)] +pub struct Split { + /// A pointer to the source slice DST. + source: T, + /// The length of the future left half of `source`. + /// + /// # Safety + /// + /// If `source` is a pointer to a slice DST, `l_len` is no greater than + /// `source`'s length. + l_len: usize, +} + +impl Split { + /// Produces a `Split` of `source` with `l_len`. + /// + /// # Safety + /// + /// `l_len` is no greater than `source`'s length. + #[inline(always)] + unsafe fn new(source: T, l_len: usize) -> Self { + Self { source, l_len } + } +} + +impl<'a, T> Split<&'a T> +where + T: ?Sized + SplitAt, +{ + #[inline(always)] + fn into_ptr(self) -> Split> { + let source = Ptr::from_ref(self.source); + // SAFETY: `Ptr::from_ref(self.source)` points to exactly `self.source` + // and thus maintains the invariants of `self` with respect to `l_len`. + unsafe { Split::new(source, self.l_len) } + } + + /// Produces the split parts of `self`, using [`Immutable`] to ensure that + /// it is sound to have concurrent references to both parts. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::{SplitAt, FromBytes}; + /// # use zerocopy_derive::*; + /// + /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable)] + /// #[repr(C)] + /// struct Packet { + /// length: u8, + /// body: [u8], + /// } + /// + /// // These bytes encode a `Packet`. 
+ /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let packet = Packet::ref_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]); + /// + /// // Attempt to split `packet` at `length`. + /// let split = packet.split_at(packet.length as usize).unwrap(); + /// + /// // Use the `Immutable` bound on `Packet` to prove that it's okay to + /// // return concurrent references to `packet` and `rest`. + /// let (packet, rest) = split.via_immutable(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4]); + /// assert_eq!(rest, [5, 6, 7, 8, 9]); + /// ``` + #[must_use = "has no side effects"] + #[inline(always)] + pub fn via_immutable(self) -> (&'a T, &'a [T::Elem]) + where + T: Immutable, + { + let (l, r) = self.into_ptr().via_immutable(); + (l.as_ref(), r.as_ref()) + } + + /// Produces the split parts of `self`, using [`IntoBytes`] to ensure that + /// it is sound to have concurrent references to both parts. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::{SplitAt, FromBytes}; + /// # use zerocopy_derive::*; + /// + /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, IntoBytes)] + /// #[repr(C)] + /// struct Packet { + /// length: u8, + /// body: B, + /// } + /// + /// // These bytes encode a `Packet`. + /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let packet = Packet::<[u8]>::ref_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]); + /// + /// // Attempt to split `packet` at `length`. + /// let split = packet.split_at(packet.length as usize).unwrap(); + /// + /// // Use the `IntoBytes` bound on `Packet` to prove that it's okay to + /// // return concurrent references to `packet` and `rest`. 
+ /// let (packet, rest) = split.via_into_bytes(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4]); + /// assert_eq!(rest, [5, 6, 7, 8, 9]); + /// ``` + #[must_use = "has no side effects"] + #[inline(always)] + pub fn via_into_bytes(self) -> (&'a T, &'a [T::Elem]) + where + T: IntoBytes, + { + let (l, r) = self.into_ptr().via_into_bytes(); + (l.as_ref(), r.as_ref()) + } + + /// Produces the split parts of `self`, using [`Unaligned`] to ensure that + /// it is sound to have concurrent references to both parts. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::{SplitAt, FromBytes}; + /// # use zerocopy_derive::*; + /// + /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, Unaligned)] + /// #[repr(C)] + /// struct Packet { + /// length: u8, + /// body: [u8], + /// } + /// + /// // These bytes encode a `Packet`. + /// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let packet = Packet::ref_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]); + /// + /// // Attempt to split `packet` at `length`. + /// let split = packet.split_at(packet.length as usize).unwrap(); + /// + /// // Use the `Unaligned` bound on `Packet` to prove that it's okay to + /// // return concurrent references to `packet` and `rest`. + /// let (packet, rest) = split.via_unaligned(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4]); + /// assert_eq!(rest, [5, 6, 7, 8, 9]); + /// ``` + #[must_use = "has no side effects"] + #[inline(always)] + pub fn via_unaligned(self) -> (&'a T, &'a [T::Elem]) + where + T: Unaligned, + { + let (l, r) = self.into_ptr().via_unaligned(); + (l.as_ref(), r.as_ref()) + } + + /// Produces the split parts of `self`, using a dynamic check to ensure that + /// it is sound to have concurrent references to both parts. 
You should + /// prefer using [`Self::via_immutable`], [`Self::via_into_bytes`], or + /// [`Self::via_unaligned`], which have no runtime cost. + /// + /// Note that this check is overly conservative if `T` is [`Immutable`]; for + /// some types, this check will reject some splits which + /// [`Self::via_immutable`] will accept. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::{SplitAt, FromBytes, IntoBytes, network_endian::U16}; + /// # use zerocopy_derive::*; + /// + /// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, Debug)] + /// #[repr(C, align(2))] + /// struct Packet { + /// length: U16, + /// body: [u8], + /// } + /// + /// // These bytes encode a `Packet`. + /// let bytes = [ + /// 4u16.to_be(), + /// 1u16.to_be(), + /// 2u16.to_be(), + /// 3u16.to_be(), + /// 4u16.to_be() + /// ]; + /// + /// let packet = Packet::ref_from_bytes(bytes.as_bytes()).unwrap(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [0, 1, 0, 2, 0, 3, 0, 4]); + /// + /// // Attempt to split `packet` at `length`. + /// let split = packet.split_at(packet.length.into()).unwrap(); + /// + /// // Use a dynamic check to prove that it's okay to return concurrent + /// // references to `packet` and `rest`. + /// let (packet, rest) = split.via_runtime_check().unwrap(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [0, 1, 0, 2]); + /// assert_eq!(rest, [0, 3, 0, 4]); + /// + /// // Attempt to split `packet` at `length - 1`. + /// let idx = packet.length.get() - 1; + /// let split = packet.split_at(idx as usize).unwrap(); + /// + /// // Attempt (and fail) to use a dynamic check to prove that it's okay + /// // to return concurrent references to `packet` and `rest`. Note that + /// // this is a case of `via_runtime_check` being overly conservative. + /// // Although the left and right parts indeed overlap, the `Immutable` + /// // bound ensures that concurrently referencing these overlapping + /// // parts is sound. 
+ /// assert!(split.via_runtime_check().is_err()); + /// ``` + #[must_use = "has no side effects"] + #[inline(always)] + pub fn via_runtime_check(self) -> Result<(&'a T, &'a [T::Elem]), Self> { + match self.into_ptr().via_runtime_check() { + Ok((l, r)) => Ok((l.as_ref(), r.as_ref())), + Err(s) => Err(s.into_ref()), + } + } + + /// Unsafely produces the split parts of `self`. + /// + /// # Safety + /// + /// If `T` permits interior mutation, the trailing padding bytes of the left + /// portion must not overlap the right portion. For some dynamically sized + /// types, the padding that appears after the trailing slice field [is a + /// dynamic function of the trailing slice + /// length](KnownLayout#slice-dst-layout). Thus, for some types, this + /// condition is dependent on the length of the left portion. + #[must_use = "has no side effects"] + #[inline(always)] + pub unsafe fn via_unchecked(self) -> (&'a T, &'a [T::Elem]) { + // SAFETY: The aliasing of `self.into_ptr()` is not `Exclusive`, but the + // caller has promised that if `T` permits interior mutation then the + // left and right portions of `self` split at `l_len` do not overlap. + let (l, r) = unsafe { self.into_ptr().via_unchecked() }; + (l.as_ref(), r.as_ref()) + } +} + +impl<'a, T> Split<&'a mut T> +where + T: ?Sized + SplitAt, +{ + #[inline(always)] + fn into_ptr(self) -> Split> { + let source = Ptr::from_mut(self.source); + // SAFETY: `Ptr::from_mut(self.source)` points to exactly `self.source`, + // and thus maintains the invariants of `self` with respect to `l_len`. + unsafe { Split::new(source, self.l_len) } + } + + /// Produces the split parts of `self`, using [`IntoBytes`] to ensure that + /// it is sound to have concurrent references to both parts. 
+ /// + /// # Examples + /// + /// ``` + /// use zerocopy::{SplitAt, FromBytes}; + /// # use zerocopy_derive::*; + /// + /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes)] + /// #[repr(C)] + /// struct Packet { + /// length: u8, + /// body: B, + /// } + /// + /// // These bytes encode a `Packet`. + /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]); + /// + /// { + /// // Attempt to split `packet` at `length`. + /// let split = packet.split_at_mut(packet.length as usize).unwrap(); + /// + /// // Use the `IntoBytes` bound on `Packet` to prove that it's okay to + /// // return concurrent references to `packet` and `rest`. + /// let (packet, rest) = split.via_into_bytes(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4]); + /// assert_eq!(rest, [5, 6, 7, 8, 9]); + /// + /// rest.fill(0); + /// } + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]); + /// ``` + #[must_use = "has no side effects"] + #[inline(always)] + pub fn via_into_bytes(self) -> (&'a mut T, &'a mut [T::Elem]) + where + T: IntoBytes, + { + let (l, r) = self.into_ptr().via_into_bytes(); + (l.as_mut(), r.as_mut()) + } + + /// Produces the split parts of `self`, using [`Unaligned`] to ensure that + /// it is sound to have concurrent references to both parts. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::{SplitAt, FromBytes}; + /// # use zerocopy_derive::*; + /// + /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes, Unaligned)] + /// #[repr(C)] + /// struct Packet { + /// length: u8, + /// body: B, + /// } + /// + /// // These bytes encode a `Packet`. 
+ /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]); + /// + /// { + /// // Attempt to split `packet` at `length`. + /// let split = packet.split_at_mut(packet.length as usize).unwrap(); + /// + /// // Use the `Unaligned` bound on `Packet` to prove that it's okay to + /// // return concurrent references to `packet` and `rest`. + /// let (packet, rest) = split.via_unaligned(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4]); + /// assert_eq!(rest, [5, 6, 7, 8, 9]); + /// + /// rest.fill(0); + /// } + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]); + /// ``` + #[must_use = "has no side effects"] + #[inline(always)] + pub fn via_unaligned(self) -> (&'a mut T, &'a mut [T::Elem]) + where + T: Unaligned, + { + let (l, r) = self.into_ptr().via_unaligned(); + (l.as_mut(), r.as_mut()) + } + + /// Produces the split parts of `self`, using a dynamic check to ensure that + /// it is sound to have concurrent references to both parts. You should + /// prefer using [`Self::via_into_bytes`] or [`Self::via_unaligned`], which + /// have no runtime cost. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::{SplitAt, FromBytes}; + /// # use zerocopy_derive::*; + /// + /// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes, Debug)] + /// #[repr(C)] + /// struct Packet { + /// length: u8, + /// body: B, + /// } + /// + /// // These bytes encode a `Packet`. + /// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]); + /// + /// { + /// // Attempt to split `packet` at `length`. 
+ /// let split = packet.split_at_mut(packet.length as usize).unwrap(); + /// + /// // Use a dynamic check to prove that it's okay to return concurrent + /// // references to `packet` and `rest`. + /// let (packet, rest) = split.via_runtime_check().unwrap(); + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4]); + /// assert_eq!(rest, [5, 6, 7, 8, 9]); + /// + /// rest.fill(0); + /// } + /// + /// assert_eq!(packet.length, 4); + /// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]); + /// ``` + #[must_use = "has no side effects"] + #[inline(always)] + pub fn via_runtime_check(self) -> Result<(&'a mut T, &'a mut [T::Elem]), Self> { + match self.into_ptr().via_runtime_check() { + Ok((l, r)) => Ok((l.as_mut(), r.as_mut())), + Err(s) => Err(s.into_mut()), + } + } + + /// Unsafely produces the split parts of `self`. + /// + /// # Safety + /// + /// The trailing padding bytes of the left portion must not overlap the + /// right portion. For some dynamically sized types, the padding that + /// appears after the trailing slice field [is a dynamic function of the + /// trailing slice length](KnownLayout#slice-dst-layout). Thus, for some + /// types, this condition is dependent on the length of the left portion. + #[must_use = "has no side effects"] + #[inline(always)] + pub unsafe fn via_unchecked(self) -> (&'a mut T, &'a mut [T::Elem]) { + // SAFETY: The aliasing of `self.into_ptr()` is `Exclusive`, and the + // caller has promised that the left and right portions of `self` split + // at `l_len` do not overlap. + let (l, r) = unsafe { self.into_ptr().via_unchecked() }; + (l.as_mut(), r.as_mut()) + } +} + +impl<'a, T, I> Split> +where + T: ?Sized + SplitAt, + I: Invariants, +{ + fn into_ref(self) -> Split<&'a T> + where + I: Invariants, + { + // SAFETY: `self.source.as_ref()` points to exactly the same referent as + // `self.source` and thus maintains the invariants of `self` with + // respect to `l_len`. 
+ unsafe { Split::new(self.source.as_ref(), self.l_len) } + } + + fn into_mut(self) -> Split<&'a mut T> + where + I: Invariants, + { + // SAFETY: `self.source.as_mut()` points to exactly the same referent as + // `self.source` and thus maintains the invariants of `self` with + // respect to `l_len`. + unsafe { Split::new(self.source.unify_invariants().as_mut(), self.l_len) } + } + + /// Produces the length of `self`'s left part. + #[inline(always)] + fn l_len(&self) -> MetadataOf { + // SAFETY: By invariant on `Split`, `self.l_len` is not greater than the + // length of `self.source`. + unsafe { MetadataOf::::new_unchecked(self.l_len) } + } + + /// Produces the split parts of `self`, using [`Immutable`] to ensure that + /// it is sound to have concurrent references to both parts. + #[inline(always)] + fn via_immutable(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>) + where + T: Immutable, + I: Invariants, + { + // SAFETY: `Aliasing = Shared` and `T: Immutable`. + unsafe { self.via_unchecked() } + } + + /// Produces the split parts of `self`, using [`IntoBytes`] to ensure that + /// it is sound to have concurrent references to both parts. + #[inline(always)] + fn via_into_bytes(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>) + where + T: IntoBytes, + { + // SAFETY: By `T: IntoBytes`, `T` has no padding for any length. + // Consequently, `T` can be split into non-overlapping parts at any + // index. + unsafe { self.via_unchecked() } + } + + /// Produces the split parts of `self`, using [`Unaligned`] to ensure that + /// it is sound to have concurrent references to both parts. + #[inline(always)] + fn via_unaligned(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>) + where + T: Unaligned, + { + // SAFETY: By `T: SplitAt + Unaligned`, `T` is either a slice or a + // `repr(C)` or `repr(transparent)` slice DST that is well-aligned at + // any address and length. 
If `T` is a slice DST with alignment 1, + // `repr(C)` or `repr(transparent)` ensures that no padding is placed + // after the final element of the trailing slice. Consequently, `T` can + // be split into strictly non-overlapping parts any any index. + unsafe { self.via_unchecked() } + } + + /// Produces the split parts of `self`, using a dynamic check to ensure that + /// it is sound to have concurrent references to both parts. You should + /// prefer using [`Self::via_immutable`], [`Self::via_into_bytes`], or + /// [`Self::via_unaligned`], which have no runtime cost. + #[inline(always)] + fn via_runtime_check(self) -> Result<(Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>), Self> { + let l_len = self.l_len(); + // FIXME(#1290): Once we require `KnownLayout` on all fields, add an + // `IS_IMMUTABLE` associated const, and add `T::IS_IMMUTABLE ||` to the + // below check. + if l_len.padding_needed_for() == 0 { + // SAFETY: By `T: SplitAt`, `T` is either `[T]`, or a `repr(C)` or + // `repr(transparent)` slice DST, for which the trailing padding + // needed to accommodate `l_len` trailing elements is + // `l_len.padding_needed_for()`. If no trailing padding is required, + // the left and right parts are strictly non-overlapping. + Ok(unsafe { self.via_unchecked() }) + } else { + Err(self) + } + } + + /// Unsafely produces the split parts of `self`. + /// + /// # Safety + /// + /// The caller promises that if `I::Aliasing` is [`Exclusive`] or `T` + /// permits interior mutation, then `l_len.padding_needed_for() == 0`. + #[inline(always)] + unsafe fn via_unchecked(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>) { + let l_len = self.l_len(); + let inner = self.source.as_inner(); + + // SAFETY: By invariant on `Self::l_len`, `l_len` is not greater than + // the length of `inner`'s trailing slice. + let (left, right) = unsafe { inner.split_at_unchecked(l_len) }; + + // Lemma 0: `left` and `right` conform to the aliasing invariant + // `I::Aliasing`. 
Proof: If `I::Aliasing` is `Exclusive` or `T` permits + // interior mutation, the caller promises that `l_len.padding_needed_for() + // == 0`. Consequently, by post-condition on `PtrInner::split_at_unchecked`, + // there is no trailing padding after `left`'s final element that would + // overlap into `right`. If `I::Aliasing` is shared and `T` forbids interior + // mutation, then overlap between their referents is permissible. + + // SAFETY: + // 0. `left` conforms to the aliasing invariant of `I::Aliasing`, by Lemma 0. + // 1. `left` conforms to the alignment invariant of `I::Alignment, because + // the referents of `left` and `Self` have the same address and type + // (and, thus, alignment requirement). + // 2. `left` conforms to the validity invariant of `I::Validity`, neither + // the type nor bytes of `left`'s referent have been changed. + let left = unsafe { Ptr::from_inner(left) }; + + // SAFETY: + // 0. `right` conforms to the aliasing invariant of `I::Aliasing`, by Lemma + // 0. + // 1. `right` conforms to the alignment invariant of `I::Alignment, because + // if `ptr` with `I::Alignment = Aligned`, then by invariant on `T: + // SplitAt`, the trailing slice of `ptr` (from which `right` is derived) + // will also be well-aligned. + // 2. `right` conforms to the validity invariant of `I::Validity`, + // because `right: [T::Elem]` is derived from the trailing slice of + // `ptr`, which, by contract on `T: SplitAt::Elem`, has type + // `[T::Elem]`. The `left` part cannot be used to invalidate `right`, + // because the caller promises that if `I::Aliasing` is `Exclusive` + // or `T` permits interior mutation, then `l_len.padding_needed_for() + // == 0` and thus the parts will be non-overlapping. 
+ let right = unsafe { Ptr::from_inner(right) }; + + (left, right) + } +} + +#[cfg(test)] +mod tests { + #[cfg(feature = "derive")] + #[test] + fn test_split_at() { + use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt}; + + #[derive(FromBytes, KnownLayout, SplitAt, IntoBytes, Immutable, Debug)] + #[repr(C)] + struct SliceDst { + prefix: [u8; OFFSET], + trailing: [u8], + } + + #[allow(clippy::as_conversions)] + fn test_split_at() { + // Test `split_at` + let n: usize = BUFFER_SIZE - OFFSET; + let arr = [1; BUFFER_SIZE]; + let dst = SliceDst::::ref_from_bytes(&arr[..]).unwrap(); + for i in 0..=n { + let (l, r) = dst.split_at(i).unwrap().via_runtime_check().unwrap(); + let l_sum: u8 = l.trailing.iter().sum(); + let r_sum: u8 = r.iter().sum(); + assert_eq!(l_sum, i as u8); + assert_eq!(r_sum, (n - i) as u8); + assert_eq!(l_sum + r_sum, n as u8); + } + + // Test `split_at_mut` + let n: usize = BUFFER_SIZE - OFFSET; + let mut arr = [1; BUFFER_SIZE]; + let dst = SliceDst::::mut_from_bytes(&mut arr[..]).unwrap(); + for i in 0..=n { + let (l, r) = dst.split_at_mut(i).unwrap().via_runtime_check().unwrap(); + let l_sum: u8 = l.trailing.iter().sum(); + let r_sum: u8 = r.iter().sum(); + assert_eq!(l_sum, i as u8); + assert_eq!(r_sum, (n - i) as u8); + assert_eq!(l_sum + r_sum, n as u8); + } + } + + test_split_at::<0, 16>(); + test_split_at::<1, 17>(); + test_split_at::<2, 18>(); + } + + #[cfg(feature = "derive")] + #[test] + #[allow(clippy::as_conversions)] + fn test_split_at_overlapping() { + use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt}; + + #[derive(FromBytes, KnownLayout, SplitAt, Immutable)] + #[repr(C, align(2))] + struct SliceDst { + prefix: u8, + trailing: [u8], + } + + const N: usize = 16; + + let arr = [1u16; N]; + let dst = SliceDst::ref_from_bytes(arr.as_bytes()).unwrap(); + + for i in 0..N { + let split = dst.split_at(i).unwrap().via_runtime_check(); + if i % 2 == 1 { + assert!(split.is_ok()); + } else { + 
assert!(split.is_err()); + } + } + } + #[test] + fn test_split_at_unchecked() { + use crate::SplitAt; + let mut arr = [1, 2, 3, 4]; + let slice = &arr[..]; + // SAFETY: 2 <= arr.len() (4) + let split = unsafe { SplitAt::split_at_unchecked(slice, 2) }; + // SAFETY: SplitAt::split_at_unchecked guarantees that the split is valid. + let (l, r) = unsafe { split.via_unchecked() }; + assert_eq!(l, &[1, 2]); + assert_eq!(r, &[3, 4]); + + let slice_mut = &mut arr[..]; + // SAFETY: 2 <= arr.len() (4) + let split = unsafe { SplitAt::split_at_mut_unchecked(slice_mut, 2) }; + // SAFETY: SplitAt::split_at_mut_unchecked guarantees that the split is valid. + let (l, r) = unsafe { split.via_unchecked() }; + assert_eq!(l, &mut [1, 2]); + assert_eq!(r, &mut [3, 4]); + } + + #[test] + fn test_split_at_via_methods() { + use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt}; + #[derive(FromBytes, KnownLayout, SplitAt, IntoBytes, Immutable, Debug)] + #[repr(C)] + struct Packet { + length: u8, + body: [u8], + } + + let arr = [1, 2, 3, 4]; + let packet = Packet::ref_from_bytes(&arr[..]).unwrap(); + + let split1 = packet.split_at(2).unwrap(); + let (l, r) = split1.via_immutable(); + assert_eq!(l.length, 1); + assert_eq!(r, &[4]); + + let split2 = packet.split_at(2).unwrap(); + let (l, r) = split2.via_into_bytes(); + assert_eq!(l.length, 1); + assert_eq!(r, &[4]); + } + #[test] + fn test_split_at_via_unaligned() { + use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt, Unaligned}; + #[derive(FromBytes, KnownLayout, SplitAt, IntoBytes, Immutable, Unaligned)] + #[repr(C)] + struct Packet { + length: u8, + body: [u8], + } + + let arr = [1, 2, 3, 4]; + let packet = Packet::ref_from_bytes(&arr[..]).unwrap(); + + let split = packet.split_at(2).unwrap(); + let (l, r) = split.via_unaligned(); + assert_eq!(l.length, 1); + assert_eq!(r, &[4]); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/util/macro_util.rs 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/util/macro_util.rs new file mode 100644 index 0000000000000000000000000000000000000000..f105a95d0ec7baf04f9758fbfd10ba4e50e4c17b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/util/macro_util.rs @@ -0,0 +1,1287 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +//! Utilities used by macros and by `zerocopy-derive`. +//! +//! These are defined here `zerocopy` rather than in code generated by macros or +//! by `zerocopy-derive` so that they can be compiled once rather than +//! recompiled for every invocation (e.g., if they were defined in generated +//! code, then deriving `IntoBytes` and `FromBytes` on three different types +//! would result in the code in question being emitted and compiled six +//! different times). + +#![allow(missing_debug_implementations)] + +// FIXME(#29), FIXME(https://github.com/rust-lang/rust/issues/69835): Remove +// this `cfg` when `size_of_val_raw` is stabilized. +#[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)] +#[cfg(not(target_pointer_width = "16"))] +use core::ptr::{self, NonNull}; +use core::{marker::PhantomData, mem, num::Wrapping}; + +use crate::{ + pointer::{ + cast::CastSized, + invariant::{Aligned, Initialized, Valid}, + BecauseImmutable, + }, + FromBytes, Immutable, IntoBytes, KnownLayout, Ptr, ReadOnly, TryFromBytes, ValidityError, +}; + +/// Projects the type of the field at `Index` in `Self` without regard for field +/// privacy. +/// +/// The `Index` parameter is any sort of handle that identifies the field; its +/// definition is the obligation of the implementer. +/// +/// # Safety +/// +/// Unsafe code may assume that this accurately reflects the definition of +/// `Self`. 
+pub unsafe trait Field { + /// The type of the field at `Index`. + type Type: ?Sized; +} + +#[cfg_attr( + not(no_zerocopy_diagnostic_on_unimplemented_1_78_0), + diagnostic::on_unimplemented( + message = "`{T}` has {PADDING_BYTES} total byte(s) of padding", + label = "types with padding cannot implement `IntoBytes`", + note = "consider using `zerocopy::Unalign` to lower the alignment of individual fields", + note = "consider adding explicit fields where padding would be", + note = "consider using `#[repr(packed)]` to remove padding" + ) +)] +pub trait PaddingFree {} +impl PaddingFree for () {} + +// FIXME(#1112): In the slice DST case, we should delegate to *both* +// `PaddingFree` *and* `DynamicPaddingFree` (and probably rename `PaddingFree` +// to `StaticPaddingFree` or something - or introduce a third trait with that +// name) so that we can have more clear error messages. + +#[cfg_attr( + not(no_zerocopy_diagnostic_on_unimplemented_1_78_0), + diagnostic::on_unimplemented( + message = "`{T}` has one or more padding bytes", + label = "types with padding cannot implement `IntoBytes`", + note = "consider using `zerocopy::Unalign` to lower the alignment of individual fields", + note = "consider adding explicit fields where padding would be", + note = "consider using `#[repr(packed)]` to remove padding" + ) +)] +pub trait DynamicPaddingFree {} +impl DynamicPaddingFree for () {} + +#[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)] +#[cfg(not(target_pointer_width = "16"))] +const _64K: usize = 1 << 16; + +// FIXME(#29), FIXME(https://github.com/rust-lang/rust/issues/69835): Remove +// this `cfg` when `size_of_val_raw` is stabilized. +#[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)] +#[cfg(not(target_pointer_width = "16"))] +#[repr(C, align(65536))] +struct Aligned64kAllocation([u8; _64K]); + +/// A pointer to an aligned allocation of size 2^16. 
+/// +/// # Safety +/// +/// `ALIGNED_64K_ALLOCATION` is guaranteed to point to the entirety of an +/// allocation with size and alignment 2^16, and to have valid provenance. +// FIXME(#29), FIXME(https://github.com/rust-lang/rust/issues/69835): Remove +// this `cfg` when `size_of_val_raw` is stabilized. +#[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)] +#[cfg(not(target_pointer_width = "16"))] +pub const ALIGNED_64K_ALLOCATION: NonNull<[u8]> = { + const REF: &Aligned64kAllocation = &Aligned64kAllocation([0; _64K]); + let ptr: *const Aligned64kAllocation = REF; + let ptr: *const [u8] = ptr::slice_from_raw_parts(ptr.cast(), _64K); + // SAFETY: + // - `ptr` is derived from a Rust reference, which is guaranteed to be + // non-null. + // - `ptr` is derived from an `&Aligned64kAllocation`, which has size and + // alignment `_64K` as promised. Its length is initialized to `_64K`, + // which means that it refers to the entire allocation. + // - `ptr` is derived from a Rust reference, which is guaranteed to have + // valid provenance. + // + // FIXME(#429): Once `NonNull::new_unchecked` docs document that it + // preserves provenance, cite those docs. + // FIXME: Replace this `as` with `ptr.cast_mut()` once our MSRV >= 1.65 + #[allow(clippy::as_conversions)] + unsafe { + NonNull::new_unchecked(ptr as *mut _) + } +}; + +/// Computes the offset of the base of the field `$trailing_field_name` within +/// the type `$ty`. +/// +/// `trailing_field_offset!` produces code which is valid in a `const` context. +// FIXME(#29), FIXME(https://github.com/rust-lang/rust/issues/69835): Remove +// this `cfg` when `size_of_val_raw` is stabilized. +#[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)] +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! 
trailing_field_offset { + ($ty:ty, $trailing_field_name:tt) => {{ + let min_size = { + let zero_elems: *const [()] = + $crate::util::macro_util::core_reexport::ptr::slice_from_raw_parts( + $crate::util::macro_util::core_reexport::ptr::NonNull::<()>::dangling() + .as_ptr() + .cast_const(), + 0, + ); + // SAFETY: + // - If `$ty` is `Sized`, `size_of_val_raw` is always safe to call. + // - Otherwise: + // - If `$ty` is not a slice DST, this pointer conversion will + // fail due to "mismatched vtable kinds", and compilation will + // fail. + // - If `$ty` is a slice DST, we have constructed `zero_elems` to + // have zero trailing slice elements. Per the `size_of_val_raw` + // docs, "For the special case where the dynamic tail length is + // 0, this function is safe to call." [1] + // + // [1] https://doc.rust-lang.org/nightly/std/mem/fn.size_of_val_raw.html + unsafe { + #[allow(clippy::as_conversions)] + $crate::util::macro_util::core_reexport::mem::size_of_val_raw( + zero_elems as *const $ty, + ) + } + }; + + assert!(min_size <= _64K); + + #[allow(clippy::as_conversions)] + let ptr = ALIGNED_64K_ALLOCATION.as_ptr() as *const $ty; + + // SAFETY: + // - Thanks to the preceding `assert!`, we know that the value with zero + // elements fits in `_64K` bytes, and thus in the allocation addressed + // by `ALIGNED_64K_ALLOCATION`. The offset of the trailing field is + // guaranteed to be no larger than this size, so this field projection + // is guaranteed to remain in-bounds of its allocation. + // - Because the minimum size is no larger than `_64K` bytes, and + // because an object's size must always be a multiple of its alignment + // [1], we know that `$ty`'s alignment is no larger than `_64K`. The + // allocation addressed by `ALIGNED_64K_ALLOCATION` is guaranteed to + // be aligned to `_64K`, so `ptr` is guaranteed to satisfy `$ty`'s + // alignment. + // - As required by `addr_of!`, we do not write through `field`. 
+ // + // Note that, as of [2], this requirement is technically unnecessary + // for Rust versions >= 1.75.0, but no harm in guaranteeing it anyway + // until we bump our MSRV. + // + // [1] Per https://doc.rust-lang.org/reference/type-layout.html: + // + // The size of a value is always a multiple of its alignment. + // + // [2] https://github.com/rust-lang/reference/pull/1387 + let field = unsafe { + $crate::util::macro_util::core_reexport::ptr::addr_of!((*ptr).$trailing_field_name) + }; + // SAFETY: + // - Both `ptr` and `field` are derived from the same allocated object. + // - By the preceding safety comment, `field` is in bounds of that + // allocated object. + // - The distance, in bytes, between `ptr` and `field` is required to be + // a multiple of the size of `u8`, which is trivially true because + // `u8`'s size is 1. + // - The distance, in bytes, cannot overflow `isize`. This is guaranteed + // because no allocated object can have a size larger than can fit in + // `isize`. [1] + // - The distance being in-bounds cannot rely on wrapping around the + // address space. This is guaranteed because the same is guaranteed of + // allocated objects. [1] + // + // [1] FIXME(#429), FIXME(https://github.com/rust-lang/rust/pull/116675): + // Once these are guaranteed in the Reference, cite it. + let offset = unsafe { field.cast::().offset_from(ptr.cast::()) }; + // Guaranteed not to be lossy: `field` comes after `ptr`, so the offset + // from `ptr` to `field` is guaranteed to be positive. + assert!(offset >= 0); + Some( + #[allow(clippy::as_conversions)] + { + offset as usize + }, + ) + }}; +} + +/// Computes alignment of `$ty: ?Sized`. +/// +/// `align_of!` produces code which is valid in a `const` context. +// FIXME(#29), FIXME(https://github.com/rust-lang/rust/issues/69835): Remove +// this `cfg` when `size_of_val_raw` is stabilized. 
+#[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)] +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! align_of { + ($ty:ty) => {{ + // SAFETY: `OffsetOfTrailingIsAlignment` is `repr(C)`, and its layout is + // guaranteed [1] to begin with the single-byte layout for `_byte`, + // followed by the padding needed to align `_trailing`, then the layout + // for `_trailing`, and finally any trailing padding bytes needed to + // correctly-align the entire struct. + // + // This macro computes the alignment of `$ty` by counting the number of + // bytes preceding `_trailing`. For instance, if the alignment of `$ty` + // is `1`, then no padding is required align `_trailing` and it will be + // located immediately after `_byte` at offset 1. If the alignment of + // `$ty` is 2, then a single padding byte is required before + // `_trailing`, and `_trailing` will be located at offset 2. + + // This correspondence between offset and alignment holds for all valid + // Rust alignments, and we confirm this exhaustively (or, at least up to + // the maximum alignment supported by `trailing_field_offset!`) in + // `test_align_of_dst`. + // + // [1]: https://doc.rust-lang.org/nomicon/other-reprs.html#reprc + + #[repr(C)] + struct OffsetOfTrailingIsAlignment { + _byte: u8, + _trailing: $ty, + } + + trailing_field_offset!(OffsetOfTrailingIsAlignment, _trailing) + }}; +} + +mod size_to_tag { + pub trait SizeToTag { + type Tag; + } + + impl SizeToTag<1> for () { + type Tag = u8; + } + impl SizeToTag<2> for () { + type Tag = u16; + } + impl SizeToTag<4> for () { + type Tag = u32; + } + impl SizeToTag<8> for () { + type Tag = u64; + } + impl SizeToTag<16> for () { + type Tag = u128; + } +} + +/// An alias for the unsigned integer of the given size in bytes. 
+#[doc(hidden)] +pub type SizeToTag = <() as size_to_tag::SizeToTag>::Tag; + +// We put `Sized` in its own module so it can have the same name as the standard +// library `Sized` without shadowing it in the parent module. +#[cfg(not(no_zerocopy_diagnostic_on_unimplemented_1_78_0))] +mod __size_of { + #[diagnostic::on_unimplemented( + message = "`{Self}` is unsized", + label = "`IntoBytes` needs all field types to be `Sized` in order to determine whether there is padding", + note = "consider using `#[repr(packed)]` to remove padding", + note = "`IntoBytes` does not require the fields of `#[repr(packed)]` types to be `Sized`" + )] + pub trait Sized: core::marker::Sized {} + impl Sized for T {} + + #[inline(always)] + #[must_use] + #[allow(clippy::needless_maybe_sized)] + pub const fn size_of() -> usize { + core::mem::size_of::() + } +} + +#[cfg(no_zerocopy_diagnostic_on_unimplemented_1_78_0)] +pub use core::mem::size_of; + +#[cfg(not(no_zerocopy_diagnostic_on_unimplemented_1_78_0))] +pub use __size_of::size_of; + +/// How many padding bytes does the struct type `$t` have? +/// +/// `$ts` is the list of the type of every field in `$t`. `$t` must be a struct +/// type, or else `struct_padding!`'s result may be meaningless. +/// +/// Note that `struct_padding!`'s results are independent of `repcr` since they +/// only consider the size of the type and the sizes of the fields. Whatever the +/// repr, the size of the type already takes into account any padding that the +/// compiler has decided to add. Structs with well-defined representations (such +/// as `repr(C)`) can use this macro to check for padding. Note that while this +/// may yield some consistent value for some `repr(Rust)` structs, it is not +/// guaranteed across platforms or compilations. +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! 
struct_padding { + ($t:ty, [$($ts:ty),*]) => { + $crate::util::macro_util::size_of::<$t>() - (0 $(+ $crate::util::macro_util::size_of::<$ts>())*) + }; +} + +/// Does the `repr(C)` struct type `$t` have padding? +/// +/// `$ts` is the list of the type of every field in `$t`. `$t` must be a +/// `repr(C)` struct type, or else `struct_has_padding!`'s result may be +/// meaningless. +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! repr_c_struct_has_padding { + ($t:ty, [$($ts:tt),*]) => {{ + let layout = $crate::DstLayout::for_repr_c_struct( + $crate::util::macro_util::core_reexport::option::Option::None, + $crate::util::macro_util::core_reexport::option::Option::None, + &[$($crate::repr_c_struct_has_padding!(@field $ts),)*] + ); + layout.requires_static_padding() || layout.requires_dynamic_padding() + }}; + (@field ([$t:ty])) => { + <[$t] as $crate::KnownLayout>::LAYOUT + }; + (@field ($t:ty)) => { + $crate::DstLayout::for_unpadded_type::<$t>() + }; + (@field [$t:ty]) => { + <[$t] as $crate::KnownLayout>::LAYOUT + }; + (@field $t:ty) => { + $crate::DstLayout::for_unpadded_type::<$t>() + }; +} + +/// Does the union type `$t` have padding? +/// +/// `$ts` is the list of the type of every field in `$t`. `$t` must be a union +/// type, or else `union_padding!`'s result may be meaningless. +/// +/// Note that `union_padding!`'s results are independent of `repr` since they +/// only consider the size of the type and the sizes of the fields. Whatever the +/// repr, the size of the type already takes into account any padding that the +/// compiler has decided to add. Unions with well-defined representations (such +/// as `repr(C)`) can use this macro to check for padding. Note that while this +/// may yield some consistent value for some `repr(Rust)` unions, it is not +/// guaranteed across platforms or compilations. +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. 
+#[macro_export] +macro_rules! union_padding { + ($t:ty, [$($ts:ty),*]) => {{ + let mut max = 0; + $({ + let padding = $crate::util::macro_util::size_of::<$t>() - $crate::util::macro_util::size_of::<$ts>(); + if padding > max { + max = padding; + } + })* + max + }}; +} + +/// How many padding bytes does the enum type `$t` have? +/// +/// `$disc` is the type of the enum tag, and `$ts` is a list of fields in each +/// square-bracket-delimited variant. `$t` must be an enum, or else +/// `enum_padding!`'s result may be meaningless. An enum has padding if any of +/// its variant structs [1][2] contain padding, and so all of the variants of an +/// enum must be "full" in order for the enum to not have padding. +/// +/// The results of `enum_padding!` require that the enum is not `repr(Rust)`, as +/// `repr(Rust)` enums may niche the enum's tag and reduce the total number of +/// bytes required to represent the enum as a result. As long as the enum is +/// `repr(C)`, `repr(int)`, or `repr(C, int)`, this will consistently return +/// whether the enum contains any padding bytes. +/// +/// [1]: https://doc.rust-lang.org/1.81.0/reference/type-layout.html#reprc-enums-with-fields +/// [2]: https://doc.rust-lang.org/1.81.0/reference/type-layout.html#primitive-representation-of-enums-with-fields +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! enum_padding { + ($t:ty, $disc:ty, $([$($ts:ty),*]),*) => {{ + let mut max = 0; + $({ + let padding = $crate::util::macro_util::size_of::<$t>() + - ( + $crate::util::macro_util::size_of::<$disc>() + $(+ $crate::util::macro_util::size_of::<$ts>())* + ); + if padding > max { + max = padding; + } + })* + max + }}; +} + +/// Unwraps an infallible `Result`. +#[doc(hidden)] +#[macro_export] +macro_rules! 
into_inner { + ($e:expr) => { + match $e { + $crate::util::macro_util::core_reexport::result::Result::Ok(e) => e, + $crate::util::macro_util::core_reexport::result::Result::Err(i) => match i {}, + } + }; +} + +/// Translates an identifier or tuple index into a numeric identifier. +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! ident_id { + ($field:ident) => { + $crate::util::macro_util::hash_name(stringify!($field)) + }; + ($field:literal) => { + $field + }; +} + +/// Computes the hash of a string. +/// +/// NOTE(#2749) on hash collisions: This function's output only needs to be +/// deterministic within a particular compilation. Thus, if a user ever reports +/// a hash collision (very unlikely given the <= 16-byte special case), we can +/// strengthen the hash function at that point and publish a new version. Since +/// this is computed at compile time on small strings, we can easily use more +/// expensive and higher-quality hash functions if need be. +#[inline(always)] +#[must_use] +#[allow(clippy::as_conversions, clippy::indexing_slicing, clippy::arithmetic_side_effects)] +pub const fn hash_name(name: &str) -> i128 { + let name = name.as_bytes(); + + // We guarantee freedom from hash collisions between any two strings of + // length 16 or less by having the hashes of such strings be equal to + // their value. There is still a possibility that such strings will have + // the same value as the hash of a string of length > 16. + if name.len() <= size_of::() { + let mut bytes = [0u8; 16]; + + let mut i = 0; + while i < name.len() { + bytes[i] = name[i]; + i += 1; + } + + return i128::from_ne_bytes(bytes); + }; + + // An implementation of FxHasher, although returning a u128. Probably + // not as strong as it could be, but probably more collision resistant + // than normal 64-bit FxHasher. 
+ let mut hash = 0u128; + let mut i = 0; + while i < name.len() { + // This is just FxHasher's `0x517cc1b727220a95` constant + // concatenated back-to-back. + const K: u128 = 0x517cc1b727220a95517cc1b727220a95; + hash = (hash.rotate_left(5) ^ (name[i] as u128)).wrapping_mul(K); + i += 1; + } + i128::from_ne_bytes(hash.to_ne_bytes()) +} + +/// Attempts to transmute `Src` into `Dst`. +/// +/// A helper for `try_transmute!`. +/// +/// # Panics +/// +/// `try_transmute` may either produce a post-monomorphization error or a panic +/// if `Dst` is bigger than `Src`. Otherwise, `try_transmute` panics under the +/// same circumstances as [`is_bit_valid`]. +/// +/// [`is_bit_valid`]: TryFromBytes::is_bit_valid +#[inline(always)] +pub fn try_transmute(src: Src) -> Result> +where + Src: IntoBytes, + Dst: TryFromBytes, +{ + static_assert!(Src, Dst => mem::size_of::() == mem::size_of::()); + + let mu_src = mem::MaybeUninit::new(src); + // SAFETY: `MaybeUninit` has no validity requirements. + let mu_dst: mem::MaybeUninit> = + unsafe { crate::util::transmute_unchecked(mu_src) }; + + let ptr = Ptr::from_ref(&mu_dst); + + // SAFETY: Since `Src: IntoBytes`, and since `size_of::() == + // size_of::()` by the preceding assertion, all of `mu_dst`'s bytes are + // initialized. `MaybeUninit` has no validity requirements, so even if + // `ptr` is used to mutate its referent (which it actually can't be - it's + // a shared `ReadOnly` pointer), that won't violate its referent's validity. + let ptr = unsafe { ptr.assume_validity::() }; + if Dst::is_bit_valid(ptr.cast::<_, CastSized, _>()) { + // SAFETY: Since `Dst::is_bit_valid`, we know that `ptr`'s referent is + // bit-valid for `Dst`. `ptr` points to `mu_dst`, and no intervening + // operations have mutated it, so it is a bit-valid `Dst`. + Ok(ReadOnly::into_inner(unsafe { mu_dst.assume_init() })) + } else { + // SAFETY: `MaybeUninit` has no validity requirements. 
+ let mu_src: mem::MaybeUninit = unsafe { crate::util::transmute_unchecked(mu_dst) }; + // SAFETY: `mu_dst`/`mu_src` was constructed from `src` and never + // modified, so it is still bit-valid. + Err(ValidityError::new(unsafe { mu_src.assume_init() })) + } +} + +/// See `try_transmute_ref!` documentation. +pub trait TryTransmuteRefDst<'a> { + type Dst: ?Sized; + + /// See `try_transmute_ref!` documentation. + fn try_transmute_ref(self) -> Result<&'a Self::Dst, ValidityError<&'a Self::Src, Self::Dst>> + where + Self: TryTransmuteRefSrc<'a>, + Self::Src: IntoBytes + Immutable + KnownLayout, + Self::Dst: TryFromBytes + Immutable + KnownLayout; +} + +pub trait TryTransmuteRefSrc<'a> { + type Src: ?Sized; +} + +impl<'a, Src, Dst> TryTransmuteRefSrc<'a> for Wrap<&'a Src, &'a Dst> +where + Src: ?Sized, + Dst: ?Sized, +{ + type Src = Src; +} + +impl<'a, Src, Dst> TryTransmuteRefDst<'a> for Wrap<&'a Src, &'a Dst> +where + Src: IntoBytes + Immutable + KnownLayout + ?Sized, + Dst: TryFromBytes + Immutable + KnownLayout + ?Sized, +{ + type Dst = Dst; + + #[inline(always)] + fn try_transmute_ref( + self, + ) -> Result< + &'a Dst, + ValidityError<&'a as TryTransmuteRefSrc<'a>>::Src, Dst>, + > { + let ptr = Ptr::from_ref(self.0); + #[rustfmt::skip] + let res = ptr.try_with(#[inline(always)] |ptr| { + let ptr = ptr.recall_validity::(); + let ptr = ptr.cast::<_, crate::layout::CastFrom, _>(); + ptr.try_into_valid() + }); + match res { + Ok(ptr) => { + static_assert!(Src: ?Sized + KnownLayout, Dst: ?Sized + KnownLayout => { + Src::LAYOUT.align.get() >= Dst::LAYOUT.align.get() + }, "cannot transmute reference when destination type has higher alignment than source type"); + // SAFETY: We have checked that `Dst` does not have a stricter + // alignment requirement than `Src`. 
+ let ptr = unsafe { ptr.assume_alignment::() }; + Ok(ptr.as_ref()) + } + Err(err) => Err(err.map_src(Ptr::as_ref)), + } + } +} + +pub trait TryTransmuteMutDst<'a> { + type Dst: ?Sized; + + /// See `try_transmute_mut!` documentation. + fn try_transmute_mut( + self, + ) -> Result<&'a mut Self::Dst, ValidityError<&'a mut Self::Src, Self::Dst>> + where + Self: TryTransmuteMutSrc<'a>, + Self::Src: IntoBytes, + Self::Dst: TryFromBytes; +} + +pub trait TryTransmuteMutSrc<'a> { + type Src: ?Sized; +} + +impl<'a, Src, Dst> TryTransmuteMutSrc<'a> for Wrap<&'a mut Src, &'a mut Dst> +where + Src: ?Sized, + Dst: ?Sized, +{ + type Src = Src; +} + +impl<'a, Src, Dst> TryTransmuteMutDst<'a> for Wrap<&'a mut Src, &'a mut Dst> +where + Src: FromBytes + IntoBytes + KnownLayout + ?Sized, + Dst: TryFromBytes + IntoBytes + KnownLayout + ?Sized, +{ + type Dst = Dst; + + #[inline(always)] + fn try_transmute_mut( + self, + ) -> Result< + &'a mut Dst, + ValidityError<&'a mut as TryTransmuteMutSrc<'a>>::Src, Dst>, + > { + let ptr = Ptr::from_mut(self.0); + // SAFETY: The provided closure returns the only copy of `ptr`. + #[rustfmt::skip] + let res = unsafe { + ptr.try_with_unchecked(#[inline(always)] |ptr| { + let ptr = ptr.recall_validity::(); + let ptr = ptr.cast::<_, crate::layout::CastFrom, _>(); + ptr.try_into_valid() + }) + }; + match res { + Ok(ptr) => { + static_assert!(Src: ?Sized + KnownLayout, Dst: ?Sized + KnownLayout => { + Src::LAYOUT.align.get() >= Dst::LAYOUT.align.get() + }, "cannot transmute reference when destination type has higher alignment than source type"); + // SAFETY: We have checked that `Dst` does not have a stricter + // alignment requirement than `Src`. + let ptr = unsafe { ptr.assume_alignment::() }; + Ok(ptr.as_mut()) + } + Err(err) => Err(err.map_src(Ptr::as_mut)), + } + } +} + +// Used in `transmute_ref!` and friends. 
+// +// This permits us to use the autoref specialization trick to dispatch to +// associated functions for `transmute_ref` and `transmute_mut` when both `Src` +// and `Dst` are `Sized`, and to trait methods otherwise. The associated +// functions, unlike the trait methods, do not require a `KnownLayout` bound. +// This permits us to add support for transmuting references to unsized types +// without breaking backwards-compatibility (on v0.8.x) with the old +// implementation, which did not require a `KnownLayout` bound to transmute +// sized types. +#[derive(Copy, Clone)] +pub struct Wrap(pub Src, pub PhantomData); + +impl Wrap { + #[inline(always)] + pub const fn new(src: Src) -> Self { + Wrap(src, PhantomData) + } +} + +impl<'a, Src, Dst> Wrap<&'a Src, &'a Dst> +where + Src: ?Sized, + Dst: ?Sized, +{ + #[allow(clippy::must_use_candidate, clippy::missing_inline_in_public_items, clippy::empty_loop)] + pub const fn transmute_ref_inference_helper(self) -> &'a Dst { + loop {} + } +} + +impl<'a, Src, Dst> Wrap<&'a Src, &'a Dst> { + /// # Safety + /// The caller must guarantee that: + /// - `Src: IntoBytes + Immutable` + /// - `Dst: FromBytes + Immutable` + /// + /// # PME + /// + /// Instantiating this method PMEs unless both: + /// - `mem::size_of::() == mem::size_of::()` + /// - `mem::align_of::() <= mem::align_of::()` + #[inline(always)] + #[must_use] + pub const unsafe fn transmute_ref(self) -> &'a Dst { + static_assert!(Src, Dst => mem::size_of::() == mem::size_of::()); + static_assert!(Src, Dst => mem::align_of::() <= mem::align_of::()); + + let src: *const Src = self.0; + let dst = src.cast::(); + // SAFETY: + // - We know that it is sound to view the target type of the input + // reference (`Src`) as the target type of the output reference + // (`Dst`) because the caller has guaranteed that `Src: IntoBytes`, + // `Dst: FromBytes`, and `size_of::() == size_of::()`. 
+ // - We know that there are no `UnsafeCell`s, and thus we don't have to + // worry about `UnsafeCell` overlap, because `Src: Immutable` and + // `Dst: Immutable`. + // - The caller has guaranteed that alignment is not increased. + // - We know that the returned lifetime will not outlive the input + // lifetime thanks to the lifetime bounds on this function. + // + // FIXME(#67): Once our MSRV is 1.58, replace this `transmute` with + // `&*dst`. + #[allow(clippy::transmute_ptr_to_ref)] + unsafe { + mem::transmute(dst) + } + } + + #[inline(always)] + pub fn try_transmute_ref(self) -> Result<&'a Dst, ValidityError<&'a Src, Dst>> + where + Src: IntoBytes + Immutable, + Dst: TryFromBytes + Immutable, + { + static_assert!(Src => mem::align_of::() == mem::align_of::>()); + static_assert!(Dst => mem::align_of::() == mem::align_of::>()); + + // SAFETY: By the preceding assert, `Src` and `Wrapping` have the + // same alignment. + let src: &Wrapping = + unsafe { crate::util::transmute_ref::<_, _, BecauseImmutable>(self.0) }; + let src = Wrap::new(src); + , &'a Wrapping> as TryTransmuteRefDst<'a>>::try_transmute_ref( + src, + ) + // SAFETY: By the preceding assert, `Dst` and `Wrapping` have the + // same alignment. + .map(|dst| unsafe { crate::util::transmute_ref::<_, _, BecauseImmutable>(dst) }) + .map_err(|err| { + // SAFETY: By the preceding assert, `Src` and `Wrapping` have the + // same alignment. + ValidityError::new(unsafe { + crate::util::transmute_ref::<_, _, BecauseImmutable>(err.into_src()) + }) + }) + } +} + +impl<'a, Src, Dst> Wrap<&'a mut Src, &'a mut Dst> +where + Src: ?Sized, + Dst: ?Sized, +{ + #[allow(clippy::must_use_candidate, clippy::missing_inline_in_public_items, clippy::empty_loop)] + pub fn transmute_mut_inference_helper(self) -> &'a mut Dst { + loop {} + } +} + +impl<'a, Src, Dst> Wrap<&'a mut Src, &'a mut Dst> { + /// Transmutes a mutable reference of one type to a mutable reference of + /// another type. 
+ /// + /// # PME + /// + /// Instantiating this method PMEs unless both: + /// - `mem::size_of::() == mem::size_of::()` + /// - `mem::align_of::() <= mem::align_of::()` + #[inline(always)] + #[must_use] + pub fn transmute_mut(self) -> &'a mut Dst + where + Src: FromBytes + IntoBytes, + Dst: FromBytes + IntoBytes, + { + static_assert!(Src, Dst => mem::size_of::() == mem::size_of::()); + static_assert!(Src, Dst => mem::align_of::() <= mem::align_of::()); + + let src: *mut Src = self.0; + let dst = src.cast::(); + // SAFETY: + // - We know that it is sound to view the target type of the input + // reference (`Src`) as the target type of the output reference + // (`Dst`) and vice-versa because `Src: FromBytes + IntoBytes`, `Dst: + // FromBytes + IntoBytes`, and (as asserted above) `size_of::() + // == size_of::()`. + // - We asserted above that alignment will not increase. + // - We know that the returned lifetime will not outlive the input + // lifetime thanks to the lifetime bounds on this function. + unsafe { &mut *dst } + } + + #[inline(always)] + pub fn try_transmute_mut(self) -> Result<&'a mut Dst, ValidityError<&'a mut Src, Dst>> + where + Src: FromBytes + IntoBytes, + Dst: TryFromBytes + IntoBytes, + { + static_assert!(Src => mem::align_of::() == mem::align_of::>()); + static_assert!(Dst => mem::align_of::() == mem::align_of::>()); + + // SAFETY: By the preceding assert, `Src` and `Wrapping` have the + // same alignment. + let src: &mut Wrapping = + unsafe { crate::util::transmute_mut::<_, _, (_, (_, _))>(self.0) }; + let src = Wrap::new(src); + , &'a mut Wrapping> as TryTransmuteMutDst<'a>> + ::try_transmute_mut(src) + // SAFETY: By the preceding assert, `Dst` and `Wrapping` have the + // same alignment. + .map(|dst| unsafe { crate::util::transmute_mut::<_, _, (_, (_, _))>(dst) }) + .map_err(|err| { + // SAFETY: By the preceding assert, `Src` and `Wrapping` have the + // same alignment. 
+ ValidityError::new(unsafe { + crate::util::transmute_mut::<_, _, (_, (_, _))>(err.into_src()) + }) + }) + } +} + +pub trait TransmuteRefDst<'a> { + type Dst: ?Sized; + + #[must_use] + fn transmute_ref(self) -> &'a Self::Dst; +} + +impl<'a, Src: ?Sized, Dst: ?Sized> TransmuteRefDst<'a> for Wrap<&'a Src, &'a Dst> +where + Src: KnownLayout + IntoBytes + Immutable, + Dst: KnownLayout + FromBytes + Immutable, +{ + type Dst = Dst; + + #[inline(always)] + fn transmute_ref(self) -> &'a Dst { + let ptr = Ptr::from_ref(self.0) + .recall_validity::() + .transmute_with::, (crate::pointer::BecauseMutationCompatible, _)>() + .recall_validity::(); + + static_assert!(Src: ?Sized + KnownLayout, Dst: ?Sized + KnownLayout => { + Src::LAYOUT.align.get() >= Dst::LAYOUT.align.get() + }, "cannot transmute reference when destination type has higher alignment than source type"); + + // SAFETY: The preceding `static_assert!` ensures that + // `Src::LAYOUT.align >= Dst::LAYOUT.align`. Since `self` is + // validly-aligned for `Src`, it is also validly-aligned for `Dst`. + let ptr = unsafe { ptr.assume_alignment() }; + + ptr.as_ref() + } +} + +pub trait TransmuteMutDst<'a> { + type Dst: ?Sized; + #[must_use] + fn transmute_mut(self) -> &'a mut Self::Dst; +} + +impl<'a, Src: ?Sized, Dst: ?Sized> TransmuteMutDst<'a> for Wrap<&'a mut Src, &'a mut Dst> +where + Src: KnownLayout + FromBytes + IntoBytes, + Dst: KnownLayout + FromBytes + IntoBytes, +{ + type Dst = Dst; + + #[inline(always)] + fn transmute_mut(self) -> &'a mut Dst { + let ptr = Ptr::from_mut(self.0) + .recall_validity::() + .transmute_with::, _>() + .recall_validity::(); + + static_assert!(Src: ?Sized + KnownLayout, Dst: ?Sized + KnownLayout => { + Src::LAYOUT.align.get() >= Dst::LAYOUT.align.get() + }, "cannot transmute reference when destination type has higher alignment than source type"); + + // SAFETY: The preceding `static_assert!` ensures that + // `Src::LAYOUT.align >= Dst::LAYOUT.align`. 
Since `self` is + // validly-aligned for `Src`, it is also validly-aligned for `Dst`. + let ptr = unsafe { ptr.assume_alignment() }; + + ptr.as_mut() + } +} + +/// A function which emits a warning if its return value is not used. +#[must_use] +#[inline(always)] +pub const fn must_use(t: T) -> T { + t +} + +// NOTE: We can't change this to a `pub use core as core_reexport` until [1] is +// fixed or we update to a semver-breaking version (as of this writing, 0.8.0) +// on the `main` branch. +// +// [1] https://github.com/obi1kenobi/cargo-semver-checks/issues/573 +pub mod core_reexport { + pub use core::*; + + pub mod mem { + pub use core::mem::*; + } +} + +#[cfg(test)] +mod tests { + use crate::util::testutil::*; + + #[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)] + mod nightly { + use super::super::*; + use crate::util::testutil::*; + + // FIXME(#29), FIXME(https://github.com/rust-lang/rust/issues/69835): + // Remove this `cfg` when `size_of_val_raw` is stabilized. + #[allow(clippy::decimal_literal_representation)] + #[test] + fn test_trailing_field_offset() { + assert_eq!(mem::align_of::(), _64K); + + macro_rules! 
test { + (#[$cfg:meta] ($($ts:ty),* ; $trailing_field_ty:ty) => $expect:expr) => {{ + #[$cfg] + struct Test($(#[allow(dead_code)] $ts,)* #[allow(dead_code)] $trailing_field_ty); + assert_eq!(test!(@offset $($ts),* ; $trailing_field_ty), $expect); + }}; + (#[$cfg:meta] $(#[$cfgs:meta])* ($($ts:ty),* ; $trailing_field_ty:ty) => $expect:expr) => { + test!(#[$cfg] ($($ts),* ; $trailing_field_ty) => $expect); + test!($(#[$cfgs])* ($($ts),* ; $trailing_field_ty) => $expect); + }; + (@offset ; $_trailing:ty) => { trailing_field_offset!(Test, 0) }; + (@offset $_t:ty ; $_trailing:ty) => { trailing_field_offset!(Test, 1) }; + } + + test!(#[repr(C)] #[repr(transparent)] #[repr(packed)](; u8) => Some(0)); + test!(#[repr(C)] #[repr(transparent)] #[repr(packed)](; [u8]) => Some(0)); + test!(#[repr(C)] #[repr(C, packed)] (u8; u8) => Some(1)); + test!(#[repr(C)] (; AU64) => Some(0)); + test!(#[repr(C)] (; [AU64]) => Some(0)); + test!(#[repr(C)] (u8; AU64) => Some(8)); + test!(#[repr(C)] (u8; [AU64]) => Some(8)); + + #[derive( + Immutable, FromBytes, Eq, PartialEq, Ord, PartialOrd, Default, Debug, Copy, Clone, + )] + #[repr(C)] + pub(crate) struct Nested { + _t: T, + _u: U, + } + + test!(#[repr(C)] (; Nested) => Some(0)); + test!(#[repr(C)] (; Nested) => Some(0)); + test!(#[repr(C)] (u8; Nested) => Some(8)); + test!(#[repr(C)] (u8; Nested) => Some(8)); + + // Test that `packed(N)` limits the offset of the trailing field. 
+ test!(#[repr(C, packed( 1))] (u8; elain::Align< 2>) => Some( 1)); + test!(#[repr(C, packed( 2))] (u8; elain::Align< 4>) => Some( 2)); + test!(#[repr(C, packed( 4))] (u8; elain::Align< 8>) => Some( 4)); + test!(#[repr(C, packed( 8))] (u8; elain::Align< 16>) => Some( 8)); + test!(#[repr(C, packed( 16))] (u8; elain::Align< 32>) => Some( 16)); + test!(#[repr(C, packed( 32))] (u8; elain::Align< 64>) => Some( 32)); + test!(#[repr(C, packed( 64))] (u8; elain::Align< 128>) => Some( 64)); + test!(#[repr(C, packed( 128))] (u8; elain::Align< 256>) => Some( 128)); + test!(#[repr(C, packed( 256))] (u8; elain::Align< 512>) => Some( 256)); + test!(#[repr(C, packed( 512))] (u8; elain::Align< 1024>) => Some( 512)); + test!(#[repr(C, packed( 1024))] (u8; elain::Align< 2048>) => Some( 1024)); + test!(#[repr(C, packed( 2048))] (u8; elain::Align< 4096>) => Some( 2048)); + test!(#[repr(C, packed( 4096))] (u8; elain::Align< 8192>) => Some( 4096)); + test!(#[repr(C, packed( 8192))] (u8; elain::Align< 16384>) => Some( 8192)); + test!(#[repr(C, packed( 16384))] (u8; elain::Align< 32768>) => Some( 16384)); + test!(#[repr(C, packed( 32768))] (u8; elain::Align< 65536>) => Some( 32768)); + test!(#[repr(C, packed( 65536))] (u8; elain::Align< 131072>) => Some( 65536)); + /* Alignments above 65536 are not yet supported. 
+ test!(#[repr(C, packed( 131072))] (u8; elain::Align< 262144>) => Some( 131072)); + test!(#[repr(C, packed( 262144))] (u8; elain::Align< 524288>) => Some( 262144)); + test!(#[repr(C, packed( 524288))] (u8; elain::Align< 1048576>) => Some( 524288)); + test!(#[repr(C, packed( 1048576))] (u8; elain::Align< 2097152>) => Some( 1048576)); + test!(#[repr(C, packed( 2097152))] (u8; elain::Align< 4194304>) => Some( 2097152)); + test!(#[repr(C, packed( 4194304))] (u8; elain::Align< 8388608>) => Some( 4194304)); + test!(#[repr(C, packed( 8388608))] (u8; elain::Align< 16777216>) => Some( 8388608)); + test!(#[repr(C, packed( 16777216))] (u8; elain::Align< 33554432>) => Some( 16777216)); + test!(#[repr(C, packed( 33554432))] (u8; elain::Align< 67108864>) => Some( 33554432)); + test!(#[repr(C, packed( 67108864))] (u8; elain::Align< 33554432>) => Some( 67108864)); + test!(#[repr(C, packed( 33554432))] (u8; elain::Align<134217728>) => Some( 33554432)); + test!(#[repr(C, packed(134217728))] (u8; elain::Align<268435456>) => Some(134217728)); + test!(#[repr(C, packed(268435456))] (u8; elain::Align<268435456>) => Some(268435456)); + */ + + // Test that `align(N)` does not limit the offset of the trailing field. 
+ test!(#[repr(C, align( 1))] (u8; elain::Align< 2>) => Some( 2)); + test!(#[repr(C, align( 2))] (u8; elain::Align< 4>) => Some( 4)); + test!(#[repr(C, align( 4))] (u8; elain::Align< 8>) => Some( 8)); + test!(#[repr(C, align( 8))] (u8; elain::Align< 16>) => Some( 16)); + test!(#[repr(C, align( 16))] (u8; elain::Align< 32>) => Some( 32)); + test!(#[repr(C, align( 32))] (u8; elain::Align< 64>) => Some( 64)); + test!(#[repr(C, align( 64))] (u8; elain::Align< 128>) => Some( 128)); + test!(#[repr(C, align( 128))] (u8; elain::Align< 256>) => Some( 256)); + test!(#[repr(C, align( 256))] (u8; elain::Align< 512>) => Some( 512)); + test!(#[repr(C, align( 512))] (u8; elain::Align< 1024>) => Some( 1024)); + test!(#[repr(C, align( 1024))] (u8; elain::Align< 2048>) => Some( 2048)); + test!(#[repr(C, align( 2048))] (u8; elain::Align< 4096>) => Some( 4096)); + test!(#[repr(C, align( 4096))] (u8; elain::Align< 8192>) => Some( 8192)); + test!(#[repr(C, align( 8192))] (u8; elain::Align< 16384>) => Some( 16384)); + test!(#[repr(C, align( 16384))] (u8; elain::Align< 32768>) => Some( 32768)); + test!(#[repr(C, align( 32768))] (u8; elain::Align< 65536>) => Some( 65536)); + /* Alignments above 65536 are not yet supported. 
+ test!(#[repr(C, align( 65536))] (u8; elain::Align< 131072>) => Some( 131072)); + test!(#[repr(C, align( 131072))] (u8; elain::Align< 262144>) => Some( 262144)); + test!(#[repr(C, align( 262144))] (u8; elain::Align< 524288>) => Some( 524288)); + test!(#[repr(C, align( 524288))] (u8; elain::Align< 1048576>) => Some( 1048576)); + test!(#[repr(C, align( 1048576))] (u8; elain::Align< 2097152>) => Some( 2097152)); + test!(#[repr(C, align( 2097152))] (u8; elain::Align< 4194304>) => Some( 4194304)); + test!(#[repr(C, align( 4194304))] (u8; elain::Align< 8388608>) => Some( 8388608)); + test!(#[repr(C, align( 8388608))] (u8; elain::Align< 16777216>) => Some( 16777216)); + test!(#[repr(C, align( 16777216))] (u8; elain::Align< 33554432>) => Some( 33554432)); + test!(#[repr(C, align( 33554432))] (u8; elain::Align< 67108864>) => Some( 67108864)); + test!(#[repr(C, align( 67108864))] (u8; elain::Align< 33554432>) => Some( 33554432)); + test!(#[repr(C, align( 33554432))] (u8; elain::Align<134217728>) => Some(134217728)); + test!(#[repr(C, align(134217728))] (u8; elain::Align<268435456>) => Some(268435456)); + */ + } + + // FIXME(#29), FIXME(https://github.com/rust-lang/rust/issues/69835): + // Remove this `cfg` when `size_of_val_raw` is stabilized. + #[allow(clippy::decimal_literal_representation)] + #[test] + fn test_align_of_dst() { + // Test that `align_of!` correctly computes the alignment of DSTs. 
+ assert_eq!(align_of!([elain::Align<1>]), Some(1)); + assert_eq!(align_of!([elain::Align<2>]), Some(2)); + assert_eq!(align_of!([elain::Align<4>]), Some(4)); + assert_eq!(align_of!([elain::Align<8>]), Some(8)); + assert_eq!(align_of!([elain::Align<16>]), Some(16)); + assert_eq!(align_of!([elain::Align<32>]), Some(32)); + assert_eq!(align_of!([elain::Align<64>]), Some(64)); + assert_eq!(align_of!([elain::Align<128>]), Some(128)); + assert_eq!(align_of!([elain::Align<256>]), Some(256)); + assert_eq!(align_of!([elain::Align<512>]), Some(512)); + assert_eq!(align_of!([elain::Align<1024>]), Some(1024)); + assert_eq!(align_of!([elain::Align<2048>]), Some(2048)); + assert_eq!(align_of!([elain::Align<4096>]), Some(4096)); + assert_eq!(align_of!([elain::Align<8192>]), Some(8192)); + assert_eq!(align_of!([elain::Align<16384>]), Some(16384)); + assert_eq!(align_of!([elain::Align<32768>]), Some(32768)); + assert_eq!(align_of!([elain::Align<65536>]), Some(65536)); + /* Alignments above 65536 are not yet supported. 
+ assert_eq!(align_of!([elain::Align<131072>]), Some(131072)); + assert_eq!(align_of!([elain::Align<262144>]), Some(262144)); + assert_eq!(align_of!([elain::Align<524288>]), Some(524288)); + assert_eq!(align_of!([elain::Align<1048576>]), Some(1048576)); + assert_eq!(align_of!([elain::Align<2097152>]), Some(2097152)); + assert_eq!(align_of!([elain::Align<4194304>]), Some(4194304)); + assert_eq!(align_of!([elain::Align<8388608>]), Some(8388608)); + assert_eq!(align_of!([elain::Align<16777216>]), Some(16777216)); + assert_eq!(align_of!([elain::Align<33554432>]), Some(33554432)); + assert_eq!(align_of!([elain::Align<67108864>]), Some(67108864)); + assert_eq!(align_of!([elain::Align<33554432>]), Some(33554432)); + assert_eq!(align_of!([elain::Align<134217728>]), Some(134217728)); + assert_eq!(align_of!([elain::Align<268435456>]), Some(268435456)); + */ + } + } + + #[test] + fn test_enum_casts() { + // Test that casting the variants of enums with signed integer reprs to + // unsigned integers obeys expected signed -> unsigned casting rules. + + #[repr(i8)] + enum ReprI8 { + MinusOne = -1, + Zero = 0, + Min = i8::MIN, + Max = i8::MAX, + } + + #[allow(clippy::as_conversions)] + let x = ReprI8::MinusOne as u8; + assert_eq!(x, u8::MAX); + + #[allow(clippy::as_conversions)] + let x = ReprI8::Zero as u8; + assert_eq!(x, 0); + + #[allow(clippy::as_conversions)] + let x = ReprI8::Min as u8; + assert_eq!(x, 128); + + #[allow(clippy::as_conversions)] + let x = ReprI8::Max as u8; + assert_eq!(x, 127); + } + + #[test] + fn test_struct_padding() { + // Test that, for each provided repr, `struct_padding!` reports the + // expected value. + macro_rules! 
test { + (#[$cfg:meta] ($($ts:ty),*) => $expect:expr) => {{ + #[$cfg] + #[allow(dead_code)] + struct Test($($ts),*); + assert_eq!(struct_padding!(Test, [$($ts),*]), $expect); + }}; + (#[$cfg:meta] $(#[$cfgs:meta])* ($($ts:ty),*) => $expect:expr) => { + test!(#[$cfg] ($($ts),*) => $expect); + test!($(#[$cfgs])* ($($ts),*) => $expect); + }; + } + + test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] () => 0); + test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] (u8) => 0); + test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] (u8, ()) => 0); + test!(#[repr(C)] #[repr(packed)] (u8, u8) => 0); + + test!(#[repr(C)] (u8, AU64) => 7); + // Rust won't let you put `#[repr(packed)]` on a type which contains a + // `#[repr(align(n > 1))]` type (`AU64`), so we have to use `u64` here. + // It's not ideal, but it definitely has align > 1 on /some/ of our CI + // targets, and this isn't a particularly complex macro we're testing + // anyway. + test!(#[repr(packed)] (u8, u64) => 0); + } + + #[test] + fn test_repr_c_struct_padding() { + // Test that, for each provided repr, `repr_c_struct_padding!` reports + // the expected value. + macro_rules! 
test { + (($($ts:tt),*) => $expect:expr) => {{ + #[repr(C)] + #[allow(dead_code)] + struct Test($($ts),*); + assert_eq!(repr_c_struct_has_padding!(Test, [$($ts),*]), $expect); + }}; + } + + // Test static padding + test!(() => false); + test!(([u8]) => false); + test!((u8) => false); + test!((u8, [u8]) => false); + test!((u8, ()) => false); + test!((u8, (), [u8]) => false); + test!((u8, u8) => false); + test!((u8, u8, [u8]) => false); + + test!((u8, AU64) => true); + test!((u8, AU64, [u8]) => true); + + // Test dynamic padding + test!((AU64, [AU64]) => false); + test!((u8, [AU64]) => true); + + #[repr(align(4))] + struct AU32(#[allow(unused)] u32); + test!((AU64, [AU64]) => false); + test!((AU64, [AU32]) => true); + } + + #[test] + fn test_union_padding() { + // Test that, for each provided repr, `union_padding!` reports the + // expected value. + macro_rules! test { + (#[$cfg:meta] {$($fs:ident: $ts:ty),*} => $expect:expr) => {{ + #[$cfg] + #[allow(unused)] // fields are never read + union Test{ $($fs: $ts),* } + assert_eq!(union_padding!(Test, [$($ts),*]), $expect); + }}; + (#[$cfg:meta] $(#[$cfgs:meta])* {$($fs:ident: $ts:ty),*} => $expect:expr) => { + test!(#[$cfg] {$($fs: $ts),*} => $expect); + test!($(#[$cfgs])* {$($fs: $ts),*} => $expect); + }; + } + + test!(#[repr(C)] #[repr(packed)] {a: u8} => 0); + test!(#[repr(C)] #[repr(packed)] {a: u8, b: u8} => 0); + + // Rust won't let you put `#[repr(packed)]` on a type which contains a + // `#[repr(align(n > 1))]` type (`AU64`), so we have to use `u64` here. + // It's not ideal, but it definitely has align > 1 on /some/ of our CI + // targets, and this isn't a particularly complex macro we're testing + // anyway. + test!(#[repr(C)] #[repr(packed)] {a: u8, b: u64} => 7); + } + + #[test] + fn test_enum_padding() { + // Test that, for each provided repr, `enum_has_padding!` reports the + // expected value. + macro_rules! 
test { + (#[repr($disc:ident $(, $c:ident)?)] { $($vs:ident ($($ts:ty),*),)* } => $expect:expr) => { + test!(@case #[repr($disc $(, $c)?)] { $($vs ($($ts),*),)* } => $expect); + }; + (#[repr($disc:ident $(, $c:ident)?)] #[$cfg:meta] $(#[$cfgs:meta])* { $($vs:ident ($($ts:ty),*),)* } => $expect:expr) => { + test!(@case #[repr($disc $(, $c)?)] #[$cfg] { $($vs ($($ts),*),)* } => $expect); + test!(#[repr($disc $(, $c)?)] $(#[$cfgs])* { $($vs ($($ts),*),)* } => $expect); + }; + (@case #[repr($disc:ident $(, $c:ident)?)] $(#[$cfg:meta])? { $($vs:ident ($($ts:ty),*),)* } => $expect:expr) => {{ + #[repr($disc $(, $c)?)] + $(#[$cfg])? + #[allow(unused)] // variants and fields are never used + enum Test { + $($vs ($($ts),*),)* + } + assert_eq!( + enum_padding!(Test, $disc, $([$($ts),*]),*), + $expect + ); + }}; + } + + #[allow(unused)] + #[repr(align(2))] + struct U16(u16); + + #[allow(unused)] + #[repr(align(4))] + struct U32(u32); + + test!(#[repr(u8)] #[repr(C)] { + A(u8), + } => 0); + test!(#[repr(u16)] #[repr(C)] { + A(u8, u8), + B(U16), + } => 0); + test!(#[repr(u32)] #[repr(C)] { + A(u8, u8, u8, u8), + B(U16, u8, u8), + C(u8, u8, U16), + D(U16, U16), + E(U32), + } => 0); + + // `repr(int)` can pack the discriminant more efficiently + test!(#[repr(u8)] { + A(u8, U16), + } => 0); + test!(#[repr(u8)] { + A(u8, U16, U32), + } => 0); + + // `repr(C)` cannot + test!(#[repr(u8, C)] { + A(u8, U16), + } => 2); + test!(#[repr(u8, C)] { + A(u8, u8, u8, U32), + } => 4); + + // And field ordering can always cause problems + test!(#[repr(u8)] #[repr(C)] { + A(U16, u8), + } => 2); + test!(#[repr(u8)] #[repr(C)] { + A(U32, u8, u8, u8), + } => 4); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/util/macros.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/util/macros.rs new file mode 100644 index 0000000000000000000000000000000000000000..28fedadac93c7af30f7d5505cd0b0baa98eed005 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/util/macros.rs @@ -0,0 +1,837 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +/// Unsafely implements trait(s) for a type. +/// +/// # Safety +/// +/// The trait impl must be sound. +/// +/// When implementing `TryFromBytes`: +/// - If no `is_bit_valid` impl is provided, then it must be valid for +/// `is_bit_valid` to unconditionally return `true`. In other words, it must +/// be the case that any initialized sequence of bytes constitutes a valid +/// instance of `$ty`. +/// - If an `is_bit_valid` impl is provided, then the impl of `is_bit_valid` +/// must only return `true` if its argument refers to a valid `$ty`. +macro_rules! unsafe_impl { + // Implement `$trait` for `$ty` with no bounds. + ($(#[$attr:meta])* $ty:ty: $trait:ident $(; |$candidate:ident| $is_bit_valid:expr)?) => {{ + crate::util::macros::__unsafe(); + + $(#[$attr])* + // SAFETY: The caller promises that this is sound. + unsafe impl $trait for $ty { + unsafe_impl!(@method $trait $(; |$candidate| $is_bit_valid)?); + } + }}; + + // Implement all `$traits` for `$ty` with no bounds. + // + // The 2 arms under this one are there so we can apply + // N attributes for each one of M trait implementations. + // The simple solution of: + // + // ($(#[$attrs:meta])* $ty:ty: $($traits:ident),*) => { + // $( unsafe_impl!( $(#[$attrs])* $ty: $traits ) );* + // } + // + // Won't work. The macro processor sees that the outer repetition + // contains both $attrs and $traits and expects them to match the same + // amount of fragments. + // + // To solve this we must: + // 1. Pack the attributes into a single token tree fragment we can match over. + // 2. Expand the traits. + // 3. Unpack and expand the attributes. 
+ ($(#[$attrs:meta])* $ty:ty: $($traits:ident),*) => { + unsafe_impl!(@impl_traits_with_packed_attrs { $(#[$attrs])* } $ty: $($traits),*) + }; + + (@impl_traits_with_packed_attrs $attrs:tt $ty:ty: $($traits:ident),*) => {{ + $( unsafe_impl!(@unpack_attrs $attrs $ty: $traits); )* + }}; + + (@unpack_attrs { $(#[$attrs:meta])* } $ty:ty: $traits:ident) => { + unsafe_impl!($(#[$attrs])* $ty: $traits); + }; + + // This arm is identical to the following one, except it contains a + // preceding `const`. If we attempt to handle these with a single arm, there + // is an inherent ambiguity between `const` (the keyword) and `const` (the + // ident match for `$tyvar:ident`). + // + // To explain how this works, consider the following invocation: + // + // unsafe_impl!(const N: usize, T: ?Sized + Copy => Clone for Foo); + // + // In this invocation, here are the assignments to meta-variables: + // + // |---------------|------------| + // | Meta-variable | Assignment | + // |---------------|------------| + // | $constname | N | + // | $constty | usize | + // | $tyvar | T | + // | $optbound | Sized | + // | $bound | Copy | + // | $trait | Clone | + // | $ty | Foo | + // |---------------|------------| + // + // The following arm has the same behavior with the exception of the lack of + // support for a leading `const` parameter. + ( + $(#[$attr:meta])* + const $constname:ident : $constty:ident $(,)? + $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),* + => $trait:ident for $ty:ty $(; |$candidate:ident| $is_bit_valid:expr)? + ) => { + unsafe_impl!( + @inner + $(#[$attr])* + @const $constname: $constty, + $($tyvar $(: $(? $optbound +)* + $($bound +)*)?,)* + => $trait for $ty $(; |$candidate| $is_bit_valid)? + ); + }; + ( + $(#[$attr:meta])* + $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),* + => $trait:ident for $ty:ty $(; |$candidate:ident| $is_bit_valid:expr)? + ) => {{ + unsafe_impl!( + @inner + $(#[$attr])* + $($tyvar $(: $(? 
$optbound +)* + $($bound +)*)?,)* + => $trait for $ty $(; |$candidate| $is_bit_valid)? + ); + }}; + ( + @inner + $(#[$attr:meta])* + $(@const $constname:ident : $constty:ident,)* + $($tyvar:ident $(: $(? $optbound:ident +)* + $($bound:ident +)* )?,)* + => $trait:ident for $ty:ty $(; |$candidate:ident| $is_bit_valid:expr)? + ) => {{ + crate::util::macros::__unsafe(); + + $(#[$attr])* + #[allow(non_local_definitions)] + // SAFETY: The caller promises that this is sound. + unsafe impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),* $(, const $constname: $constty,)*> $trait for $ty { + unsafe_impl!(@method $trait $(; |$candidate| $is_bit_valid)?); + } + }}; + + (@method TryFromBytes ; |$candidate:ident| $is_bit_valid:expr) => { + #[allow(clippy::missing_inline_in_public_items, dead_code)] + #[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))] + fn only_derive_is_allowed_to_implement_this_trait() {} + + #[inline] + fn is_bit_valid($candidate: Maybe<'_, Self, Alignment>) -> bool + where + Alignment: crate::invariant::Alignment, + { + $is_bit_valid + } + }; + (@method TryFromBytes) => { + #[allow(clippy::missing_inline_in_public_items)] + #[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))] + fn only_derive_is_allowed_to_implement_this_trait() {} + #[inline(always)] + fn is_bit_valid(_candidate: Maybe<'_, Self, Alignment>) -> bool + where + Alignment: crate::invariant::Alignment, + { + true + } + }; + (@method $trait:ident) => { + #[allow(clippy::missing_inline_in_public_items, dead_code)] + #[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))] + fn only_derive_is_allowed_to_implement_this_trait() {} + }; + (@method $trait:ident; |$_candidate:ident| $_is_bit_valid:expr) => { + compile_error!("Can't provide `is_bit_valid` impl for trait other than `TryFromBytes`"); + }; +} + +/// Implements `$trait` for `$ty` where 
`$ty: TransmuteFrom<$repr>` (and +/// vice-versa). +/// +/// Calling this macro is safe; the internals of the macro emit appropriate +/// trait bounds which ensure that the given impl is sound. +macro_rules! impl_for_transmute_from { + ( + $(#[$attr:meta])* + $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?)? + => $trait:ident for $ty:ty [$repr:ty] + ) => { + const _: () = { + $(#[$attr])* + #[allow(non_local_definitions)] + + // SAFETY: `is_trait` (defined and used below) requires `T: + // TransmuteFrom`, `R: TransmuteFrom`, and `R: $trait`. It is + // called using `$ty` and `$repr`, ensuring that `$ty` and `$repr` + // have equivalent bit validity, and ensuring that `$repr: $trait`. + // The supported traits - `TryFromBytes`, `FromZeros`, `FromBytes`, + // and `IntoBytes` - are defined only in terms of the bit validity + // of a type. Therefore, `$repr: $trait` ensures that `$ty: $trait` + // is sound. + unsafe impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?)?> $trait for $ty { + #[allow(dead_code, clippy::missing_inline_in_public_items)] + #[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))] + fn only_derive_is_allowed_to_implement_this_trait() { + use crate::pointer::{*, invariant::Valid}; + + impl_for_transmute_from!(@assert_is_supported_trait $trait); + + fn is_trait() + where + T: TransmuteFrom + ?Sized, + R: TransmuteFrom + ?Sized, + R: $trait, + { + } + + #[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))] + fn f<$($tyvar $(: $(? $optbound +)* $($bound +)*)?)?>() { + is_trait::<$ty, $repr>(); + } + } + + impl_for_transmute_from!( + @is_bit_valid + $(<$tyvar $(: $(? $optbound +)* $($bound +)*)?>)? 
+ $trait for $ty [$repr] + ); + } + }; + }; + (@assert_is_supported_trait TryFromBytes) => {}; + (@assert_is_supported_trait FromZeros) => {}; + (@assert_is_supported_trait FromBytes) => {}; + (@assert_is_supported_trait IntoBytes) => {}; + ( + @is_bit_valid + $(<$tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?>)? + TryFromBytes for $ty:ty [$repr:ty] + ) => { + #[inline(always)] + fn is_bit_valid(candidate: $crate::Maybe<'_, Self, Alignment>) -> bool + where + Alignment: $crate::invariant::Alignment, + { + // SAFETY: This macro ensures that `$repr` and `Self` have the same + // size and bit validity. Thus, a bit-valid instance of `$repr` is + // also a bit-valid instance of `Self`. + <$repr as TryFromBytes>::is_bit_valid(candidate.transmute::<_, _, BecauseImmutable>()) + } + }; + ( + @is_bit_valid + $(<$tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?>)? + $trait:ident for $ty:ty [$repr:ty] + ) => { + // Trait other than `TryFromBytes`; no `is_bit_valid` impl. + }; +} + +/// Implements a trait for a type, bounding on each member of the power set of +/// a set of type variables. This is useful for implementing traits for tuples +/// or `fn` types. +/// +/// The last argument is the name of a macro which will be called in every +/// `impl` block, and is expected to expand to the name of the type for which to +/// implement the trait. +/// +/// For example, the invocation: +/// ```ignore +/// unsafe_impl_for_power_set!(A, B => Foo for type!(...)) +/// ``` +/// ...expands to: +/// ```ignore +/// unsafe impl Foo for type!() { ... } +/// unsafe impl Foo for type!(B) { ... } +/// unsafe impl Foo for type!(A, B) { ... } +/// ``` +macro_rules! unsafe_impl_for_power_set { + ( + $first:ident $(, $rest:ident)* $(-> $ret:ident)? => $trait:ident for $macro:ident!(...) + $(; |$candidate:ident| $is_bit_valid:expr)? + ) => { + unsafe_impl_for_power_set!( + $($rest),* $(-> $ret)? => $trait for $macro!(...) + $(; |$candidate| $is_bit_valid)? 
+ ); + unsafe_impl_for_power_set!( + @impl $first $(, $rest)* $(-> $ret)? => $trait for $macro!(...) + $(; |$candidate| $is_bit_valid)? + ); + }; + ( + $(-> $ret:ident)? => $trait:ident for $macro:ident!(...) + $(; |$candidate:ident| $is_bit_valid:expr)? + ) => { + unsafe_impl_for_power_set!( + @impl $(-> $ret)? => $trait for $macro!(...) + $(; |$candidate| $is_bit_valid)? + ); + }; + ( + @impl $($vars:ident),* $(-> $ret:ident)? => $trait:ident for $macro:ident!(...) + $(; |$candidate:ident| $is_bit_valid:expr)? + ) => { + unsafe_impl!( + $($vars,)* $($ret)? => $trait for $macro!($($vars),* $(-> $ret)?) + $(; |$candidate| $is_bit_valid)? + ); + }; +} + +/// Expands to an `Option` type with the given argument types and +/// return type. Designed for use with `unsafe_impl_for_power_set`. +macro_rules! opt_extern_c_fn { + ($($args:ident),* -> $ret:ident) => { Option $ret> }; +} + +/// Expands to an `Option` type with the given argument +/// types and return type. Designed for use with `unsafe_impl_for_power_set`. +macro_rules! opt_unsafe_extern_c_fn { + ($($args:ident),* -> $ret:ident) => { Option $ret> }; +} + +/// Expands to an `Option` type with the given argument types and return +/// type. Designed for use with `unsafe_impl_for_power_set`. +macro_rules! opt_fn { + ($($args:ident),* -> $ret:ident) => { Option $ret> }; +} + +/// Expands to an `Option` type with the given argument types and +/// return type. Designed for use with `unsafe_impl_for_power_set`. +macro_rules! opt_unsafe_fn { + ($($args:ident),* -> $ret:ident) => { Option $ret> }; +} + +// This `allow` is needed because, when testing, we export this macro so it can +// be used in `doctests`. +#[allow(rustdoc::private_intra_doc_links)] +/// Implements trait(s) for a type or verifies the given implementation by +/// referencing an existing (derived) implementation. 
+/// +/// This macro exists so that we can provide zerocopy-derive as an optional +/// dependency and still get the benefit of using its derives to validate that +/// our trait impls are sound. +/// +/// When compiling without `--cfg 'feature = "derive"` and without `--cfg test`, +/// `impl_or_verify!` emits the provided trait impl. When compiling with either +/// of those cfgs, it is expected that the type in question is deriving the +/// traits instead. In this case, `impl_or_verify!` emits code which validates +/// that the given trait impl is at least as restrictive as the the impl emitted +/// by the custom derive. This has the effect of confirming that the impl which +/// is emitted when the `derive` feature is disabled is actually sound (on the +/// assumption that the impl emitted by the custom derive is sound). +/// +/// The caller is still required to provide a safety comment (e.g. using the +/// `const _: () = unsafe` macro). The reason for this restriction is that, +/// while `impl_or_verify!` can guarantee that the provided impl is sound when +/// it is compiled with the appropriate cfgs, there is no way to guarantee that +/// it is ever compiled with those cfgs. In particular, it would be possible to +/// accidentally place an `impl_or_verify!` call in a context that is only ever +/// compiled when the `derive` feature is disabled. If that were to happen, +/// there would be nothing to prevent an unsound trait impl from being emitted. +/// Requiring a safety comment reduces the likelihood of emitting an unsound +/// impl in this case, and also provides useful documentation for readers of the +/// code. +/// +/// Finally, if a `TryFromBytes::is_bit_valid` impl is provided, it must adhere +/// to the safety preconditions of [`unsafe_impl!`]. 
+/// +/// ## Example +/// +/// ```rust,ignore +/// // Note that these derives are gated by `feature = "derive"` +/// #[cfg_attr(any(feature = "derive", test), derive(FromZeros, FromBytes, IntoBytes, Unaligned))] +/// #[repr(transparent)] +/// struct Wrapper(T); +/// +/// const _: () = unsafe { +/// /// SAFETY: +/// /// `Wrapper` is `repr(transparent)`, so it is sound to implement any +/// /// zerocopy trait if `T` implements that trait. +/// impl_or_verify!(T: FromZeros => FromZeros for Wrapper); +/// impl_or_verify!(T: FromBytes => FromBytes for Wrapper); +/// impl_or_verify!(T: IntoBytes => IntoBytes for Wrapper); +/// impl_or_verify!(T: Unaligned => Unaligned for Wrapper); +/// } +/// ``` +#[cfg_attr(__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE, macro_export)] // Used in `doctests.rs` +macro_rules! impl_or_verify { + // The following two match arms follow the same pattern as their + // counterparts in `unsafe_impl!`; see the documentation on those arms for + // more details. + ( + const $constname:ident : $constty:ident $(,)? + $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),* + => $trait:ident for $ty:ty + ) => { + impl_or_verify!(@impl { unsafe_impl!( + const $constname: $constty, $($tyvar $(: $(? $optbound +)* $($bound +)*)?),* => $trait for $ty + ); }); + impl_or_verify!(@verify $trait, { + impl Subtrait for $ty {} + }); + }; + ( + $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),* + => $trait:ident for $ty:ty $(; |$candidate:ident| $is_bit_valid:expr)? + ) => { + impl_or_verify!(@impl { unsafe_impl!( + $($tyvar $(: $(? $optbound +)* $($bound +)*)?),* => $trait for $ty + $(; |$candidate| $is_bit_valid)? + ); }); + impl_or_verify!(@verify $trait, { + impl<$($tyvar $(: $(? 
$optbound +)* $($bound +)*)?),*> Subtrait for $ty {} + }); + }; + (@impl $impl_block:tt) => { + #[cfg(not(any(feature = "derive", test)))] + { $impl_block }; + }; + (@verify $trait:ident, $impl_block:tt) => { + #[cfg(any(feature = "derive", test))] + { + // On some toolchains, `Subtrait` triggers the `dead_code` lint + // because it is implemented but never used. + #[allow(dead_code)] + trait Subtrait: $trait {} + $impl_block + }; + }; +} + +/// Implements `KnownLayout` for a sized type. +macro_rules! impl_known_layout { + ($(const $constvar:ident : $constty:ty, $tyvar:ident $(: ?$optbound:ident)? => $ty:ty),* $(,)?) => { + $(impl_known_layout!(@inner const $constvar: $constty, $tyvar $(: ?$optbound)? => $ty);)* + }; + ($($tyvar:ident $(: ?$optbound:ident)? => $ty:ty),* $(,)?) => { + $(impl_known_layout!(@inner , $tyvar $(: ?$optbound)? => $ty);)* + }; + ($($(#[$attrs:meta])* $ty:ty),*) => { $(impl_known_layout!(@inner , => $(#[$attrs])* $ty);)* }; + (@inner $(const $constvar:ident : $constty:ty)? , $($tyvar:ident $(: ?$optbound:ident)?)? => $(#[$attrs:meta])* $ty:ty) => { + const _: () = { + use core::ptr::NonNull; + + #[allow(non_local_definitions)] + $(#[$attrs])* + // SAFETY: Delegates safety to `DstLayout::for_type`. + unsafe impl<$($tyvar $(: ?$optbound)?)? $(, const $constvar : $constty)?> KnownLayout for $ty { + #[allow(clippy::missing_inline_in_public_items)] + #[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))] + fn only_derive_is_allowed_to_implement_this_trait() where Self: Sized {} + + type PointerMetadata = (); + + // SAFETY: `CoreMaybeUninit::LAYOUT` and `T::LAYOUT` are + // identical because `CoreMaybeUninit` has the same size and + // alignment as `T` [1], and `CoreMaybeUninit` admits + // uninitialized bytes in all positions. 
+ // + // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1: + // + // `MaybeUninit` is guaranteed to have the same size, + // alignment, and ABI as `T` + type MaybeUninit = core::mem::MaybeUninit; + + const LAYOUT: crate::DstLayout = crate::DstLayout::for_type::<$ty>(); + + // SAFETY: `.cast` preserves address and provenance. + // + // FIXME(#429): Add documentation to `.cast` that promises that + // it preserves provenance. + #[inline(always)] + fn raw_from_ptr_len(bytes: NonNull, _meta: ()) -> NonNull { + bytes.cast::() + } + + #[inline(always)] + fn pointer_to_metadata(_ptr: *mut Self) -> () { + } + } + }; + }; +} + +/// Implements `KnownLayout` for a type in terms of the implementation of +/// another type with the same representation. +/// +/// # Safety +/// +/// - `$ty` and `$repr` must have the same: +/// - Fixed prefix size +/// - Alignment +/// - (For DSTs) trailing slice element size +/// - It must be valid to perform an `as` cast from `*mut $repr` to `*mut $ty`, +/// and this operation must preserve referent size (ie, `size_of_val_raw`). +macro_rules! unsafe_impl_known_layout { + ($($tyvar:ident: ?Sized + KnownLayout =>)? #[repr($repr:ty)] $ty:ty) => {{ + use core::ptr::NonNull; + + crate::util::macros::__unsafe(); + + #[allow(non_local_definitions)] + // SAFETY: The caller promises that this is sound. + unsafe impl<$($tyvar: ?Sized + KnownLayout)?> KnownLayout for $ty { + #[allow(clippy::missing_inline_in_public_items, dead_code)] + #[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))] + fn only_derive_is_allowed_to_implement_this_trait() {} + + type PointerMetadata = <$repr as KnownLayout>::PointerMetadata; + type MaybeUninit = <$repr as KnownLayout>::MaybeUninit; + + const LAYOUT: DstLayout = <$repr as KnownLayout>::LAYOUT; + + // SAFETY: All operations preserve address and provenance. Caller + // has promised that the `as` cast preserves size. 
+ // + // FIXME(#429): Add documentation to `NonNull::new_unchecked` that + // it preserves provenance. + #[inline(always)] + fn raw_from_ptr_len(bytes: NonNull, meta: <$repr as KnownLayout>::PointerMetadata) -> NonNull { + #[allow(clippy::as_conversions)] + let ptr = <$repr>::raw_from_ptr_len(bytes, meta).as_ptr() as *mut Self; + // SAFETY: `ptr` was converted from `bytes`, which is non-null. + unsafe { NonNull::new_unchecked(ptr) } + } + + #[inline(always)] + fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata { + #[allow(clippy::as_conversions)] + let ptr = ptr as *mut $repr; + <$repr>::pointer_to_metadata(ptr) + } + } + }}; +} + +/// Uses `align_of` to confirm that a type or set of types have alignment 1. +/// +/// Note that `align_of` requires `T: Sized`, so this macro doesn't work for +/// unsized types. +macro_rules! assert_unaligned { + ($($tys:ty),*) => { + $( + // We only compile this assertion under `cfg(test)` to avoid taking + // an extra non-dev dependency (and making this crate more expensive + // to compile for our dependents). + #[cfg(test)] + static_assertions::const_assert_eq!(core::mem::align_of::<$tys>(), 1); + )* + }; +} + +/// Emits a function definition as either `const fn` or `fn` depending on +/// whether the current toolchain version supports `const fn` with generic trait +/// bounds. +macro_rules! maybe_const_trait_bounded_fn { + // This case handles both `self` methods (where `self` is by value) and + // non-method functions. Each `$args` may optionally be followed by `: + // $arg_tys:ty`, which can be omitted for `self`. + ($(#[$attr:meta])* $vis:vis const fn $name:ident($($args:ident $(: $arg_tys:ty)?),* $(,)?) $(-> $ret_ty:ty)? $body:block) => { + #[cfg(not(no_zerocopy_generic_bounds_in_const_fn_1_61_0))] + $(#[$attr])* $vis const fn $name($($args $(: $arg_tys)?),*) $(-> $ret_ty)? $body + + #[cfg(no_zerocopy_generic_bounds_in_const_fn_1_61_0)] + $(#[$attr])* $vis fn $name($($args $(: $arg_tys)?),*) $(-> $ret_ty)? 
$body + }; +} + +/// Either panic (if the current Rust toolchain supports panicking in `const +/// fn`) or evaluate a constant that will cause an array indexing error whose +/// error message will include the format string. +/// +/// The type that this expression evaluates to must be `Copy`, or else the +/// non-panicking desugaring will fail to compile. +macro_rules! const_panic { + (@non_panic $($_arg:tt)+) => {{ + // This will type check to whatever type is expected based on the call + // site. + let panic: [_; 0] = []; + // This will always fail (since we're indexing into an array of size 0. + #[allow(unconditional_panic)] + panic[0] + }}; + ($($arg:tt)+) => {{ + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + panic!($($arg)+); + #[cfg(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)] + const_panic!(@non_panic $($arg)+) + }}; +} + +/// Either assert (if the current Rust toolchain supports panicking in `const +/// fn`) or evaluate the expression and, if it evaluates to `false`, call +/// `const_panic!`. This is used in place of `assert!` in const contexts to +/// accommodate old toolchains. +macro_rules! const_assert { + ($e:expr) => {{ + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + assert!($e); + #[cfg(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)] + { + let e = $e; + if !e { + let _: () = const_panic!(@non_panic concat!("assertion failed: ", stringify!($e))); + } + } + }}; + ($e:expr, $($args:tt)+) => {{ + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + assert!($e, $($args)+); + #[cfg(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)] + { + let e = $e; + if !e { + let _: () = const_panic!(@non_panic concat!("assertion failed: ", stringify!($e), ": ", stringify!($arg)), $($args)*); + } + } + }}; +} + +/// Like `const_assert!`, but relative to `debug_assert!`. +macro_rules! const_debug_assert { + ($e:expr $(, $msg:expr)?) 
=> {{ + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + debug_assert!($e $(, $msg)?); + #[cfg(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)] + { + // Use this (rather than `#[cfg(debug_assertions)]`) to ensure that + // `$e` is always compiled even if it will never be evaluated at + // runtime. + if cfg!(debug_assertions) { + let e = $e; + if !e { + let _: () = const_panic!(@non_panic concat!("assertion failed: ", stringify!($e) $(, ": ", $msg)?)); + } + } + } + }} +} + +/// Either invoke `unreachable!()` or `loop {}` depending on whether the Rust +/// toolchain supports panicking in `const fn`. +macro_rules! const_unreachable { + () => {{ + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + unreachable!(); + + #[cfg(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)] + loop {} + }}; +} + +/// Asserts at compile time that `$condition` is true for `Self` or the given +/// `$tyvar`s. Unlike `const_assert`, this is *strictly* a compile-time check; +/// it cannot be evaluated in a runtime context. The condition is checked after +/// monomorphization and, upon failure, emits a compile error. +macro_rules! static_assert { + (Self $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )? => $condition:expr $(, $args:tt)*) => {{ + trait StaticAssert { + const ASSERT: bool; + } + + impl StaticAssert for T { + const ASSERT: bool = { + const_assert!($condition $(, $args)*); + $condition + }; + } + + const_assert!(::ASSERT); + }}; + ($($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),* => $condition:expr $(, $args:tt)*) => {{ + trait StaticAssert { + const ASSERT: bool; + } + + // NOTE: We use `PhantomData` so we can support unsized types. + impl<$($tyvar $(: $(? 
$optbound +)* $($bound +)*)?,)*> StaticAssert for ($(core::marker::PhantomData<$tyvar>,)*) { + const ASSERT: bool = { + const_assert!($condition $(, $args)*); + $condition + }; + } + + const_assert!(<($(core::marker::PhantomData<$tyvar>,)*) as StaticAssert>::ASSERT); + }}; +} + +/// Assert at compile time that `tyvar` does not have a zero-sized DST +/// component. +macro_rules! static_assert_dst_is_not_zst { + ($tyvar:ident) => {{ + use crate::KnownLayout; + static_assert!($tyvar: ?Sized + KnownLayout => { + let dst_is_zst = match $tyvar::LAYOUT.size_info { + crate::SizeInfo::Sized { .. } => false, + crate::SizeInfo::SliceDst(TrailingSliceLayout { elem_size, .. }) => { + elem_size == 0 + } + }; + !dst_is_zst + }, "cannot call this method on a dynamically-sized type whose trailing slice element is zero-sized"); + }} +} + +/// Defines a named [`Cast`] implementation. +/// +/// # Safety +/// +/// The caller must ensure that, given `src: *mut $src`, `src as *mut $dst` is a +/// size-preserving or size-shrinking cast. +/// +/// [`Cast`]: crate::pointer::cast::Cast +#[macro_export] +#[doc(hidden)] +macro_rules! define_cast { + // We require the caller to provide an `unsafe` block as part of the input + // syntax since a call to `define_cast!` is useless inside of an `unsafe` + // block (since it would introduce a type which can't be named outside of + // the context of that block). + (unsafe { $vis:vis $name:ident $(<$tyvar:ident $(: ?$optbound:ident)?>)? = $src:ty => $dst:ty }) => { + #[allow(missing_debug_implementations, missing_copy_implementations, unreachable_pub)] + $vis enum $name {} + + // SAFETY: The caller promises that `src as *mut $src` is a size- + // preserving or size-shrinking cast. All operations preserve + // provenance. + unsafe impl $(<$tyvar $(: ?$optbound)?>)? 
$crate::pointer::cast::Project<$src, $dst> for $name { + fn project(src: $crate::pointer::PtrInner<'_, $src>) -> *mut $dst { + #[allow(clippy::as_conversions)] + return src.as_ptr() as *mut $dst; + } + } + + // SAFETY: The impl of `Project::project` preserves referent address. + unsafe impl $(<$tyvar $(: ?$optbound)?>)? $crate::pointer::cast::Cast<$src, $dst> for $name {} + }; +} + +/// Implements `TransmuteFrom` and `SizeEq` for `T` and `$wrapper`. +/// +/// # Safety +/// +/// `T` and `$wrapper` must have the same bit validity, and must have the +/// same size in the sense of `CastExact` (specifically, both a +/// `T`-to-`$wrapper` cast and a `$wrapper`-to-`T` cast must be +/// size-preserving). +macro_rules! unsafe_impl_for_transparent_wrapper { + ($vis:vis T $(: ?$optbound:ident)? => $wrapper:ident) => {{ + crate::util::macros::__unsafe(); + + use crate::pointer::{TransmuteFrom, cast::{CastExact, TransitiveProject}, SizeEq, invariant::Valid}; + use crate::wrappers::ReadOnly; + + // SAFETY: The caller promises that `T` and `$wrapper` have the same + // bit validity. + unsafe impl TransmuteFrom for $wrapper {} + // SAFETY: See previous safety comment. + unsafe impl TransmuteFrom<$wrapper, Valid, Valid> for T {} + // SAFETY: The caller promises that a `T` to `$wrapper` cast is + // size-preserving. + define_cast!(unsafe { $vis CastToWrapper = T => $wrapper }); + // SAFETY: The caller promises that a `T` to `$wrapper` cast is + // size-preserving. + unsafe impl CastExact> for CastToWrapper {} + // SAFETY: The caller promises that a `$wrapper` to `T` cast is + // size-preserving. + define_cast!(unsafe { $vis CastFromWrapper = $wrapper => T }); + // SAFETY: The caller promises that a `$wrapper` to `T` cast is + // size-preserving. 
+ unsafe impl CastExact<$wrapper, T> for CastFromWrapper {} + + impl SizeEq for $wrapper { + type CastFrom = CastToWrapper; + } + impl SizeEq<$wrapper> for T { + type CastFrom = CastFromWrapper; + } + + impl SizeEq> for $wrapper { + type CastFrom = TransitiveProject< + T, + >>::CastFrom, + CastToWrapper, + >; + } + impl SizeEq<$wrapper> for ReadOnly { + type CastFrom = TransitiveProject< + T, + CastFromWrapper, + as SizeEq>::CastFrom, + >; + } + + impl SizeEq> for ReadOnly<$wrapper> { + type CastFrom = TransitiveProject< + $wrapper, + <$wrapper as SizeEq>>::CastFrom, + > as SizeEq<$wrapper>>::CastFrom, + >; + } + impl SizeEq>> for ReadOnly { + type CastFrom = TransitiveProject< + $wrapper, + <$wrapper as SizeEq>>>::CastFrom, + as SizeEq<$wrapper>>::CastFrom, + >; + } + }}; +} + +macro_rules! impl_transitive_transmute_from { + ($($tyvar:ident $(: ?$optbound:ident)?)? => $t:ty => $u:ty => $v:ty) => { + const _: () = { + use crate::pointer::{TransmuteFrom, SizeEq, invariant::Valid}; + + impl<$($tyvar $(: ?$optbound)?)?> SizeEq<$t> for $v + where + $u: SizeEq<$t>, + $v: SizeEq<$u>, + { + type CastFrom = cast::TransitiveProject< + $u, + <$u as SizeEq<$t>>::CastFrom, + <$v as SizeEq<$u>>::CastFrom + >; + } + + // SAFETY: Since `$u: TransmuteFrom<$t, Valid, Valid>`, it is sound + // to transmute a bit-valid `$t` to a bit-valid `$u`. Since `$v: + // TransmuteFrom<$u, Valid, Valid>`, it is sound to transmute that + // bit-valid `$u` to a bit-valid `$v`. + unsafe impl<$($tyvar $(: ?$optbound)?)?> TransmuteFrom<$t, Valid, Valid> for $v + where + $u: TransmuteFrom<$t, Valid, Valid>, + $v: TransmuteFrom<$u, Valid, Valid>, + {} + }; + }; +} + +/// A no-op `unsafe fn` for use in macro expansions. +/// +/// Calling this function in a macro expansion ensures that the macro's caller +/// must wrap the call in `unsafe { ... }`. 
+#[inline(always)] +pub(crate) const unsafe fn __unsafe() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/util/mod.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/util/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..ccc5166fdde1b653f53bc89efa60708347d4c2c2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/util/mod.rs @@ -0,0 +1,919 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +pub(crate) mod macros; + +#[doc(hidden)] +pub mod macro_util; + +use core::{ + marker::PhantomData, + mem::{self, ManuallyDrop}, + num::NonZeroUsize, + ptr::NonNull, +}; + +use super::*; +use crate::pointer::{ + invariant::{Exclusive, Shared, Valid}, + SizeEq, TransmuteFromPtr, +}; + +/// Like [`PhantomData`], but [`Send`] and [`Sync`] regardless of whether the +/// wrapped `T` is. +pub(crate) struct SendSyncPhantomData(PhantomData); + +// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound +// to be called from multiple threads. +unsafe impl Send for SendSyncPhantomData {} +// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound +// to be called from multiple threads. 
+unsafe impl Sync for SendSyncPhantomData {} + +impl Default for SendSyncPhantomData { + fn default() -> SendSyncPhantomData { + SendSyncPhantomData(PhantomData) + } +} + +impl PartialEq for SendSyncPhantomData { + fn eq(&self, _other: &Self) -> bool { + true + } +} + +impl Eq for SendSyncPhantomData {} + +impl Clone for SendSyncPhantomData { + fn clone(&self) -> Self { + SendSyncPhantomData(PhantomData) + } +} + +#[cfg(miri)] +extern "Rust" { + /// Miri-provided intrinsic that marks the pointer `ptr` as aligned to + /// `align`. + /// + /// This intrinsic is used to inform Miri's symbolic alignment checker that + /// a pointer is aligned, even if Miri cannot statically deduce that fact. + /// This is often required when performing raw pointer arithmetic or casts + /// where the alignment is guaranteed by runtime checks or invariants that + /// Miri is not aware of. + pub(crate) fn miri_promise_symbolic_alignment(ptr: *const (), align: usize); +} + +pub(crate) trait AsAddress { + fn addr(self) -> usize; +} + +impl AsAddress for &T { + #[inline(always)] + fn addr(self) -> usize { + let ptr: *const T = self; + AsAddress::addr(ptr) + } +} + +impl AsAddress for &mut T { + #[inline(always)] + fn addr(self) -> usize { + let ptr: *const T = self; + AsAddress::addr(ptr) + } +} + +impl AsAddress for NonNull { + #[inline(always)] + fn addr(self) -> usize { + AsAddress::addr(self.as_ptr()) + } +} + +impl AsAddress for *const T { + #[inline(always)] + fn addr(self) -> usize { + // FIXME(#181), FIXME(https://github.com/rust-lang/rust/issues/95228): + // Use `.addr()` instead of `as usize` once it's stable, and get rid of + // this `allow`. Currently, `as usize` is the only way to accomplish + // this. 
+ #[allow(clippy::as_conversions)] + #[cfg_attr( + __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, + allow(lossy_provenance_casts) + )] + return self.cast::<()>() as usize; + } +} + +impl AsAddress for *mut T { + #[inline(always)] + fn addr(self) -> usize { + let ptr: *const T = self; + AsAddress::addr(ptr) + } +} + +/// Validates that `t` is aligned to `align_of::()`. +#[inline(always)] +pub(crate) fn validate_aligned_to(t: T) -> Result<(), AlignmentError<(), U>> { + // `mem::align_of::()` is guaranteed to return a non-zero value, which in + // turn guarantees that this mod operation will not panic. + #[allow(clippy::arithmetic_side_effects)] + let remainder = t.addr() % mem::align_of::(); + if remainder == 0 { + Ok(()) + } else { + // SAFETY: We just confirmed that `t.addr() % align_of::() != 0`. + // That's only possible if `align_of::() > 1`. + Err(unsafe { AlignmentError::new_unchecked(()) }) + } +} + +/// Returns the bytes needed to pad `len` to the next multiple of `align`. +/// +/// This function assumes that align is a power of two; there are no guarantees +/// on the answer it gives if this is not the case. +#[cfg_attr( + kani, + kani::requires(len <= isize::MAX as usize), + kani::requires(align.is_power_of_two()), + kani::ensures(|&p| (len + p) % align.get() == 0), + // Ensures that we add the minimum required padding. + kani::ensures(|&p| p < align.get()), +)] +pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize { + #[cfg(kani)] + #[kani::proof_for_contract(padding_needed_for)] + fn proof() { + padding_needed_for(kani::any(), kani::any()); + } + + // Abstractly, we want to compute: + // align - (len % align). + // Handling the case where len%align is 0. + // Because align is a power of two, len % align = len & (align-1). + // Guaranteed not to underflow as align is nonzero. 
+ #[allow(clippy::arithmetic_side_effects)] + let mask = align.get() - 1; + + // To efficiently subtract this value from align, we can use the bitwise + // complement. + // Note that ((!len) & (align-1)) gives us a number that with (len & + // (align-1)) sums to align-1. So subtracting 1 from x before taking the + // complement subtracts `len` from `align`. Some quick inspection of + // cases shows that this also handles the case where `len % align = 0` + // correctly too: len-1 % align then equals align-1, so the complement mod + // align will be 0, as desired. + // + // The following reasoning can be verified quickly by an SMT solver + // supporting the theory of bitvectors: + // ```smtlib + // ; Naive implementation of padding + // (define-fun padding1 ( + // (len (_ BitVec 32)) + // (align (_ BitVec 32))) (_ BitVec 32) + // (ite + // (= (_ bv0 32) (bvand len (bvsub align (_ bv1 32)))) + // (_ bv0 32) + // (bvsub align (bvand len (bvsub align (_ bv1 32)))))) + // + // ; The implementation below + // (define-fun padding2 ( + // (len (_ BitVec 32)) + // (align (_ BitVec 32))) (_ BitVec 32) + // (bvand (bvnot (bvsub len (_ bv1 32))) (bvsub align (_ bv1 32)))) + // + // (define-fun is-power-of-two ((x (_ BitVec 32))) Bool + // (= (_ bv0 32) (bvand x (bvsub x (_ bv1 32))))) + // + // (declare-const len (_ BitVec 32)) + // (declare-const align (_ BitVec 32)) + // ; Search for a case where align is a power of two and padding2 disagrees + // ; with padding1 + // (assert (and (is-power-of-two align) + // (not (= (padding1 len align) (padding2 len align))))) + // (simplify (padding1 (_ bv300 32) (_ bv32 32))) ; 20 + // (simplify (padding2 (_ bv300 32) (_ bv32 32))) ; 20 + // (simplify (padding1 (_ bv322 32) (_ bv32 32))) ; 30 + // (simplify (padding2 (_ bv322 32) (_ bv32 32))) ; 30 + // (simplify (padding1 (_ bv8 32) (_ bv8 32))) ; 0 + // (simplify (padding2 (_ bv8 32) (_ bv8 32))) ; 0 + // (check-sat) ; unsat, also works for 64-bit bitvectors + // ``` + 
!(len.wrapping_sub(1)) & mask +} + +/// Rounds `n` down to the largest value `m` such that `m <= n` and `m % align +/// == 0`. +/// +/// # Panics +/// +/// May panic if `align` is not a power of two. Even if it doesn't panic in this +/// case, it will produce nonsense results. +#[inline(always)] +#[cfg_attr( + kani, + kani::requires(align.is_power_of_two()), + kani::ensures(|&m| m <= n && m % align.get() == 0), + // Guarantees that `m` is the *largest* value such that `m % align == 0`. + kani::ensures(|&m| { + // If this `checked_add` fails, then the next multiple would wrap + // around, which trivially satisfies the "largest value" requirement. + m.checked_add(align.get()).map(|next_mul| next_mul > n).unwrap_or(true) + }) +)] +pub(crate) const fn round_down_to_next_multiple_of_alignment( + n: usize, + align: NonZeroUsize, +) -> usize { + #[cfg(kani)] + #[kani::proof_for_contract(round_down_to_next_multiple_of_alignment)] + fn proof() { + round_down_to_next_multiple_of_alignment(kani::any(), kani::any()); + } + + let align = align.get(); + #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))] + debug_assert!(align.is_power_of_two()); + + // Subtraction can't underflow because `align.get() >= 1`. + #[allow(clippy::arithmetic_side_effects)] + let mask = !(align - 1); + n & mask +} + +pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize { + if a.get() < b.get() { + b + } else { + a + } +} + +pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize { + if a.get() > b.get() { + b + } else { + a + } +} + +/// Copies `src` into the prefix of `dst`. +/// +/// # Safety +/// +/// The caller guarantees that `src.len() <= dst.len()`. 
+#[inline(always)] +pub(crate) unsafe fn copy_unchecked(src: &[u8], dst: &mut [u8]) { + debug_assert!(src.len() <= dst.len()); + // SAFETY: This invocation satisfies the safety contract of + // copy_nonoverlapping [1]: + // - `src.as_ptr()` is trivially valid for reads of `src.len()` bytes + // - `dst.as_ptr()` is valid for writes of `src.len()` bytes, because the + // caller has promised that `src.len() <= dst.len()` + // - `src` and `dst` are, trivially, properly aligned + // - the region of memory beginning at `src` with a size of `src.len()` + // bytes does not overlap with the region of memory beginning at `dst` + // with the same size, because `dst` is derived from an exclusive + // reference. + unsafe { + core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len()); + }; +} + +/// Unsafely transmutes the given `src` into a type `Dst`. +/// +/// # Safety +/// +/// The value `src` must be a valid instance of `Dst`. +#[inline(always)] +pub(crate) const unsafe fn transmute_unchecked(src: Src) -> Dst { + static_assert!(Src, Dst => core::mem::size_of::() == core::mem::size_of::()); + + #[repr(C)] + union Transmute { + src: ManuallyDrop, + dst: ManuallyDrop, + } + + // SAFETY: Since `Transmute` is `#[repr(C)]`, its `src` and `dst` + // fields both start at the same offset and the types of those fields are + // transparent wrappers around `Src` and `Dst` [1]. Consequently, + // initializing `Transmute` with with `src` and then reading out `dst` is + // equivalent to transmuting from `Src` to `Dst` [2]. Transmuting from `src` + // to `Dst` is valid because — by contract on the caller — `src` is a valid + // instance of `Dst`. + // + // [1] Per https://doc.rust-lang.org/1.82.0/std/mem/struct.ManuallyDrop.html: + // + // `ManuallyDrop` is guaranteed to have the same layout and bit + // validity as `T`, and is subject to the same layout optimizations as + // `T`. 
+ // + // [2] Per https://doc.rust-lang.org/1.82.0/reference/items/unions.html#reading-and-writing-union-fields: + // + // Effectively, writing to and then reading from a union with the C + // representation is analogous to a transmute from the type used for + // writing to the type used for reading. + unsafe { ManuallyDrop::into_inner(Transmute { src: ManuallyDrop::new(src) }.dst) } +} + +/// # Safety +/// +/// `Src` must have a greater or equal alignment to `Dst`. +pub(crate) unsafe fn transmute_ref(src: &Src) -> &Dst +where + Src: ?Sized, + Dst: SizeEq + + TransmuteFromPtr>::CastFrom, R> + + ?Sized, +{ + let dst = Ptr::from_ref(src).transmute(); + // SAFETY: The caller promises that `Src`'s alignment is at least as large + // as `Dst`'s alignment. + let dst = unsafe { dst.assume_alignment() }; + dst.as_ref() +} + +/// # Safety +/// +/// `Src` must have a greater or equal alignment to `Dst`. +pub(crate) unsafe fn transmute_mut(src: &mut Src) -> &mut Dst +where + Src: ?Sized, + Dst: SizeEq + + TransmuteFromPtr>::CastFrom, R> + + ?Sized, +{ + let dst = Ptr::from_mut(src).transmute(); + // SAFETY: The caller promises that `Src`'s alignment is at least as large + // as `Dst`'s alignment. + let dst = unsafe { dst.assume_alignment() }; + dst.as_mut() +} + +/// Uses `allocate` to create a `Box`. +/// +/// # Errors +/// +/// Returns an error on allocation failure. Allocation failure is guaranteed +/// never to cause a panic or an abort. +/// +/// # Safety +/// +/// `allocate` must be either `alloc::alloc::alloc` or +/// `alloc::alloc::alloc_zeroed`. The referent of the box returned by `new_box` +/// has the same bit-validity as the referent of the pointer returned by the +/// given `allocate` and sufficient size to store `T` with `meta`. 
+#[must_use = "has no side effects (other than allocation)"] +#[cfg(feature = "alloc")] +#[inline] +pub(crate) unsafe fn new_box( + meta: T::PointerMetadata, + allocate: unsafe fn(core::alloc::Layout) -> *mut u8, +) -> Result, AllocError> +where + T: ?Sized + crate::KnownLayout, +{ + let size = match T::size_for_metadata(meta) { + Some(size) => size, + None => return Err(AllocError), + }; + + let align = T::LAYOUT.align.get(); + // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a bug in + // which sufficiently-large allocations (those which, when rounded up to the + // alignment, overflow `isize`) are not rejected, which can cause undefined + // behavior. See #64 for details. + // + // FIXME(#67): Once our MSRV is > 1.64.0, remove this assertion. + #[allow(clippy::as_conversions)] + let max_alloc = (isize::MAX as usize).saturating_sub(align); + if size > max_alloc { + return Err(AllocError); + } + + // FIXME(https://github.com/rust-lang/rust/issues/55724): Use + // `Layout::repeat` once it's stabilized. + let layout = Layout::from_size_align(size, align).or(Err(AllocError))?; + + let ptr = if layout.size() != 0 { + // SAFETY: By contract on the caller, `allocate` is either + // `alloc::alloc::alloc` or `alloc::alloc::alloc_zeroed`. The above + // check ensures their shared safety precondition: that the supplied + // layout is not zero-sized type [1]. + // + // [1] Per https://doc.rust-lang.org/1.81.0/std/alloc/trait.GlobalAlloc.html#tymethod.alloc: + // + // This function is unsafe because undefined behavior can result if + // the caller does not ensure that layout has non-zero size. + let ptr = unsafe { allocate(layout) }; + match NonNull::new(ptr) { + Some(ptr) => ptr, + None => return Err(AllocError), + } + } else { + let align = T::LAYOUT.align.get(); + + // We use `transmute` instead of an `as` cast since Miri (with strict + // provenance enabled) notices and complains that an `as` cast creates a + // pointer with no provenance. 
Miri isn't smart enough to realize that + // we're only executing this branch when we're constructing a zero-sized + // `Box`, which doesn't require provenance. + // + // SAFETY: any initialized bit sequence is a bit-valid `*mut u8`. All + // bits of a `usize` are initialized. + // + // `#[allow(unknown_lints)]` is for `integer_to_ptr_transmutes` + #[allow(unknown_lints)] + #[allow(clippy::useless_transmute, integer_to_ptr_transmutes)] + let dangling = unsafe { mem::transmute::(align) }; + // SAFETY: `dangling` is constructed from `T::LAYOUT.align`, which is a + // `NonZeroUsize`, which is guaranteed to be non-zero. + // + // `Box<[T]>` does not allocate when `T` is zero-sized or when `len` is + // zero, but it does require a non-null dangling pointer for its + // allocation. + // + // FIXME(https://github.com/rust-lang/rust/issues/95228): Use + // `std::ptr::without_provenance` once it's stable. That may optimize + // better. As written, Rust may assume that this consumes "exposed" + // provenance, and thus Rust may have to assume that this may consume + // provenance from any pointer whose provenance has been exposed. + unsafe { NonNull::new_unchecked(dangling) } + }; + + let ptr = T::raw_from_ptr_len(ptr, meta); + + // FIXME(#429): Add a "SAFETY" comment and remove this `allow`. Make sure to + // include a justification that `ptr.as_ptr()` is validly-aligned in the ZST + // case (in which we manually construct a dangling pointer) and to justify + // why `Box` is safe to drop (it's because `allocate` uses the system + // allocator). + #[allow(clippy::undocumented_unsafe_blocks)] + Ok(unsafe { alloc::boxed::Box::from_raw(ptr.as_ptr()) }) +} + +mod len_of { + use super::*; + + /// A witness type for metadata of a valid instance of `&T`. + pub(crate) struct MetadataOf { + /// # Safety + /// + /// The size of an instance of `&T` with the given metadata is not + /// larger than `isize::MAX`. 
+ meta: T::PointerMetadata, + _p: PhantomData, + } + + impl Copy for MetadataOf {} + impl Clone for MetadataOf { + fn clone(&self) -> Self { + *self + } + } + + impl MetadataOf + where + T: KnownLayout, + { + /// Returns `None` if `meta` is greater than `t`'s metadata. + #[inline(always)] + pub(crate) fn new_in_bounds(t: &T, meta: usize) -> Option + where + T: KnownLayout, + { + if meta <= Ptr::from_ref(t).len() { + // SAFETY: We have checked that `meta` is not greater than `t`'s + // metadata, which, by invariant on `&T`, addresses no more than + // `isize::MAX` bytes [1][2]. + // + // [1] Per https://doc.rust-lang.org/1.85.0/std/primitive.reference.html#safety: + // + // For all types, `T: ?Sized`, and for all `t: &T` or `t: + // &mut T`, when such values cross an API boundary, the + // following invariants must generally be upheld: + // + // * `t` is non-null + // * `t` is aligned to `align_of_val(t)` + // * if `size_of_val(t) > 0`, then `t` is dereferenceable for + // `size_of_val(t)` many bytes + // + // If `t` points at address `a`, being "dereferenceable" for + // N bytes means that the memory range `[a, a + N)` is all + // contained within a single allocated object. + // + // [2] Per https://doc.rust-lang.org/1.85.0/std/ptr/index.html#allocated-object: + // + // For any allocated object with `base` address, `size`, and + // a set of `addresses`, the following are guaranteed: + // - For all addresses `a` in `addresses`, `a` is in the + // range `base .. (base + size)` (note that this requires + // `a < base + size`, not `a <= base + size`) + // - `base` is not equal to [`null()`] (i.e., the address + // with the numerical value 0) + // - `base + size <= usize::MAX` + // - `size <= isize::MAX` + Some(unsafe { Self::new_unchecked(meta) }) + } else { + None + } + } + + /// # Safety + /// + /// The size of an instance of `&T` with the given metadata is not + /// larger than `isize::MAX`. 
+ pub(crate) unsafe fn new_unchecked(meta: T::PointerMetadata) -> Self { + // SAFETY: The caller has promised that the size of an instance of + // `&T` with the given metadata is not larger than `isize::MAX`. + Self { meta, _p: PhantomData } + } + + pub(crate) fn get(&self) -> T::PointerMetadata + where + T::PointerMetadata: Copy, + { + self.meta + } + + #[inline] + pub(crate) fn padding_needed_for(&self) -> usize + where + T: KnownLayout, + { + let trailing_slice_layout = crate::trailing_slice_layout::(); + + // FIXME(#67): Remove this allow. See NumExt for more details. + #[allow( + unstable_name_collisions, + clippy::incompatible_msrv, + clippy::multiple_unsafe_ops_per_block + )] + // SAFETY: By invariant on `self`, a `&T` with metadata `self.meta` + // describes an object of size `<= isize::MAX`. This computes the + // size of such a `&T` without any trailing padding, and so neither + // the multiplication nor the addition will overflow. + let unpadded_size = unsafe { + let trailing_size = self.meta.unchecked_mul(trailing_slice_layout.elem_size); + trailing_size.unchecked_add(trailing_slice_layout.offset) + }; + + util::padding_needed_for(unpadded_size, T::LAYOUT.align) + } + + #[inline(always)] + pub(crate) fn validate_cast_and_convert_metadata( + addr: usize, + bytes_len: MetadataOf<[u8]>, + cast_type: CastType, + meta: Option, + ) -> Result<(MetadataOf, MetadataOf<[u8]>), MetadataCastError> { + let layout = match meta { + None => T::LAYOUT, + // This can return `None` if the metadata describes an object + // which can't fit in an `isize`. 
+ Some(meta) => { + let size = match T::size_for_metadata(meta) { + Some(size) => size, + None => return Err(MetadataCastError::Size), + }; + DstLayout { + align: T::LAYOUT.align, + size_info: crate::SizeInfo::Sized { size }, + statically_shallow_unpadded: false, + } + } + }; + // Lemma 0: By contract on `validate_cast_and_convert_metadata`, if + // the result is `Ok(..)`, then a `&T` with `elems` trailing slice + // elements is no larger in size than `bytes_len.get()`. + let (elems, split_at) = + layout.validate_cast_and_convert_metadata(addr, bytes_len.get(), cast_type)?; + let elems = T::PointerMetadata::from_elem_count(elems); + + // For a slice DST type, if `meta` is `Some(elems)`, then we + // synthesize `layout` to describe a sized type whose size is equal + // to the size of the instance that we are asked to cast. For sized + // types, `validate_cast_and_convert_metadata` returns `elems == 0`. + // Thus, in this case, we need to use the `elems` passed by the + // caller, not the one returned by + // `validate_cast_and_convert_metadata`. + // + // Lemma 1: A `&T` with `elems` trailing slice elements is no larger + // in size than `bytes_len.get()`. Proof: + // - If `meta` is `None`, then `elems` satisfies this condition by + // Lemma 0. + // - If `meta` is `Some(meta)`, then `layout` describes an object + // whose size is equal to the size of an `&T` with `meta` + // metadata. By Lemma 0, that size is not larger than + // `bytes_len.get()`. + // + // Lemma 2: A `&T` with `elems` trailing slice elements is no larger + // than `isize::MAX` bytes. Proof: By Lemma 1, a `&T` with metadata + // `elems` is not larger in size than `bytes_len.get()`. By + // invariant on `MetadataOf<[u8]>`, a `&[u8]` with metadata + // `bytes_len` is not larger than `isize::MAX`. Because + // `size_of::()` is `1`, a `&[u8]` with metadata `bytes_len` has + // size `bytes_len.get()` bytes. Therefore, a `&T` with metadata + // `elems` has size not larger than `isize::MAX`. 
+ let elems = meta.unwrap_or(elems); + + // SAFETY: See Lemma 2. + let elems = unsafe { MetadataOf::new_unchecked(elems) }; + + // SAFETY: Let `size` be the size of a `&T` with metadata `elems`. + // By post-condition on `validate_cast_and_convert_metadata`, one of + // the following conditions holds: + // - `split_at == size`, in which case, by Lemma 2, `split_at <= + // isize::MAX`. Since `size_of::() == 1`, a `[u8]` with + // `split_at` elems has size not larger than `isize::MAX`. + // - `split_at == bytes_len - size`. Since `bytes_len: + // MetadataOf`, and since `size` is non-negative, `split_at` + // addresses no more bytes than `bytes_len` does. Since + // `bytes_len: MetadataOf`, `bytes_len` describes a `[u8]` + // which has no more than `isize::MAX` bytes, and thus so does + // `split_at`. + let split_at = unsafe { MetadataOf::<[u8]>::new_unchecked(split_at) }; + Ok((elems, split_at)) + } + } +} + +pub(crate) use len_of::MetadataOf; + +/// Since we support multiple versions of Rust, there are often features which +/// have been stabilized in the most recent stable release which do not yet +/// exist (stably) on our MSRV. This module provides polyfills for those +/// features so that we can write more "modern" code, and just remove the +/// polyfill once our MSRV supports the corresponding feature. Without this, +/// we'd have to write worse/more verbose code and leave FIXME comments +/// sprinkled throughout the codebase to update to the new pattern once it's +/// stabilized. +/// +/// Each trait is imported as `_` at the crate root; each polyfill should "just +/// work" at usage sites. +pub(crate) mod polyfills { + use core::ptr::{self, NonNull}; + + // A polyfill for `NonNull::slice_from_raw_parts` that we can use before our + // MSRV is 1.70, when that function was stabilized. 
+ // + // The `#[allow(unused)]` is necessary because, on sufficiently recent + // toolchain versions, `ptr.slice_from_raw_parts()` resolves to the inherent + // method rather than to this trait, and so this trait is considered unused. + // + // FIXME(#67): Once our MSRV is 1.70, remove this. + #[allow(unused)] + pub(crate) trait NonNullExt { + fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>; + } + + impl NonNullExt for NonNull { + // NOTE on coverage: this will never be tested in nightly since it's a + // polyfill for a feature which has been stabilized on our nightly + // toolchain. + #[cfg_attr( + all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), + coverage(off) + )] + #[inline(always)] + fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> { + let ptr = ptr::slice_from_raw_parts_mut(data.as_ptr(), len); + // SAFETY: `ptr` is converted from `data`, which is non-null. + unsafe { NonNull::new_unchecked(ptr) } + } + } + + // A polyfill for `Self::unchecked_sub` that we can use until methods like + // `usize::unchecked_sub` is stabilized. + // + // The `#[allow(unused)]` is necessary because, on sufficiently recent + // toolchain versions, `ptr.slice_from_raw_parts()` resolves to the inherent + // method rather than to this trait, and so this trait is considered unused. + // + // FIXME(#67): Once our MSRV is high enough, remove this. + #[allow(unused)] + pub(crate) trait NumExt { + /// Add without checking for overflow. + /// + /// # Safety + /// + /// The caller promises that the addition will not overflow. + unsafe fn unchecked_add(self, rhs: Self) -> Self; + + /// Subtract without checking for underflow. + /// + /// # Safety + /// + /// The caller promises that the subtraction will not underflow. + unsafe fn unchecked_sub(self, rhs: Self) -> Self; + + /// Multiply without checking for overflow. + /// + /// # Safety + /// + /// The caller promises that the multiplication will not overflow. 
+ unsafe fn unchecked_mul(self, rhs: Self) -> Self; + } + + // NOTE on coverage: these will never be tested in nightly since they're + // polyfills for a feature which has been stabilized on our nightly + // toolchain. + impl NumExt for usize { + #[cfg_attr( + all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), + coverage(off) + )] + #[inline(always)] + unsafe fn unchecked_add(self, rhs: usize) -> usize { + match self.checked_add(rhs) { + Some(x) => x, + None => { + // SAFETY: The caller promises that the addition will not + // underflow. + unsafe { core::hint::unreachable_unchecked() } + } + } + } + + #[cfg_attr( + all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), + coverage(off) + )] + #[inline(always)] + unsafe fn unchecked_sub(self, rhs: usize) -> usize { + match self.checked_sub(rhs) { + Some(x) => x, + None => { + // SAFETY: The caller promises that the subtraction will not + // underflow. + unsafe { core::hint::unreachable_unchecked() } + } + } + } + + #[cfg_attr( + all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), + coverage(off) + )] + #[inline(always)] + unsafe fn unchecked_mul(self, rhs: usize) -> usize { + match self.checked_mul(rhs) { + Some(x) => x, + None => { + // SAFETY: The caller promises that the multiplication will + // not overflow. + unsafe { core::hint::unreachable_unchecked() } + } + } + } + } +} + +#[cfg(test)] +pub(crate) mod testutil { + use crate::*; + + /// A `T` which is aligned to at least `align_of::()`. + #[derive(Default)] + pub(crate) struct Align { + pub(crate) t: T, + _a: [A; 0], + } + + impl Align { + pub(crate) fn set_default(&mut self) { + self.t = T::default(); + } + } + + impl Align { + pub(crate) const fn new(t: T) -> Align { + Align { t, _a: [] } + } + } + + /// A `T` which is guaranteed not to satisfy `align_of::()`. + /// + /// It must be the case that `align_of::() < align_of::()` in order + /// for this type to work properly. 
+ #[repr(C)] + pub(crate) struct ForceUnalign { + // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is + // placed at the minimum offset that guarantees its alignment. If + // `align_of::() < align_of::()`, then that offset will be + // guaranteed *not* to satisfy `align_of::()`. + // + // Note that we need `T: Unaligned` in order to guarantee that there is + // no padding between `_u` and `t`. + _u: u8, + pub(crate) t: T, + _a: [A; 0], + } + + impl ForceUnalign { + pub(crate) fn new(t: T) -> ForceUnalign { + ForceUnalign { _u: 0, t, _a: [] } + } + } + // A `u64` with alignment 8. + // + // Though `u64` has alignment 8 on some platforms, it's not guaranteed. By + // contrast, `AU64` is guaranteed to have alignment 8 on all platforms. + #[derive( + KnownLayout, + Immutable, + FromBytes, + IntoBytes, + Eq, + PartialEq, + Ord, + PartialOrd, + Default, + Debug, + Copy, + Clone, + )] + #[repr(C, align(8))] + pub(crate) struct AU64(pub(crate) u64); + + impl AU64 { + // Converts this `AU64` to bytes using this platform's endianness. 
+ pub(crate) fn to_bytes(self) -> [u8; 8] { + crate::transmute!(self) + } + } + + impl Display for AU64 { + #[cfg_attr( + all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), + coverage(off) + )] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.0, f) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_round_down_to_next_multiple_of_alignment() { + fn alt_impl(n: usize, align: NonZeroUsize) -> usize { + let mul = n / align.get(); + mul * align.get() + } + + for align in [1, 2, 4, 8, 16] { + for n in 0..256 { + let align = NonZeroUsize::new(align).unwrap(); + let want = alt_impl(n, align); + let got = round_down_to_next_multiple_of_alignment(n, align); + assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({}, {})", n, align); + } + } + } + + #[rustversion::since(1.57.0)] + #[test] + #[should_panic] + fn test_round_down_to_next_multiple_of_alignment_zerocopy_panic_in_const_and_vec_try_reserve() { + round_down_to_next_multiple_of_alignment(0, NonZeroUsize::new(3).unwrap()); + } + #[test] + fn test_send_sync_phantom_data() { + let x = SendSyncPhantomData::::default(); + let y = x.clone(); + assert!(x == y); + assert!(x == SendSyncPhantomData::::default()); + } + + #[test] + #[allow(clippy::as_conversions)] + fn test_as_address() { + let x = 0u8; + let r = &x; + let mut x_mut = 0u8; + let rm = &mut x_mut; + let p = r as *const u8; + let pm = rm as *mut u8; + let nn = NonNull::new(p as *mut u8).unwrap(); + + assert_eq!(AsAddress::addr(r), p as usize); + assert_eq!(AsAddress::addr(rm), pm as usize); + assert_eq!(AsAddress::addr(p), p as usize); + assert_eq!(AsAddress::addr(pm), pm as usize); + assert_eq!(AsAddress::addr(nn), p as usize); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/wrappers.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/wrappers.rs new file mode 100644 index 
0000000000000000000000000000000000000000..5f2414b0c6d265e0b2a1fa84c2eb4f5888fd4dcb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/src/wrappers.rs @@ -0,0 +1,1029 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use core::{fmt, hash::Hash}; + +use super::*; +use crate::pointer::{invariant::Valid, SizeEq, TransmuteFrom}; + +/// A type with no alignment requirement. +/// +/// An `Unalign` wraps a `T`, removing any alignment requirement. `Unalign` +/// has the same size and bit validity as `T`, but not necessarily the same +/// alignment [or ABI]. This is useful if a type with an alignment requirement +/// needs to be read from a chunk of memory which provides no alignment +/// guarantees. +/// +/// Since `Unalign` has no alignment requirement, the inner `T` may not be +/// properly aligned in memory. 
There are five ways to access the inner `T`: +/// - by value, using [`get`] or [`into_inner`] +/// - by reference inside of a callback, using [`update`] +/// - fallibly by reference, using [`try_deref`] or [`try_deref_mut`]; these can +/// fail if the `Unalign` does not satisfy `T`'s alignment requirement at +/// runtime +/// - unsafely by reference, using [`deref_unchecked`] or +/// [`deref_mut_unchecked`]; it is the caller's responsibility to ensure that +/// the `Unalign` satisfies `T`'s alignment requirement +/// - (where `T: Unaligned`) infallibly by reference, using [`Deref::deref`] or +/// [`DerefMut::deref_mut`] +/// +/// [or ABI]: https://github.com/google/zerocopy/issues/164 +/// [`get`]: Unalign::get +/// [`into_inner`]: Unalign::into_inner +/// [`update`]: Unalign::update +/// [`try_deref`]: Unalign::try_deref +/// [`try_deref_mut`]: Unalign::try_deref_mut +/// [`deref_unchecked`]: Unalign::deref_unchecked +/// [`deref_mut_unchecked`]: Unalign::deref_mut_unchecked +/// +/// # Example +/// +/// In this example, we need `EthernetFrame` to have no alignment requirement - +/// and thus implement [`Unaligned`]. `EtherType` is `#[repr(u16)]` and so +/// cannot implement `Unaligned`. We use `Unalign` to relax `EtherType`'s +/// alignment requirement so that `EthernetFrame` has no alignment requirement +/// and can implement `Unaligned`. +/// +/// ```rust +/// use zerocopy::*; +/// # use zerocopy_derive::*; +/// # #[derive(FromBytes, KnownLayout, Immutable, Unaligned)] #[repr(C)] struct Mac([u8; 6]); +/// +/// # #[derive(PartialEq, Copy, Clone, Debug)] +/// #[derive(TryFromBytes, KnownLayout, Immutable)] +/// #[repr(u16)] +/// enum EtherType { +/// Ipv4 = 0x0800u16.to_be(), +/// Arp = 0x0806u16.to_be(), +/// Ipv6 = 0x86DDu16.to_be(), +/// # /* +/// ... 
+/// # */ +/// } +/// +/// #[derive(TryFromBytes, KnownLayout, Immutable, Unaligned)] +/// #[repr(C)] +/// struct EthernetFrame { +/// src: Mac, +/// dst: Mac, +/// ethertype: Unalign, +/// payload: [u8], +/// } +/// +/// let bytes = &[ +/// # 0, 1, 2, 3, 4, 5, +/// # 6, 7, 8, 9, 10, 11, +/// # /* +/// ... +/// # */ +/// 0x86, 0xDD, // EtherType +/// 0xDE, 0xAD, 0xBE, 0xEF // Payload +/// ][..]; +/// +/// // PANICS: Guaranteed not to panic because `bytes` is of the right +/// // length, has the right contents, and `EthernetFrame` has no +/// // alignment requirement. +/// let packet = EthernetFrame::try_ref_from_bytes(&bytes).unwrap(); +/// +/// assert_eq!(packet.ethertype.get(), EtherType::Ipv6); +/// assert_eq!(packet.payload, [0xDE, 0xAD, 0xBE, 0xEF]); +/// ``` +/// +/// # Safety +/// +/// `Unalign` is guaranteed to have the same size and bit validity as `T`, +/// and to have [`UnsafeCell`]s covering the same byte ranges as `T`. +/// `Unalign` is guaranteed to have alignment 1. +// NOTE: This type is sound to use with types that need to be dropped. The +// reason is that the compiler-generated drop code automatically moves all +// values to aligned memory slots before dropping them in-place. This is not +// well-documented, but it's hinted at in places like [1] and [2]. However, this +// also means that `T` must be `Sized`; unless something changes, we can never +// support unsized `T`. 
[3] +// +// [1] https://github.com/rust-lang/rust/issues/54148#issuecomment-420529646 +// [2] https://github.com/google/zerocopy/pull/126#discussion_r1018512323 +// [3] https://github.com/google/zerocopy/issues/209 +#[allow(missing_debug_implementations)] +#[derive(Default, Copy)] +#[cfg_attr(any(feature = "derive", test), derive(Immutable, FromBytes, IntoBytes, Unaligned))] +#[repr(C, packed)] +pub struct Unalign(T); + +// We do not use `derive(KnownLayout)` on `Unalign`, because the derive is not +// smart enough to realize that `Unalign` is always sized and thus emits a +// `KnownLayout` impl bounded on `T: KnownLayout.` This is overly restrictive. +impl_known_layout!(T => Unalign); + +// FIXME(https://github.com/rust-lang/rust-clippy/issues/16087): Move these +// attributes below the comment once this Clippy bug is fixed. +#[cfg_attr( + all(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, any(feature = "derive", test)), + expect(unused_unsafe) +)] +#[cfg_attr( + all( + not(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), + any(feature = "derive", test) + ), + allow(unused_unsafe) +)] +// SAFETY: +// - `Unalign` promises to have alignment 1, and so we don't require that `T: +// Unaligned`. +// - `Unalign` has the same bit validity as `T`, and so it is `FromZeros`, +// `FromBytes`, or `IntoBytes` exactly when `T` is as well. +// - `Immutable`: `Unalign` has the same fields as `T`, so it permits +// interior mutation exactly when `T` does. +// - `TryFromBytes`: `Unalign` has the same the same bit validity as `T`, so +// `T::is_bit_valid` is a sound implementation of `is_bit_valid`. 
+// +#[allow(clippy::multiple_unsafe_ops_per_block)] +const _: () = unsafe { + impl_or_verify!(T => Unaligned for Unalign); + impl_or_verify!(T: Immutable => Immutable for Unalign); + impl_or_verify!( + T: TryFromBytes => TryFromBytes for Unalign; + |c| T::is_bit_valid(c.transmute::<_, _, BecauseImmutable>()) + ); + impl_or_verify!(T: FromZeros => FromZeros for Unalign); + impl_or_verify!(T: FromBytes => FromBytes for Unalign); + impl_or_verify!(T: IntoBytes => IntoBytes for Unalign); +}; + +// Note that `Unalign: Clone` only if `T: Copy`. Since the inner `T` may not be +// aligned, there's no way to safely call `T::clone`, and so a `T: Clone` bound +// is not sufficient to implement `Clone` for `Unalign`. +impl Clone for Unalign { + #[inline(always)] + fn clone(&self) -> Unalign { + *self + } +} + +impl Unalign { + /// Constructs a new `Unalign`. + #[inline(always)] + pub const fn new(val: T) -> Unalign { + Unalign(val) + } + + /// Consumes `self`, returning the inner `T`. + #[inline(always)] + pub const fn into_inner(self) -> T { + // SAFETY: Since `Unalign` is `#[repr(C, packed)]`, it has the same size + // and bit validity as `T`. + // + // We do this instead of just destructuring in order to prevent + // `Unalign`'s `Drop::drop` from being run, since dropping is not + // supported in `const fn`s. + // + // FIXME(https://github.com/rust-lang/rust/issues/73255): Destructure + // instead of using unsafe. + unsafe { crate::util::transmute_unchecked(self) } + } + + /// Attempts to return a reference to the wrapped `T`, failing if `self` is + /// not properly aligned. + /// + /// If `self` does not satisfy `align_of::()`, then `try_deref` returns + /// `Err`. + /// + /// If `T: Unaligned`, then `Unalign` implements [`Deref`], and callers + /// may prefer [`Deref::deref`], which is infallible. 
+ #[inline(always)] + pub fn try_deref(&self) -> Result<&T, AlignmentError<&Self, T>> { + let inner = Ptr::from_ref(self).transmute(); + match inner.try_into_aligned() { + Ok(aligned) => Ok(aligned.as_ref()), + Err(err) => Err(err.map_src(|src| src.into_unalign().as_ref())), + } + } + + /// Attempts to return a mutable reference to the wrapped `T`, failing if + /// `self` is not properly aligned. + /// + /// If `self` does not satisfy `align_of::()`, then `try_deref` returns + /// `Err`. + /// + /// If `T: Unaligned`, then `Unalign` implements [`DerefMut`], and + /// callers may prefer [`DerefMut::deref_mut`], which is infallible. + #[inline(always)] + pub fn try_deref_mut(&mut self) -> Result<&mut T, AlignmentError<&mut Self, T>> { + let inner = Ptr::from_mut(self).transmute::<_, _, (_, (_, _))>(); + match inner.try_into_aligned() { + Ok(aligned) => Ok(aligned.as_mut()), + Err(err) => Err(err.map_src(|src| src.into_unalign().as_mut())), + } + } + + /// Returns a reference to the wrapped `T` without checking alignment. + /// + /// If `T: Unaligned`, then `Unalign` implements[ `Deref`], and callers + /// may prefer [`Deref::deref`], which is safe. + /// + /// # Safety + /// + /// The caller must guarantee that `self` satisfies `align_of::()`. + #[inline(always)] + pub const unsafe fn deref_unchecked(&self) -> &T { + // SAFETY: `Unalign` is `repr(transparent)`, so there is a valid `T` + // at the same memory location as `self`. It has no alignment guarantee, + // but the caller has promised that `self` is properly aligned, so we + // know that it is sound to create a reference to `T` at this memory + // location. + // + // We use `mem::transmute` instead of `&*self.get_ptr()` because + // dereferencing pointers is not stable in `const` on our current MSRV + // (1.56 as of this writing). + unsafe { mem::transmute(self) } + } + + /// Returns a mutable reference to the wrapped `T` without checking + /// alignment. 
+ /// + /// If `T: Unaligned`, then `Unalign` implements[ `DerefMut`], and + /// callers may prefer [`DerefMut::deref_mut`], which is safe. + /// + /// # Safety + /// + /// The caller must guarantee that `self` satisfies `align_of::()`. + #[inline(always)] + pub unsafe fn deref_mut_unchecked(&mut self) -> &mut T { + // SAFETY: `self.get_mut_ptr()` returns a raw pointer to a valid `T` at + // the same memory location as `self`. It has no alignment guarantee, + // but the caller has promised that `self` is properly aligned, so we + // know that the pointer itself is aligned, and thus that it is sound to + // create a reference to a `T` at this memory location. + unsafe { &mut *self.get_mut_ptr() } + } + + /// Gets an unaligned raw pointer to the inner `T`. + /// + /// # Safety + /// + /// The returned raw pointer is not necessarily aligned to + /// `align_of::()`. Most functions which operate on raw pointers require + /// those pointers to be aligned, so calling those functions with the result + /// of `get_ptr` will result in undefined behavior if alignment is not + /// guaranteed using some out-of-band mechanism. In general, the only + /// functions which are safe to call with this pointer are those which are + /// explicitly documented as being sound to use with an unaligned pointer, + /// such as [`read_unaligned`]. + /// + /// Even if the caller is permitted to mutate `self` (e.g. they have + /// ownership or a mutable borrow), it is not guaranteed to be sound to + /// write through the returned pointer. If writing is required, prefer + /// [`get_mut_ptr`] instead. + /// + /// [`read_unaligned`]: core::ptr::read_unaligned + /// [`get_mut_ptr`]: Unalign::get_mut_ptr + #[inline(always)] + pub const fn get_ptr(&self) -> *const T { + ptr::addr_of!(self.0) + } + + /// Gets an unaligned mutable raw pointer to the inner `T`. + /// + /// # Safety + /// + /// The returned raw pointer is not necessarily aligned to + /// `align_of::()`. 
Most functions which operate on raw pointers require + /// those pointers to be aligned, so calling those functions with the result + /// of `get_ptr` will result in undefined behavior if alignment is not + /// guaranteed using some out-of-band mechanism. In general, the only + /// functions which are safe to call with this pointer are those which are + /// explicitly documented as being sound to use with an unaligned pointer, + /// such as [`read_unaligned`]. + /// + /// [`read_unaligned`]: core::ptr::read_unaligned + // FIXME(https://github.com/rust-lang/rust/issues/57349): Make this `const`. + #[inline(always)] + pub fn get_mut_ptr(&mut self) -> *mut T { + ptr::addr_of_mut!(self.0) + } + + /// Sets the inner `T`, dropping the previous value. + // FIXME(https://github.com/rust-lang/rust/issues/57349): Make this `const`. + #[inline(always)] + pub fn set(&mut self, t: T) { + *self = Unalign::new(t); + } + + /// Updates the inner `T` by calling a function on it. + /// + /// If [`T: Unaligned`], then `Unalign` implements [`DerefMut`], and that + /// impl should be preferred over this method when performing updates, as it + /// will usually be faster and more ergonomic. + /// + /// For large types, this method may be expensive, as it requires copying + /// `2 * size_of::()` bytes. \[1\] + /// + /// \[1\] Since the inner `T` may not be aligned, it would not be sound to + /// invoke `f` on it directly. Instead, `update` moves it into a + /// properly-aligned location in the local stack frame, calls `f` on it, and + /// then moves it back to its original location in `self`. + /// + /// [`T: Unaligned`]: Unaligned + #[inline] + pub fn update O>(&mut self, f: F) -> O { + if mem::align_of::() == 1 { + // While we advise callers to use `DerefMut` when `T: Unaligned`, + // not all callers will be able to guarantee `T: Unaligned` in all + // cases. 
In particular, callers who are themselves providing an API + // which is generic over `T` may sometimes be called by *their* + // callers with `T` such that `align_of::() == 1`, but cannot + // guarantee this in the general case. Thus, this optimization may + // sometimes be helpful. + + // SAFETY: Since `T`'s alignment is 1, `self` satisfies its + // alignment by definition. + let t = unsafe { self.deref_mut_unchecked() }; + return f(t); + } + + // On drop, this moves `copy` out of itself and uses `ptr::write` to + // overwrite `slf`. + struct WriteBackOnDrop { + copy: ManuallyDrop, + slf: *mut Unalign, + } + + impl Drop for WriteBackOnDrop { + fn drop(&mut self) { + // SAFETY: We never use `copy` again as required by + // `ManuallyDrop::take`. + let copy = unsafe { ManuallyDrop::take(&mut self.copy) }; + // SAFETY: `slf` is the raw pointer value of `self`. We know it + // is valid for writes and properly aligned because `self` is a + // mutable reference, which guarantees both of these properties. + unsafe { ptr::write(self.slf, Unalign::new(copy)) }; + } + } + + // SAFETY: We know that `self` is valid for reads, properly aligned, and + // points to an initialized `Unalign` because it is a mutable + // reference, which guarantees all of these properties. + // + // Since `T: !Copy`, it would be unsound in the general case to allow + // both the original `Unalign` and the copy to be used by safe code. + // We guarantee that the copy is used to overwrite the original in the + // `Drop::drop` impl of `WriteBackOnDrop`. So long as this `drop` is + // called before any other safe code executes, soundness is upheld. + // While this method can terminate in two ways (by returning normally or + // by unwinding due to a panic in `f`), in both cases, `write_back` is + // dropped - and its `drop` called - before any other safe code can + // execute. 
+ let copy = unsafe { ptr::read(self) }.into_inner(); + let mut write_back = WriteBackOnDrop { copy: ManuallyDrop::new(copy), slf: self }; + + let ret = f(&mut write_back.copy); + + drop(write_back); + ret + } +} + +impl Unalign { + /// Gets a copy of the inner `T`. + // FIXME(https://github.com/rust-lang/rust/issues/57349): Make this `const`. + #[inline(always)] + pub fn get(&self) -> T { + let Unalign(val) = *self; + val + } +} + +impl Deref for Unalign { + type Target = T; + + #[inline(always)] + fn deref(&self) -> &T { + Ptr::from_ref(self).transmute().bikeshed_recall_aligned().as_ref() + } +} + +impl DerefMut for Unalign { + #[inline(always)] + fn deref_mut(&mut self) -> &mut T { + Ptr::from_mut(self).transmute::<_, _, (_, (_, _))>().bikeshed_recall_aligned().as_mut() + } +} + +impl PartialOrd> for Unalign { + #[inline(always)] + fn partial_cmp(&self, other: &Unalign) -> Option { + PartialOrd::partial_cmp(self.deref(), other.deref()) + } +} + +impl Ord for Unalign { + #[inline(always)] + fn cmp(&self, other: &Unalign) -> Ordering { + Ord::cmp(self.deref(), other.deref()) + } +} + +impl PartialEq> for Unalign { + #[inline(always)] + fn eq(&self, other: &Unalign) -> bool { + PartialEq::eq(self.deref(), other.deref()) + } +} + +impl Eq for Unalign {} + +impl Hash for Unalign { + #[inline(always)] + fn hash(&self, state: &mut H) + where + H: Hasher, + { + self.deref().hash(state); + } +} + +impl Debug for Unalign { + #[inline(always)] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Debug::fmt(self.deref(), f) + } +} + +impl Display for Unalign { + #[inline(always)] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(self.deref(), f) + } +} + +/// A wrapper type to construct uninitialized instances of `T`. +/// +/// `MaybeUninit` is identical to the [standard library +/// `MaybeUninit`][core-maybe-uninit] type except that it supports unsized +/// types. 
+/// +/// # Layout +/// +/// The same layout guarantees and caveats apply to `MaybeUninit` as apply to +/// the [standard library `MaybeUninit`][core-maybe-uninit] with one exception: +/// for `T: !Sized`, there is no single value for `T`'s size. Instead, for such +/// types, the following are guaranteed: +/// - Every [valid size][valid-size] for `T` is a valid size for +/// `MaybeUninit` and vice versa +/// - Given `t: *const T` and `m: *const MaybeUninit` with identical fat +/// pointer metadata, `t` and `m` address the same number of bytes (and +/// likewise for `*mut`) +/// +/// [core-maybe-uninit]: core::mem::MaybeUninit +/// [valid-size]: crate::KnownLayout#what-is-a-valid-size +#[repr(transparent)] +#[doc(hidden)] +pub struct MaybeUninit( + // SAFETY: `MaybeUninit` has the same size as `T`, because (by invariant + // on `T::MaybeUninit`) `T::MaybeUninit` has `T::LAYOUT` identical to `T`, + // and because (invariant on `T::LAYOUT`) we can trust that `LAYOUT` + // accurately reflects the layout of `T`. By invariant on `T::MaybeUninit`, + // it admits uninitialized bytes in all positions. Because `MaybeUninit` is + // marked `repr(transparent)`, these properties additionally hold true for + // `Self`. + T::MaybeUninit, +); + +#[doc(hidden)] +impl MaybeUninit { + /// Constructs a `MaybeUninit` initialized with the given value. + #[inline(always)] + pub fn new(val: T) -> Self + where + T: Sized, + Self: Sized, + { + // SAFETY: It is valid to transmute `val` to `MaybeUninit` because it + // is both valid to transmute `val` to `T::MaybeUninit`, and it is valid + // to transmute from `T::MaybeUninit` to `MaybeUninit`. + // + // First, it is valid to transmute `val` to `T::MaybeUninit` because, by + // invariant on `T::MaybeUninit`: + // - For `T: Sized`, `T` and `T::MaybeUninit` have the same size. + // - All byte sequences of the correct size are valid values of + // `T::MaybeUninit`. 
+ // + // Second, it is additionally valid to transmute from `T::MaybeUninit` + // to `MaybeUninit`, because `MaybeUninit` is a + // `repr(transparent)` wrapper around `T::MaybeUninit`. + // + // These two transmutes are collapsed into one so we don't need to add a + // `T::MaybeUninit: Sized` bound to this function's `where` clause. + unsafe { crate::util::transmute_unchecked(val) } + } + + /// Constructs an uninitialized `MaybeUninit`. + #[must_use] + #[inline(always)] + pub fn uninit() -> Self + where + T: Sized, + Self: Sized, + { + let uninit = CoreMaybeUninit::::uninit(); + // SAFETY: It is valid to transmute from `CoreMaybeUninit` to + // `MaybeUninit` since they both admit uninitialized bytes in all + // positions, and they have the same size (i.e., that of `T`). + // + // `MaybeUninit` has the same size as `T`, because (by invariant on + // `T::MaybeUninit`) `T::MaybeUninit` has `T::LAYOUT` identical to `T`, + // and because (invariant on `T::LAYOUT`) we can trust that `LAYOUT` + // accurately reflects the layout of `T`. + // + // `CoreMaybeUninit` has the same size as `T` [1] and admits + // uninitialized bytes in all positions. + // + // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1: + // + // `MaybeUninit` is guaranteed to have the same size, alignment, + // and ABI as `T` + unsafe { crate::util::transmute_unchecked(uninit) } + } + + /// Creates a `Box>`. + /// + /// This function is useful for allocating large, uninit values on the heap + /// without ever creating a temporary instance of `Self` on the stack. + /// + /// # Errors + /// + /// Returns an error on allocation failure. Allocation failure is guaranteed + /// never to cause a panic or an abort. + #[cfg(feature = "alloc")] + #[inline] + pub fn new_boxed_uninit(meta: T::PointerMetadata) -> Result, AllocError> { + // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of + // `new_box`. 
The referent of the pointer returned by `alloc` (and, + // consequently, the `Box` derived from it) is a valid instance of + // `Self`, because `Self` is `MaybeUninit` and thus admits arbitrary + // (un)initialized bytes. + unsafe { crate::util::new_box(meta, alloc::alloc::alloc) } + } + + /// Extracts the value from the `MaybeUninit` container. + /// + /// # Safety + /// + /// The caller must ensure that `self` is in an bit-valid state. Depending + /// on subsequent use, it may also need to be in a library-valid state. + #[inline(always)] + pub unsafe fn assume_init(self) -> T + where + T: Sized, + Self: Sized, + { + // SAFETY: The caller guarantees that `self` is in an bit-valid state. + unsafe { crate::util::transmute_unchecked(self) } + } +} + +impl fmt::Debug for MaybeUninit { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad(core::any::type_name::()) + } +} + +#[allow(unreachable_pub)] // False positive on MSRV +#[doc(hidden)] +pub use read_only_def::*; +mod read_only_def { + /// A read-only wrapper. + /// + /// A `ReadOnly` disables any interior mutability in `T`, ensuring that + /// a `&ReadOnly` is genuinely read-only. Thus, `ReadOnly` is + /// [`Immutable`] regardless of whether `T` is. + /// + /// Note that `&mut ReadOnly` still permits mutation – the read-only + /// property only applies to shared references. + /// + /// [`Immutable`]: crate::Immutable + #[repr(transparent)] + pub struct ReadOnly { + // INVARIANT: `inner` is never mutated through a `&ReadOnly` + // reference. + inner: T, + } + + impl ReadOnly { + /// Creates a new `ReadOnly`. + #[must_use] + #[inline(always)] + pub const fn new(t: T) -> ReadOnly { + ReadOnly { inner: t } + } + + /// Returns the inner value. 
+ #[must_use] + #[inline(always)] + pub fn into_inner(r: ReadOnly) -> T { + r.inner + } + } + + impl ReadOnly { + #[inline(always)] + pub(crate) fn as_mut(r: &mut ReadOnly) -> &mut T { + // SAFETY: `r: &mut ReadOnly`, so this doesn't violate the invariant + // that `inner` is never mutated through a `&ReadOnly` reference. + &mut r.inner + } + + /// # Safety + /// + /// The caller promises not to mutate the referent (i.e., via interior + /// mutation). + pub(crate) const unsafe fn as_ref_unchecked(r: &ReadOnly) -> &T { + // SAFETY: The caller promises not to mutate the referent. + &r.inner + } + } +} + +// SAFETY: `ReadOnly` is a `#[repr(transparent)` wrapper around `T`. +const _: () = unsafe { + unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ReadOnly); +}; + +#[allow(clippy::multiple_unsafe_ops_per_block)] +// SAFETY: +// - `ReadOnly` has the same alignment as `T`, and so it is `Unaligned` +// exactly when `T` is as well. +// - `ReadOnly` has the same bit validity as `T`, and so this `is_bit_valid` +// implementation is correct, and thus the `TryFromBytes` impl is sound. +// - `ReadOnly` has the same bit validity as `T`, and so it is `FromZeros`, +// `FromBytes`, and `IntoBytes` exactly when `T` is as well. +const _: () = unsafe { + unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ReadOnly); + unsafe_impl!( + T: ?Sized + TryFromBytes => TryFromBytes for ReadOnly; + |c| T::is_bit_valid(c.cast::<_, as SizeEq>>>::CastFrom, _>()) + ); + unsafe_impl!(T: ?Sized + FromZeros => FromZeros for ReadOnly); + unsafe_impl!(T: ?Sized + FromBytes => FromBytes for ReadOnly); + unsafe_impl!(T: ?Sized + IntoBytes => IntoBytes for ReadOnly); +}; + +// SAFETY: By invariant, `inner` is never mutated through a `&ReadOnly` +// reference. +const _: () = unsafe { + unsafe_impl!(T: ?Sized => Immutable for ReadOnly); +}; + +const _: () = { + use crate::pointer::cast::CastExact; + + // SAFETY: `ReadOnly` has the same layout as `T`. 
+ define_cast!(unsafe { pub CastFromReadOnly = ReadOnly => T}); + // SAFETY: `ReadOnly` has the same layout as `T`. + unsafe impl CastExact, T> for CastFromReadOnly {} + // SAFETY: `ReadOnly` has the same layout as `T`. + define_cast!(unsafe { pub CastToReadOnly = T => ReadOnly}); + // SAFETY: `ReadOnly` has the same layout as `T`. + unsafe impl CastExact> for CastToReadOnly {} + + impl SizeEq> for T { + type CastFrom = CastFromReadOnly; + } + + impl SizeEq for ReadOnly { + type CastFrom = CastToReadOnly; + } +}; + +// SAFETY: `ReadOnly` is a `#[repr(transparent)]` wrapper around `T`, and so +// it has the same bit validity as `T`. +unsafe impl TransmuteFrom for ReadOnly {} + +// SAFETY: `ReadOnly` is a `#[repr(transparent)]` wrapper around `T`, and so +// it has the same bit validity as `T`. +unsafe impl TransmuteFrom, Valid, Valid> for T {} + +impl<'a, T: ?Sized + Immutable> From<&'a T> for &'a ReadOnly { + #[inline(always)] + fn from(t: &'a T) -> &'a ReadOnly { + let ro = Ptr::from_ref(t).transmute::<_, _, (_, _)>(); + // SAFETY: `ReadOnly` has the same alignment as `T`, and + // `Ptr::from_ref` produces an aligned `Ptr`. + let ro = unsafe { ro.assume_alignment() }; + ro.as_ref() + } +} + +impl Deref for ReadOnly { + type Target = T; + + #[inline(always)] + fn deref(&self) -> &Self::Target { + // SAFETY: By `T: Immutable`, `&T` doesn't permit interior mutation. + unsafe { ReadOnly::as_ref_unchecked(self) } + } +} + +impl DerefMut for ReadOnly { + #[inline(always)] + fn deref_mut(&mut self) -> &mut Self::Target { + ReadOnly::as_mut(self) + } +} + +impl Debug for ReadOnly { + #[inline(always)] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + self.deref().fmt(f) + } +} + +// SAFETY: See safety comment on `ProjectToTag`. 
+unsafe impl HasTag for ReadOnly { + #[allow(clippy::missing_inline_in_public_items)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized, + { + } + + type Tag = T::Tag; + + // SAFETY: `>>::CastFrom` is a no-op projection that + // produces a pointer with the same referent. By invariant, for any `Ptr<'_, + // T, I>` it is sound to use `T::ProjectToTag` to project to a `Ptr<'_, + // T::Tag, I>`. Since `ReadOnly` has the same layout and validity as `T`, + // the same is true of projecting from a `Ptr<'_, ReadOnly, I>`. + type ProjectToTag = crate::pointer::cast::TransitiveProject< + T, + >>::CastFrom, + T::ProjectToTag, + >; +} + +// SAFETY: `ReadOnly` is a `#[repr(transparent)]` wrapper around `T`, and so +// has the same fields at the same offsets. Thus, it satisfies the safety +// invariants of `HasField` for field `f` exactly +// when `T` does, as guaranteed by the `T: HasField` bound: +// - If `VARIANT_ID` is `STRUCT_VARIANT_ID` or `UNION_VARIANT_ID`, then `T` has +// the layout of a struct or union type. Since `ReadOnly` is a transparent +// wrapper around `T`, it does too. Otherwise, if `VARIANT_ID` is an enum +// variant index, then `T` has the layout of an enum type, and `ReadOnly` +// does too. +// - By `T: HasField<_, _, FIELD_ID>`: +// - `T` has a field `f` with name `n` such that +// `FIELD_ID = zerocopy::ident_id!(n)` or at index `i` such that +// `FIELD_ID = zerocopy::ident_id!(i)`. +// - `Field` has the same visibility as `f`. +// - `T::Type` has the same type as `f`. Thus, `ReadOnly` has the +// same type as `f`, wrapped in `ReadOnly`. +// +// `project` satisfies its post-condition – namely, that the returned pointer +// refers to a non-strict subset of the bytes of `slf`'s referent, and has the +// same provenance as `slf` – because all intermediate operations satisfy those +// same conditions. 
+unsafe impl + HasField for ReadOnly +where + T: HasField + ?Sized, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized, + { + } + + type Type = ReadOnly; + + #[inline(always)] + fn project(slf: PtrInner<'_, Self>) -> *mut ReadOnly { + slf.project::<_, >>::CastFrom>() + .project::<_, crate::pointer::cast::Projection>() + .project::<_, as SizeEq>::CastFrom>() + .as_non_null() + .as_ptr() + } +} + +// SAFETY: `ReadOnly` is a `#[repr(transparent)]` wrapper around `T`, and so +// has the same fields at the same offsets. `is_projectable` simply delegates to +// `T::is_projectable`, which is sound because a `Ptr<'_, ReadOnly, I>` will +// be projectable exactly when a `Ptr<'_, T, I>` referent is. +unsafe impl + ProjectField for ReadOnly +where + T: ProjectField + ?Sized, + I: invariant::Invariants, +{ + #[allow(clippy::missing_inline_in_public_items)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized, + { + } + + type Invariants = T::Invariants; + + type Error = T::Error; + + #[inline(always)] + fn is_projectable<'a>(ptr: Ptr<'a, Self::Tag, I>) -> Result<(), Self::Error> { + T::is_projectable(ptr) + } +} + +#[cfg(test)] +mod tests { + use core::panic::AssertUnwindSafe; + + use super::*; + use crate::util::testutil::*; + + #[test] + fn test_unalign() { + // Test methods that don't depend on alignment. + let mut u = Unalign::new(AU64(123)); + assert_eq!(u.get(), AU64(123)); + assert_eq!(u.into_inner(), AU64(123)); + assert_eq!(u.get_ptr(), <*const _>::cast::(&u)); + assert_eq!(u.get_mut_ptr(), <*mut _>::cast::(&mut u)); + u.set(AU64(321)); + assert_eq!(u.get(), AU64(321)); + + // Test methods that depend on alignment (when alignment is satisfied). 
+ let mut u: Align<_, AU64> = Align::new(Unalign::new(AU64(123))); + assert_eq!(u.t.try_deref().unwrap(), &AU64(123)); + assert_eq!(u.t.try_deref_mut().unwrap(), &mut AU64(123)); + // SAFETY: The `Align<_, AU64>` guarantees proper alignment. + assert_eq!(unsafe { u.t.deref_unchecked() }, &AU64(123)); + // SAFETY: The `Align<_, AU64>` guarantees proper alignment. + assert_eq!(unsafe { u.t.deref_mut_unchecked() }, &mut AU64(123)); + *u.t.try_deref_mut().unwrap() = AU64(321); + assert_eq!(u.t.get(), AU64(321)); + + // Test methods that depend on alignment (when alignment is not + // satisfied). + let mut u: ForceUnalign<_, AU64> = ForceUnalign::new(Unalign::new(AU64(123))); + assert!(matches!(u.t.try_deref(), Err(AlignmentError { .. }))); + assert!(matches!(u.t.try_deref_mut(), Err(AlignmentError { .. }))); + + // Test methods that depend on `T: Unaligned`. + let mut u = Unalign::new(123u8); + assert_eq!(u.try_deref(), Ok(&123)); + assert_eq!(u.try_deref_mut(), Ok(&mut 123)); + assert_eq!(u.deref(), &123); + assert_eq!(u.deref_mut(), &mut 123); + *u = 21; + assert_eq!(u.get(), 21); + + // Test that some `Unalign` functions and methods are `const`. + const _UNALIGN: Unalign = Unalign::new(0); + const _UNALIGN_PTR: *const u64 = _UNALIGN.get_ptr(); + const _U64: u64 = _UNALIGN.into_inner(); + // Make sure all code is considered "used". + // + // FIXME(https://github.com/rust-lang/rust/issues/104084): Remove this + // attribute. + #[allow(dead_code)] + const _: () = { + let x: Align<_, AU64> = Align::new(Unalign::new(AU64(123))); + // Make sure that `deref_unchecked` is `const`. + // + // SAFETY: The `Align<_, AU64>` guarantees proper alignment. 
+ let au64 = unsafe { x.t.deref_unchecked() }; + match au64 { + AU64(123) => {} + _ => const_unreachable!(), + } + }; + } + + #[test] + fn test_unalign_update() { + let mut u = Unalign::new(AU64(123)); + u.update(|a| a.0 += 1); + assert_eq!(u.get(), AU64(124)); + + // Test that, even if the callback panics, the original is still + // correctly overwritten. Use a `Box` so that Miri is more likely to + // catch any unsoundness (which would likely result in two `Box`es for + // the same heap object, which is the sort of thing that Miri would + // probably catch). + let mut u = Unalign::new(Box::new(AU64(123))); + let res = std::panic::catch_unwind(AssertUnwindSafe(|| { + u.update(|a| { + a.0 += 1; + panic!(); + }) + })); + assert!(res.is_err()); + assert_eq!(u.into_inner(), Box::new(AU64(124))); + + // Test the align_of::() == 1 optimization. + let mut u = Unalign::new([0u8, 1]); + u.update(|a| a[0] += 1); + assert_eq!(u.get(), [1u8, 1]); + } + + #[test] + fn test_unalign_copy_clone() { + // Test that `Copy` and `Clone` do not cause soundness issues. This test + // is mainly meant to exercise UB that would be caught by Miri. + + // `u.t` is definitely not validly-aligned for `AU64`'s alignment of 8. 
+ let u = ForceUnalign::<_, AU64>::new(Unalign::new(AU64(123))); + #[allow(clippy::clone_on_copy)] + let v = u.t.clone(); + let w = u.t; + assert_eq!(u.t.get(), v.get()); + assert_eq!(u.t.get(), w.get()); + assert_eq!(v.get(), w.get()); + } + + #[test] + fn test_unalign_trait_impls() { + let zero = Unalign::new(0u8); + let one = Unalign::new(1u8); + + assert!(zero < one); + assert_eq!(PartialOrd::partial_cmp(&zero, &one), Some(Ordering::Less)); + assert_eq!(Ord::cmp(&zero, &one), Ordering::Less); + + assert_ne!(zero, one); + assert_eq!(zero, zero); + assert!(!PartialEq::eq(&zero, &one)); + assert!(PartialEq::eq(&zero, &zero)); + + fn hash(t: &T) -> u64 { + let mut h = std::collections::hash_map::DefaultHasher::new(); + t.hash(&mut h); + h.finish() + } + + assert_eq!(hash(&zero), hash(&0u8)); + assert_eq!(hash(&one), hash(&1u8)); + + assert_eq!(format!("{:?}", zero), format!("{:?}", 0u8)); + assert_eq!(format!("{:?}", one), format!("{:?}", 1u8)); + assert_eq!(format!("{}", zero), format!("{}", 0u8)); + assert_eq!(format!("{}", one), format!("{}", 1u8)); + } + + #[test] + #[allow(clippy::as_conversions)] + fn test_maybe_uninit() { + // int + { + let input = 42; + let uninit = MaybeUninit::new(input); + // SAFETY: `uninit` is in an initialized state + let output = unsafe { uninit.assume_init() }; + assert_eq!(input, output); + } + + // thin ref + { + let input = 42; + let uninit = MaybeUninit::new(&input); + // SAFETY: `uninit` is in an initialized state + let output = unsafe { uninit.assume_init() }; + assert_eq!(&input as *const _, output as *const _); + assert_eq!(input, *output); + } + + // wide ref + { + let input = [1, 2, 3, 4]; + let uninit = MaybeUninit::new(&input[..]); + // SAFETY: `uninit` is in an initialized state + let output = unsafe { uninit.assume_init() }; + assert_eq!(&input[..] 
as *const _, output as *const _); + assert_eq!(input, *output); + } + } + #[test] + fn test_maybe_uninit_uninit() { + let _uninit = MaybeUninit::::uninit(); + // Cannot check value, but can check it compiles and runs + } + + #[test] + #[cfg(feature = "alloc")] + fn test_maybe_uninit_new_boxed_uninit() { + let _boxed = MaybeUninit::::new_boxed_uninit(()).unwrap(); + } + + #[test] + fn test_maybe_uninit_debug() { + let uninit = MaybeUninit::::uninit(); + assert!(format!("{:?}", uninit).contains("MaybeUninit")); + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/testdata/include_value/data b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/testdata/include_value/data new file mode 100644 index 0000000000000000000000000000000000000000..85df50785d62d3b05ab03d9cbf7e4a0b49449730 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/testdata/include_value/data @@ -0,0 +1 @@ +abcd \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/include.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/include.rs new file mode 100644 index 0000000000000000000000000000000000000000..95bcbf4878ba84cb7b7bf92efd0f81f432815738 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/include.rs @@ -0,0 +1,67 @@ +// Copyright 2026 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +#[allow(unused)] +#[cfg(feature = "derive")] +mod util { + /// A type that doesn't implement any zerocopy traits. + pub struct NotZerocopy(pub T); + + /// A `u16` with alignment 2. + /// + /// Though `u16` has alignment 2 on some platforms, it's not guaranteed. 
By + /// contrast, `util::AU16` is guaranteed to have alignment 2. + #[derive( + zerocopy::KnownLayout, + zerocopy::Immutable, + zerocopy::FromBytes, + zerocopy::IntoBytes, + Copy, + Clone, + )] + #[repr(C, align(2))] + pub struct AU16(pub u16); + + // Since we can't import these by path (ie, `util::assert_impl_all!`), use a + // name prefix to ensure our derive-emitted code isn't accidentally relying + // on `assert_impl_all!` being in scope. + #[macro_export] + macro_rules! util_assert_impl_all { + ($type:ty: $($trait:path),+ $(,)?) => { + const _: fn() = || { + use ::core::prelude::v1::*; + ::static_assertions::assert_impl_all!($type: $($trait),+); + }; + }; + } + + // Since we can't import these by path (ie, `util::assert_not_impl_any!`), + // use a name prefix to ensure our derive-emitted code isn't accidentally + // relying on `assert_not_impl_any!` being in scope. + #[macro_export] + macro_rules! util_assert_not_impl_any { + ($x:ty: $($t:path),+ $(,)?) => { + const _: fn() = || { + use ::core::prelude::v1::*; + ::static_assertions::assert_not_impl_any!($x: $($t),+); + }; + }; + } + + #[macro_export] + macro_rules! test_trivial_is_bit_valid { + ($x:ty => $name:ident) => { + #[test] + fn $name() { + util::test_trivial_is_bit_valid::<$x>(); + } + }; + } +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/trybuild.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/trybuild.rs new file mode 100644 index 0000000000000000000000000000000000000000..293ec338bed0c76bfe0e118d994d41454c449df7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/trybuild.rs @@ -0,0 +1,40 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +// Many of our UI tests require the "derive" feature to function properly. In +// particular: +// - Some tests directly include `zerocopy-derive/tests/include.rs`, which +// derives traits on the `AU16` type. +// - The file `invalid-impls.rs` directly includes `src/util/macros.rs` in order +// to test the `impl_or_verify!` macro which is defined in that file. +// Specifically, it tests the verification portion of that macro, which is +// enabled when `cfg(any(feature = "derive", test))`. While `--cfg test` is of +// course passed to the code in the file you're reading right now, `trybuild` +// does not pass `--cfg test` when it invokes Cargo. As a result, this +// `trybuild` test only tests the correct behavior when the "derive" feature +// is enabled. +#![cfg(feature = "derive")] + +use testutil::{set_rustflags_w_warnings, ToolchainVersion}; + +#[test] +#[cfg_attr(miri, ignore)] +fn ui() { + // See the doc comment on this method for an explanation of what this does + // and why we store source files in different directories. + let source_files_dirname = ToolchainVersion::extract_from_env() + .expect("UI tests must only be run on pinned MSRV, stable, or nightly toolchains") + .get_ui_source_files_dirname(); + + // Set `-Wwarnings` in the `RUSTFLAGS` environment variable to ensure that + // `.stderr` files reflect what the typical user would encounter. 
+ set_rustflags_w_warnings(); + + let t = trybuild::TestCases::new(); + t.compile_fail(format!("tests/{}/*.rs", source_files_dirname)); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..e8c6d1d5314e2af8cc03122ec9b389c13a87ca4e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-bytes.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::FromBytes; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. 
+ takes_from_bytes::(); +} + +fn takes_from_bytes() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1b3a87e72afde7002cb13c231dcf49f997cbce6c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-bytes.stderr @@ -0,0 +1,11 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-msrv/diagnostic-not-implemented-from-bytes.rs:16:24 + | +16 | takes_from_bytes::(); + | ^^^^^^^^^^^ the trait `FromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `takes_from_bytes` + --> tests/ui-msrv/diagnostic-not-implemented-from-bytes.rs:19:24 + | +19 | fn takes_from_bytes() {} + | ^^^^^^^^^ required by this bound in `takes_from_bytes` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-zeros.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-zeros.rs new file mode 100644 index 0000000000000000000000000000000000000000..a84a833c138a89f003cf5e83a2b0df1077586801 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-zeros.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::FromZeros; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_from_zeros::(); +} + +fn takes_from_zeros() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-zeros.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-zeros.stderr new file mode 100644 index 0000000000000000000000000000000000000000..e16480b9af7c4d6d63ab866110469196311f04cb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-from-zeros.stderr @@ -0,0 +1,11 @@ +error[E0277]: the trait bound `NotZerocopy: FromZeros` is not satisfied + --> tests/ui-msrv/diagnostic-not-implemented-from-zeros.rs:16:24 + | +16 | takes_from_zeros::(); + | ^^^^^^^^^^^ the trait `FromZeros` is not implemented for `NotZerocopy` + | +note: required by a bound in `takes_from_zeros` + --> tests/ui-msrv/diagnostic-not-implemented-from-zeros.rs:19:24 + | +19 | fn takes_from_zeros() {} + | ^^^^^^^^^ required by this bound in `takes_from_zeros` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-immutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-immutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..48e9e6580106de2cef30fe5afcc8a4d4ce44f53a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-immutable.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::Immutable; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_immutable::(); +} + +fn takes_immutable() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-immutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-immutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..37a4bdc911bf2d287bc6992d1f4e27670beab920 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-immutable.stderr @@ -0,0 +1,11 @@ +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-msrv/diagnostic-not-implemented-immutable.rs:16:23 + | +16 | takes_immutable::(); + | ^^^^^^^^^^^ the trait `Immutable` is not implemented for `NotZerocopy` + | +note: required by a bound in `takes_immutable` + --> tests/ui-msrv/diagnostic-not-implemented-immutable.rs:19:23 + | +19 | fn takes_immutable() {} + | ^^^^^^^^^ required by this bound in `takes_immutable` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-into-bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-into-bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..a348aafdf1981d9abfdbed39fc13306269c87bc1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-into-bytes.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at 
your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::IntoBytes; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_into_bytes::(); +} + +fn takes_into_bytes() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-into-bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-into-bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..5ba5bae60f434f4e96a365851037c53ea1c5a9ac --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-into-bytes.stderr @@ -0,0 +1,11 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-msrv/diagnostic-not-implemented-into-bytes.rs:16:24 + | +16 | takes_into_bytes::(); + | ^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `takes_into_bytes` + --> tests/ui-msrv/diagnostic-not-implemented-into-bytes.rs:19:24 + | +19 | fn takes_into_bytes() {} + | ^^^^^^^^^ required by this bound in `takes_into_bytes` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-issue-1296.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-issue-1296.rs new file mode 100644 index 0000000000000000000000000000000000000000..5b048e758154859c7f6873d98df32db0a608ea1b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-issue-1296.rs @@ -0,0 +1,57 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or 
the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::{Immutable, IntoBytes}; + +fn main() { + // This is adapted from #1296, which includes the following text: + // + // The compiler errors when a type is missing Immutable are somewhat + // misleading, although I'm not sure there's much zerocopy can do about + // this. An example where the compiler recommends adding a reference + // rather than implementing Immutable (some were even more confusing than + // this): + // + // error[E0277]: the trait bound `virtio::wl::CtrlVfdNewDmabuf: zerocopy::Immutable` is not satisfied + // --> devices/src/virtio/wl.rs:317:20 + // | + // 317 | .write_obj(ctrl_vfd_new_dmabuf) + // | --------- ^^^^^^^^^^^^^^^^^^^ the trait `zerocopy::Immutable` is not implemented for `virtio::wl::CtrlVfdNewDmabuf` + // | | + // | required by a bound introduced by this call + // | + // note: required by a bound in `virtio::descriptor_utils::Writer::write_obj` + // --> devices/src/virtio/descriptor_utils.rs:536:25 + // | + // 536 | pub fn write_obj(&mut self, val: T) -> io::Result<()> { + // | ^^^^^^^^^ required by this bound in `Writer::write_obj` + // help: consider borrowing here + // | + // 317 | .write_obj(&ctrl_vfd_new_dmabuf) + // | + + // 317 | .write_obj(&mut ctrl_vfd_new_dmabuf) + // | ++++ + // + // Taking the compiler's suggestion results in a different error with a + // recommendation to remove the reference (back to the original code). + // + // As of this writing, the described problem is still happening thanks to + // https://github.com/rust-lang/rust/issues/130563. We include this test so + // that we can capture the current behavior, but we will update it once that + // Rust issue is fixed. 
+ Foo.write_obj(NotZerocopy(())); +} + +struct Foo; + +impl Foo { + fn write_obj(&mut self, _val: T) {} +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-issue-1296.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-issue-1296.stderr new file mode 100644 index 0000000000000000000000000000000000000000..7a0a498f4ed867ed68f82f3be136f6a85d9b8c8b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-issue-1296.stderr @@ -0,0 +1,11 @@ +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-msrv/diagnostic-not-implemented-issue-1296.rs:50:19 + | +50 | Foo.write_obj(NotZerocopy(())); + | ^^^^^^^^^^^^^^^ the trait `Immutable` is not implemented for `NotZerocopy` + +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-msrv/diagnostic-not-implemented-issue-1296.rs:50:19 + | +50 | Foo.write_obj(NotZerocopy(())); + | ^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `NotZerocopy` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-known-layout.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-known-layout.rs new file mode 100644 index 0000000000000000000000000000000000000000..ded20312bec5c5d7c138d85238cdf3ecc4ecce3d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-known-layout.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::KnownLayout; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_known_layout::(); +} + +fn takes_known_layout() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-known-layout.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-known-layout.stderr new file mode 100644 index 0000000000000000000000000000000000000000..fe08341de561fffb98f4cd144538f45f80ed1a82 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-known-layout.stderr @@ -0,0 +1,11 @@ +error[E0277]: the trait bound `NotZerocopy: KnownLayout` is not satisfied + --> tests/ui-msrv/diagnostic-not-implemented-known-layout.rs:16:26 + | +16 | takes_known_layout::(); + | ^^^^^^^^^^^ the trait `KnownLayout` is not implemented for `NotZerocopy` + | +note: required by a bound in `takes_known_layout` + --> tests/ui-msrv/diagnostic-not-implemented-known-layout.rs:19:26 + | +19 | fn takes_known_layout() {} + | ^^^^^^^^^^^ required by this bound in `takes_known_layout` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-try-from-bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-try-from-bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..18900ecaa639aa301f47f89a99f0543d1d9c6f31 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-try-from-bytes.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::TryFromBytes; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_try_from_bytes::(); +} + +fn takes_try_from_bytes() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-try-from-bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-try-from-bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..819d80683a002aa81cc75635b7611e4086b1ea37 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-try-from-bytes.stderr @@ -0,0 +1,11 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-msrv/diagnostic-not-implemented-try-from-bytes.rs:16:28 + | +16 | takes_try_from_bytes::(); + | ^^^^^^^^^^^ the trait `TryFromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `takes_try_from_bytes` + --> tests/ui-msrv/diagnostic-not-implemented-try-from-bytes.rs:19:28 + | +19 | fn takes_try_from_bytes() {} + | ^^^^^^^^^^^^ required by this bound in `takes_try_from_bytes` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-unaligned.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-unaligned.rs new file mode 100644 index 0000000000000000000000000000000000000000..6196f6a75bf8c45d85a539bdd265109844db47a2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-unaligned.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license 
, Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::Unaligned; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_unaligned::(); +} + +fn takes_unaligned() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-unaligned.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-unaligned.stderr new file mode 100644 index 0000000000000000000000000000000000000000..2a6df94972d544d3cb08fc4ed075d9495dda2eeb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/diagnostic-not-implemented-unaligned.stderr @@ -0,0 +1,11 @@ +error[E0277]: the trait bound `NotZerocopy: zerocopy::Unaligned` is not satisfied + --> tests/ui-msrv/diagnostic-not-implemented-unaligned.rs:16:23 + | +16 | takes_unaligned::(); + | ^^^^^^^^^^^ the trait `zerocopy::Unaligned` is not implemented for `NotZerocopy` + | +note: required by a bound in `takes_unaligned` + --> tests/ui-msrv/diagnostic-not-implemented-unaligned.rs:19:23 + | +19 | fn takes_unaligned() {} + | ^^^^^^^^^ required by this bound in `takes_unaligned` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_not_from_bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_not_from_bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..e01c4a9b6a26ee1b7ccf7b6c86517f510f65e1ec --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_not_from_bytes.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style 
license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; + +fn main() {} + +// Should fail because `NotZerocopy: !FromBytes`. +const NOT_FROM_BYTES: NotZerocopy = + zerocopy::include_value!("../../testdata/include_value/data"); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_not_from_bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_not_from_bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..7b54ce1c30c45c4c876152556d1fe475b2a0e229 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_not_from_bytes.stderr @@ -0,0 +1,15 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-msrv/include_value_not_from_bytes.rs:17:5 + | +17 | zerocopy::include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `NOT_FROM_BYTES::transmute` + --> tests/ui-msrv/include_value_not_from_bytes.rs:17:5 + | +17 | zerocopy::include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | required by a bound in this + | required by this bound in `NOT_FROM_BYTES::transmute` + = note: this error originates in the macro `$crate::transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_wrong_size.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_wrong_size.rs new file mode 100644 index 
0000000000000000000000000000000000000000..d0c5fcfc5f5d451a7d11e25cbbd467ac02508840 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_wrong_size.rs @@ -0,0 +1,12 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +// Should fail because the file is 4 bytes long, not 8. +const WRONG_SIZE: u64 = zerocopy::include_value!("../../testdata/include_value/data"); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_wrong_size.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_wrong_size.stderr new file mode 100644 index 0000000000000000000000000000000000000000..8ca70942138205fec60390beca13e98917c316db --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/include_value_wrong_size.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/include_value_wrong_size.rs:12:25 + | +12 | const WRONG_SIZE: u64 = zerocopy::include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 4]` (32 bits) + = note: target type: `u64` (64 bits) + = note: this error originates in the macro `$crate::transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/max-align.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/max-align.rs new file mode 100644 index 0000000000000000000000000000000000000000..53e3eb9b0aa59db355673e1d96e9086e7db4e6bb --- 
/dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/max-align.rs @@ -0,0 +1,99 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[repr(C, align(1))] +struct Align1; + +#[repr(C, align(2))] +struct Align2; + +#[repr(C, align(4))] +struct Align4; + +#[repr(C, align(8))] +struct Align8; + +#[repr(C, align(16))] +struct Align16; + +#[repr(C, align(32))] +struct Align32; + +#[repr(C, align(64))] +struct Align64; + +#[repr(C, align(128))] +struct Align128; + +#[repr(C, align(256))] +struct Align256; + +#[repr(C, align(512))] +struct Align512; + +#[repr(C, align(1024))] +struct Align1024; + +#[repr(C, align(2048))] +struct Align2048; + +#[repr(C, align(4096))] +struct Align4096; + +#[repr(C, align(8192))] +struct Align8192; + +#[repr(C, align(16384))] +struct Align16384; + +#[repr(C, align(32768))] +struct Align32768; + +#[repr(C, align(65536))] +struct Align65536; + +#[repr(C, align(131072))] +struct Align131072; + +#[repr(C, align(262144))] +struct Align262144; + +#[repr(C, align(524288))] +struct Align524288; + +#[repr(C, align(1048576))] +struct Align1048576; + +#[repr(C, align(2097152))] +struct Align2097152; + +#[repr(C, align(4194304))] +struct Align4194304; + +#[repr(C, align(8388608))] +struct Align8388608; + +#[repr(C, align(16777216))] +struct Align16777216; + +#[repr(C, align(33554432))] +struct Align33554432; + +#[repr(C, align(67108864))] +struct Align67108864; + +#[repr(C, align(134217728))] +struct Align13421772; + +#[repr(C, align(268435456))] +struct Align26843545; + +#[repr(C, align(1073741824))] +struct Align1073741824; + +fn main() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/max-align.stderr 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/max-align.stderr new file mode 100644 index 0000000000000000000000000000000000000000..6ab6e47e2bf21fac3a5081cbd378dd389e657aa7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/max-align.stderr @@ -0,0 +1,5 @@ +error[E0589]: invalid `repr(align)` attribute: larger than 2^29 + --> tests/ui-msrv/max-align.rs:96:11 + | +96 | #[repr(C, align(1073741824))] + | ^^^^^^^^^^^^^^^^^ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/ptr-is-invariant-over-v.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/ptr-is-invariant-over-v.rs new file mode 100644 index 0000000000000000000000000000000000000000..b9a76948fb03ba1dbde7a0ffbefe687188f1601d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/ptr-is-invariant-over-v.rs @@ -0,0 +1,29 @@ +// Copyright 2025 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::pointer::{ + invariant::{Aligned, Exclusive, Shared, Valid}, + Ptr, +}; + +fn _when_exclusive<'big: 'small, 'small>( + big: Ptr<'small, &'big u32, (Exclusive, Aligned, Valid)>, + mut _small: Ptr<'small, &'small u32, (Exclusive, Aligned, Valid)>, +) { + _small = big; +} + +fn _when_shared<'big: 'small, 'small>( + big: Ptr<'small, &'big u32, (Shared, Aligned, Valid)>, + mut _small: Ptr<'small, &'small u32, (Shared, Aligned, Valid)>, +) { + _small = big; +} + +fn main() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/ptr-is-invariant-over-v.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/ptr-is-invariant-over-v.stderr new file mode 100644 index 0000000000000000000000000000000000000000..f628f7b4f24ea1abb13c125e9a95e1d4409a5723 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/ptr-is-invariant-over-v.stderr @@ -0,0 +1,17 @@ +error[E0623]: lifetime mismatch + --> tests/ui-msrv/ptr-is-invariant-over-v.rs:19:14 + | +16 | big: Ptr<'small, &'big u32, (Exclusive, Aligned, Valid)>, + | --------------------------------------------------- these two types are declared with different lifetimes... +... +19 | _small = big; + | ^^^ ...but data from `big` flows into `big` here + +error[E0623]: lifetime mismatch + --> tests/ui-msrv/ptr-is-invariant-over-v.rs:26:14 + | +23 | big: Ptr<'small, &'big u32, (Shared, Aligned, Valid)>, + | ------------------------------------------------ these two types are declared with different lifetimes... +... 
+26 | _small = big; + | ^^^ ...but data from `big` flows into `big` here diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-dst-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-dst-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..3daed1c842a35919b805fadb78cb2831ed0671fa --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-dst-not-frombytes.rs @@ -0,0 +1,17 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-dst-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-dst-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..e337c1f9b7118afbb4a58d2fc9d8d5d6188f4344 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-dst-not-frombytes.stderr @@ -0,0 +1,15 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-msrv/transmute-dst-not-frombytes.rs:17:41 + | +17 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `DST_NOT_FROM_BYTES::transmute` + --> tests/ui-msrv/transmute-dst-not-frombytes.rs:17:41 + 
| +17 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ + | | + | required by a bound in this + | required by this bound in `DST_NOT_FROM_BYTES::transmute` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-const.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-const.rs new file mode 100644 index 0000000000000000000000000000000000000000..9bee817cf610f55c28a3ccebce546b911e416038 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-const.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use zerocopy::transmute_mut; + +fn main() {} + +const ARRAY_OF_U8S: [u8; 2] = [0u8; 2]; + +// `transmute_mut!` cannot, generally speaking, be used in const contexts. 
+const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-const.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-const.stderr new file mode 100644 index 0000000000000000000000000000000000000000..235565b87ef0d93078214b17ac49a9c1b8a97030 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-const.stderr @@ -0,0 +1,48 @@ +warning: taking a mutable reference to a `const` item + --> tests/ui-msrv/transmute-mut-const.rs:18:52 + | +18 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^ + | + = note: `#[warn(const_item_mutation)]` on by default + = note: each usage of a `const` item creates a new temporary + = note: the mutable reference will refer to this temporary, not the original `const` item +note: `const` item defined here + --> tests/ui-msrv/transmute-mut-const.rs:15:1 + | +15 | const ARRAY_OF_U8S: [u8; 2] = [0u8; 2]; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0658]: mutable references are not allowed in constants + --> tests/ui-msrv/transmute-mut-const.rs:18:52 + | +18 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^ + | + = note: see issue #57349 for more information + +error[E0015]: calls in constants are limited to constant functions, tuple structs and tuple variants + --> tests/ui-msrv/transmute-mut-const.rs:18:37 + | +18 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0015]: calls in constants are limited to constant functions, tuple structs and tuple variants + --> tests/ui-msrv/transmute-mut-const.rs:18:37 + | +18 | const 
CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0716]: temporary value dropped while borrowed + --> tests/ui-msrv/transmute-mut-const.rs:18:57 + | +18 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | --------------------^^^^^^^^^^^^- + | | | + | | creates a temporary which is freed while still in use + | temporary value is freed at the end of this statement + | using this value as a constant requires that borrow lasts for `'static` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..8f2d511cca0e4b558dfe75412754c543c2893f7a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting into a non-reference +// destination type. 
+const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..e8f88d1d71bdfe4fc48de26b6a44e5ff4c317eed --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-a-reference.stderr @@ -0,0 +1,19 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..504c12ac7e3542270c75fa56a26c56cd2ad290d6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-frombytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors 
+// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..de71445873879b86c1e8c774cea350c9525c5a21 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-frombytes.stderr @@ -0,0 +1,7 @@ +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-msrv/transmute-mut-dst-not-frombytes.rs:22:38 + | +22 | const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `Dst` + | + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..7c9d60e6e0ec57737324a4c40f24e2ec97d4f22b --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-intobytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `IntoBytes` +const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..0e8b9c14e5aa26103faac390b6dfedcd94606ebe --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-dst-not-intobytes.stderr @@ -0,0 +1,7 @@ +error[E0277]: the trait bound `Dst: IntoBytes` is not satisfied + --> tests/ui-msrv/transmute-mut-dst-not-intobytes.rs:22:36 + | +22 | const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `Dst` + | + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-illegal-lifetime.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-illegal-lifetime.rs new file mode 100644 index 
0000000000000000000000000000000000000000..c31765e4b96b498bbea7d521c8c949a26484ea9f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +fn increase_lifetime() { + let mut x = 0u64; + // It is illegal to increase the lifetime scope. + let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-illegal-lifetime.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-illegal-lifetime.stderr new file mode 100644 index 0000000000000000000000000000000000000000..5ff7145966fe520ca49d3debcc3194890e72aa62 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-illegal-lifetime.stderr @@ -0,0 +1,9 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-msrv/transmute-mut-illegal-lifetime.rs:14:56 + | +14 | let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); + | ---------------- ^^^^^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-dst-not-references.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-dst-not-references.rs new file mode 100644 index 0000000000000000000000000000000000000000..d07829de056f5b0ac3266db2e92a6a1530331897 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-dst-not-references.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting between non-reference source +// and destination types. +const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-dst-not-references.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-dst-not-references.stderr new file mode 100644 index 0000000000000000000000000000000000000000..0d6e56dc1efc2159c80a59d329dd82a9136440b3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-dst-not-references.stderr @@ -0,0 +1,12 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-src-dst-not-references.rs:15:59 + | +15 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | | help: consider mutably borrowing here: `&mut 0usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-immutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-immutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..2d6e84542a805e03ddadb4cf33686a5fee315c2e --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-immutable.rs @@ -0,0 +1,16 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +fn ref_src_immutable() { + // `transmute_mut!` requires that its source type be a mutable reference. + let _: &mut u8 = transmute_mut!(&0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-immutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-immutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..bae697c83876f0e98684fabe41eba7cbee24a731 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-immutable.stderr @@ -0,0 +1,11 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-src-immutable.rs:15:37 + | +15 | let _: &mut u8 = transmute_mut!(&0u8); + | ---------------^^^^- + | | | + | | types differ in mutability + | expected due to this + | + = note: expected mutable reference `&mut _` + found reference `&u8` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..71f6e67586a12c224a33e893c14cc05c5138beaf --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style 
license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a non-reference source +// type. +const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..ca8669d037855d7abcfddbe6304944dac2bd2d63 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-a-reference.stderr @@ -0,0 +1,12 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-src-not-a-reference.rs:15:53 + | +15 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | | help: consider mutably borrowing here: `&mut 0usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..fbf6232cabf7e6236d9fbe35a05b402bc09bfdd6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-frombytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `FromBytes` +const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..6fbc73eeb0ba003ddf3292f0bde96000d93906b7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-frombytes.stderr @@ -0,0 +1,7 @@ +error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-msrv/transmute-mut-src-not-frombytes.rs:22:38 + | +22 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `Src` + | + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..505734ac3d6f1164867a57885953fdf2493071ca --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-intobytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , 
Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `IntoBytes` +const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..4bc57109fc8674a349aac611d3d289783e3a7054 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-not-intobytes.stderr @@ -0,0 +1,7 @@ +error[E0277]: the trait bound `Src: IntoBytes` is not satisfied + --> tests/ui-msrv/transmute-mut-src-not-intobytes.rs:22:36 + | +22 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `Src` + | + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-unsized.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-unsized.rs new file mode 100644 index 0000000000000000000000000000000000000000..af2ffd363f88eb2be6362d805ef0e026fcde047a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-unsized.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia 
Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from an unsized source type to +// a sized destination type. +const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-unsized.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-unsized.stderr new file mode 100644 index 0000000000000000000000000000000000000000..b242575d21d239ae98088473f19e0cc5690ea207 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-mut-src-unsized.stderr @@ -0,0 +1,16 @@ +error[E0599]: the method `transmute_mut` exists for struct `Wrap<&mut [u8], &mut [u8; 1]>`, but its trait bounds were not satisfied + --> tests/ui-msrv/transmute-mut-src-unsized.rs:15:35 + | +15 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ method cannot be called on `Wrap<&mut [u8], &mut [u8; 1]>` due to unsatisfied trait bounds + | + ::: src/util/macro_util.rs + | + | pub struct Wrap(pub Src, pub PhantomData); + | --------------------------------------------------------- doesn't satisfy `Wrap<&mut [u8], &mut [u8; 1]>: TransmuteMutDst` + | + = note: the following trait bounds were not satisfied: + `[u8]: Sized` + `<[u8; 1] as KnownLayout>::PointerMetadata = usize` + which is required by `Wrap<&mut [u8], &mut [u8; 1]>: TransmuteMutDst` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ptr-to-usize.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ptr-to-usize.rs new file mode 100644 index 0000000000000000000000000000000000000000..27db0bbb9750912a2dff430fefc1208772740bbc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ptr-to-usize.rs @@ -0,0 +1,18 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute; + +fn main() {} + +// It is unclear whether we can or should support this transmutation, especially +// in a const context. This test ensures that even if such a transmutation +// becomes valid due to the requisite implementations of `FromBytes` being +// added, that we re-examine whether it should specifically be valid in a const +// context. 
+const POINTER_VALUE: usize = transmute!(&0usize as *const usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ptr-to-usize.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ptr-to-usize.stderr new file mode 100644 index 0000000000000000000000000000000000000000..d00c98fe8f7dd81603bc0f73a0159cf0322f2977 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ptr-to-usize.stderr @@ -0,0 +1,15 @@ +error[E0277]: the trait bound `*const usize: IntoBytes` is not satisfied + --> tests/ui-msrv/transmute-ptr-to-usize.rs:18:30 + | +18 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `*const usize` + | +note: required by a bound in `POINTER_VALUE::transmute` + --> tests/ui-msrv/transmute-ptr-to-usize.rs:18:30 + | +18 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | required by a bound in this + | required by this bound in `POINTER_VALUE::transmute` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-mutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-mutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..75f837acb0652fe7ea5b33a65fed56e332969c27 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-mutable.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +fn ref_dst_mutable() { + // `transmute_ref!` requires that its destination type be an immutable + // reference. + let _: &mut u8 = transmute_ref!(&0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-mutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-mutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..d37e716554bd2a0190cca3de36540435380435d2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-mutable.stderr @@ -0,0 +1,39 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: 
mismatched types + --> tests/ui-msrv/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..f6dbd00599515ae0a2944190a634d3fdc4e95e04 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting into a non-reference +// destination type. 
+const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..5e5a591b738fe453e8c5a8677c769151fa4cce46 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-a-reference.stderr @@ -0,0 +1,39 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ 
expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..969e8877b6833402acab765cb6738dd6f64594a9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-frombytes.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::Immutable)] +#[repr(transparent)] +struct Dst(AU16); + +// `transmute_ref` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &Dst = transmute_ref!(&AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..05ae73925fc4ac23b9220c9dd81c8fed925d2dd4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-frombytes.stderr @@ -0,0 +1,12 @@ +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-msrv/transmute-ref-dst-not-frombytes.rs:21:34 + | +21 | const DST_NOT_FROM_BYTES: &Dst = 
transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `Dst` + | +note: required by `AssertDstIsFromBytes` + --> tests/ui-msrv/transmute-ref-dst-not-frombytes.rs:21:34 + | +21 | const DST_NOT_FROM_BYTES: &Dst = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-nocell.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-nocell.rs new file mode 100644 index 0000000000000000000000000000000000000000..4f21b7273312dd5ebc288947f52e6f5d10c72bde --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-nocell.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::FromBytes)] +#[repr(transparent)] +struct Dst(AU16); + +// `transmute_ref` requires that the destination type implements `Immutable` +const DST_NOT_IMMUTABLE: &Dst = transmute_ref!(&AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-nocell.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-nocell.stderr new file mode 100644 index 0000000000000000000000000000000000000000..572b25a964d896bddbcd35b0b8716448e3405803 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-dst-not-nocell.stderr @@ -0,0 +1,12 @@ +error[E0277]: the trait bound `Dst: Immutable` is not satisfied + --> tests/ui-msrv/transmute-ref-dst-not-nocell.rs:21:33 + | +21 | const DST_NOT_IMMUTABLE: &Dst = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Immutable` is not implemented for `Dst` + | +note: required by `AssertDstIsImmutable` + --> tests/ui-msrv/transmute-ref-dst-not-nocell.rs:21:33 + | +21 | const DST_NOT_IMMUTABLE: &Dst = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-illegal-lifetime.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-illegal-lifetime.rs new file mode 100644 index 0000000000000000000000000000000000000000..8dd191e6f4038619262984d9723e12fce7d4d210 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under 
a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +fn increase_lifetime() { + let x = 0u64; + // It is illegal to increase the lifetime scope. + let _: &'static u64 = zerocopy::transmute_ref!(&x); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-illegal-lifetime.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-illegal-lifetime.stderr new file mode 100644 index 0000000000000000000000000000000000000000..866ea56a66fb0257343dc6ee24cfa91bcf27adb0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-illegal-lifetime.stderr @@ -0,0 +1,9 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-msrv/transmute-ref-illegal-lifetime.rs:14:52 + | +14 | let _: &'static u64 = zerocopy::transmute_ref!(&x); + | ------------ ^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-dst-not-references.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-dst-not-references.rs new file mode 100644 index 0000000000000000000000000000000000000000..c65bd24a93b72b7db351d8b418f6524984370c62 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-dst-not-references.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting between non-reference source +// and destination types. +const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-dst-not-references.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-dst-not-references.stderr new file mode 100644 index 0000000000000000000000000000000000000000..f909a09910b33b2af2faeafe78ffc1e43d532ffb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-dst-not-references.stderr @@ -0,0 +1,52 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-src-dst-not-references.rs:15:54 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected reference, found `usize` + | | help: consider borrowing here: `&0usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro 
`transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..7bbba06e1565749ce9f4c5e523c615dd1050d2b2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a non-reference source +// type. 
+const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..590ab51bf17b5cbced0063ba7a6f58f6dc3a2e9b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-a-reference.stderr @@ -0,0 +1,12 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-src-not-a-reference.rs:15:49 + | +15 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected reference, found `usize` + | | help: consider borrowing here: `&0usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..8f357c92aa9b4dcfbc615642cce67e6bd929a7f2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-intobytes.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::Immutable)] +#[repr(transparent)] +struct Src(AU16); + +// `transmute_ref` requires that the source type implements `IntoBytes` +const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..47795b23f13ce2a3dcc33a5fca2de4b12c6d3cb1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-intobytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `Src: IntoBytes` is not satisfied + --> tests/ui-msrv/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `Src` + | +note: required by `AssertSrcIsIntoBytes` + --> tests/ui-msrv/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Src: IntoBytes` is not satisfied + --> tests/ui-msrv/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `Src` + | +note: required by a bound in `AssertSrcIsIntoBytes` + --> tests/ui-msrv/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in 
`AssertSrcIsIntoBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-nocell.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-nocell.rs new file mode 100644 index 0000000000000000000000000000000000000000..862951b888cb3d124492cc7c12327ffc80793e17 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-nocell.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::IntoBytes)] +#[repr(transparent)] +struct Src(AU16); + +// `transmute_ref` requires that the source type implements `Immutable` +const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-nocell.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-nocell.stderr new file mode 100644 index 0000000000000000000000000000000000000000..86ff3a8a73c997f682844baa56b818c1b4480c3c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-not-nocell.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `Src: Immutable` is not satisfied + --> tests/ui-msrv/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected an implementor of 
trait `Immutable` + | +note: required by `AssertSrcIsImmutable` + --> tests/ui-msrv/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Src: Immutable` is not satisfied + --> tests/ui-msrv/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Immutable` is not implemented for `Src` + | +note: required by a bound in `AssertSrcIsImmutable` + --> tests/ui-msrv/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsImmutable` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-unsized.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-unsized.rs new file mode 100644 index 0000000000000000000000000000000000000000..262395bd7361ca4705b70a1b177af53948a2be13 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-unsized.rs @@ -0,0 +1,14 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from an unsized source type. 
+const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-unsized.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-unsized.stderr new file mode 100644 index 0000000000000000000000000000000000000000..78bc8f146dd27497ca14d5f6e429c802f86965e8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-ref-src-unsized.stderr @@ -0,0 +1,16 @@ +error[E0599]: the method `transmute_ref` exists for struct `Wrap<&[u8], &[u8; 1]>`, but its trait bounds were not satisfied + --> tests/ui-msrv/transmute-ref-src-unsized.rs:14:31 + | +14 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ method cannot be called on `Wrap<&[u8], &[u8; 1]>` due to unsatisfied trait bounds + | + ::: src/util/macro_util.rs + | + | pub struct Wrap(pub Src, pub PhantomData); + | --------------------------------------------------------- doesn't satisfy `Wrap<&[u8], &[u8; 1]>: TransmuteRefDst` + | + = note: the following trait bounds were not satisfied: + `[u8]: Sized` + `<[u8; 1] as KnownLayout>::PointerMetadata = usize` + which is required by `Wrap<&[u8], &[u8; 1]>: TransmuteRefDst` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-decrease.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-decrease.rs new file mode 100644 index 0000000000000000000000000000000000000000..98d00e1950eec4e9686f791dc9d074fac7d26a23 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-decrease.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// 
Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute; + +fn main() {} + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. +const DECREASE_SIZE: u8 = transmute!(AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-decrease.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-decrease.stderr new file mode 100644 index 0000000000000000000000000000000000000000..063797806d213992e3d3105437b0e42b2abdffda --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-size-decrease.rs:18:27 + | +18 | const DECREASE_SIZE: u8 = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AU16` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase-allow-shrink.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase-allow-shrink.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a0569aa34c115d431c66193fbb69431d6e7d312 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase-allow-shrink.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The 
Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute; + +fn main() {} + +// `transmute!` does not support transmuting from a smaller type to a larger +// one. +const INCREASE_SIZE: AU16 = transmute!(#![allow(shrink)] 0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase-allow-shrink.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase-allow-shrink.stderr new file mode 100644 index 0000000000000000000000000000000000000000..016bb217c791216885e1c10a16b74c36aef497ab --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase-allow-shrink.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-size-increase-allow-shrink.rs:18:29 + | +18 | const INCREASE_SIZE: AU16 = transmute!(#![allow(shrink)] 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `Transmute` (16 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase.rs new file mode 100644 index 0000000000000000000000000000000000000000..06e1990bda59a6d02b2986798342ac954614fbfe --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase.rs @@ -0,0 +1,18 @@ +// Copyright 
2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute; + +fn main() {} + +// `transmute!` does not support transmuting from a smaller type to a larger +// one. +const INCREASE_SIZE: AU16 = transmute!(0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase.stderr new file mode 100644 index 0000000000000000000000000000000000000000..8f36c38206851c10145040744d21bd992e4621b3 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-size-increase.rs:18:29 + | +18 | const INCREASE_SIZE: AU16 = transmute!(0u8); + | ^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `AU16` (16 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..3c9ac6625a308bac3213a2bea81845581201d821 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-src-not-intobytes.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, 
Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the source type implements `IntoBytes` +const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..4f0ce7746225c1dc9cd495a70d431c8e0aeb3316 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/transmute-src-not-intobytes.stderr @@ -0,0 +1,15 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-msrv/transmute-src-not-intobytes.rs:17:32 + | +17 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `SRC_NOT_AS_BYTES::transmute` + --> tests/ui-msrv/transmute-src-not-intobytes.rs:17:32 + | +17 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | required by a bound in this + | required by this bound in `SRC_NOT_AS_BYTES::transmute` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-dst-not-tryfrombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-dst-not-tryfrombytes.rs new file mode 100644 index 
0000000000000000000000000000000000000000..1a9fdeb2fb77e43634dce6666748e5a42082c483 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-dst-not-tryfrombytes.rs @@ -0,0 +1,16 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute; + +fn main() { + let dst_not_try_from_bytes: Result = try_transmute!(AU16(0)); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-dst-not-tryfrombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-dst-not-tryfrombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1d8cb404bcdd13e4751cda31f2aebafd61158da8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-dst-not-tryfrombytes.stderr @@ -0,0 +1,37 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-msrv/try_transmute-dst-not-tryfrombytes.rs:15:58 + | +15 | let dst_not_try_from_bytes: Result = try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ the trait `TryFromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `try_transmute` + --> src/util/macro_util.rs + | + | Dst: TryFromBytes, + | ^^^^^^^^^^^^ required by this bound in `try_transmute` + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-msrv/try_transmute-dst-not-tryfrombytes.rs:15:33 + | +15 | let dst_not_try_from_bytes: Result = 
try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^ the trait `TryFromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-msrv/try_transmute-dst-not-tryfrombytes.rs:15:58 + | +15 | let dst_not_try_from_bytes: Result = try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ the trait `TryFromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-decrease.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-decrease.rs new file mode 100644 index 0000000000000000000000000000000000000000..74c45cf87fff5c2b1c3ae702772d5f51c96d39d2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-decrease.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::try_transmute; + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. 
+fn main() { + let _decrease_size: Result = try_transmute!(AU16(0)); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-decrease.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-decrease.stderr new file mode 100644 index 0000000000000000000000000000000000000000..b00fa63ae9622fb4bbd66ad1293f6280e8910bee --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/try_transmute-size-decrease.rs:17:41 + | +17 | let _decrease_size: Result = try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AU16` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-increase.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-increase.rs new file mode 100644 index 0000000000000000000000000000000000000000..05dfe05370b603536e642a806edfe8f595884f64 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-increase.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::try_transmute; + +// `try_transmute!` does not support transmuting from a smaller type to a larger +// one. 
+fn main() { + let _increase_size: Result = try_transmute!(0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-increase.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-increase.stderr new file mode 100644 index 0000000000000000000000000000000000000000..693902af4e9e285c47336bebbb8446fea12aa350 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/try_transmute-size-increase.rs:17:43 + | +17 | let _increase_size: Result = try_transmute!(0u8); + | ^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `AU16` (16 bits) + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..304fb004b516a36c528dbcf6a7e2c6c777d21f83 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-src-not-intobytes.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute; + +fn main() { + // `try_transmute` requires that the source type implements `IntoBytes` + let src_not_into_bytes: Result = try_transmute!(NotZerocopy(AU16(0))); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..f9b65cbdc8a81c7edd96ff4a19bf7ef189eaee8b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute-src-not-intobytes.stderr @@ -0,0 +1,12 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-msrv/try_transmute-src-not-intobytes.rs:16:47 + | +16 | let src_not_into_bytes: Result = try_transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `try_transmute` + --> src/util/macro_util.rs + | + | Src: IntoBytes, + | ^^^^^^^^^ required by this bound in `try_transmute` + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-dst-not-tryfrombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-dst-not-tryfrombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..f8abd132c745d17e4f5adb303a15f3c52d73bb9d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-dst-not-tryfrombytes.rs @@ -0,0 +1,19 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style 
license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute_mut; + +fn main() { + // `try_transmute_mut` requires that the destination type implements + // `IntoBytes` + let src = &mut AU16(0); + let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-dst-not-tryfrombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-dst-not-tryfrombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..21d5c5a622d5427517fb5057c14a98adb18568c4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-dst-not-tryfrombytes.stderr @@ -0,0 +1,40 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-msrv/try_transmute_mut-dst-not-tryfrombytes.rs:18:63 + | +18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^ the trait `TryFromBytes` is not implemented for `NotZerocopy` + | + = note: this error originates in the macro `try_transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-msrv/try_transmute_mut-dst-not-tryfrombytes.rs:18:63 + | +18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `NotZerocopy` + | + = note: this error originates in the macro `try_transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound 
`NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-msrv/try_transmute_mut-dst-not-tryfrombytes.rs:18:33 + | +18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `TryFromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-msrv/try_transmute_mut-dst-not-tryfrombytes.rs:18:63 + | +18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^ the trait `TryFromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + = note: this error originates in the macro `try_transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..d47321e000658b6d2f587fc9cbbdede7c44f7159 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-frombytes.rs @@ -0,0 +1,22 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +#[derive(zerocopy::IntoBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::TryFromBytes)] +#[repr(C)] +struct Dst; + +fn main() { + // `try_transmute_mut` requires that the source type implements `FromBytes` + let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..05f13ad210bb23938c94f53de76d45b9f37ee772 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-frombytes.stderr @@ -0,0 +1,23 @@ +error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-msrv/try_transmute_mut-src-not-frombytes.rs:21:40 + | +21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `Src` + | + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-msrv/try_transmute_mut-src-not-frombytes.rs:21:40 + | +21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `Dst` + | + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Dst: IntoBytes` is not satisfied + --> tests/ui-msrv/try_transmute_mut-src-not-frombytes.rs:21:40 + | +21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `Dst` + | + = note: this error originates in the 
macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..ff53576693acdc1700a38295b6ac6e927b4fa57d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-intobytes.rs @@ -0,0 +1,22 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +#[derive(zerocopy::FromBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::TryFromBytes)] +#[repr(C)] +struct Dst; + +fn main() { + // `try_transmute_mut` requires that the source type implements `IntoBytes` + let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..de6b9fe7120fd189f62f57cf98cc158001b509c4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_mut-src-not-intobytes.stderr @@ -0,0 +1,23 @@ +error[E0277]: the trait bound `Src: IntoBytes` is not satisfied + --> tests/ui-msrv/try_transmute_mut-src-not-intobytes.rs:21:40 + | +21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `Src` 
+ | + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-msrv/try_transmute_mut-src-not-intobytes.rs:21:40 + | +21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `Dst` + | + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Dst: IntoBytes` is not satisfied + --> tests/ui-msrv/try_transmute_mut-src-not-intobytes.rs:21:40 + | +21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `Dst` + | + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-mutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-mutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..2f64893ebf0d40d653aaf8abc9951d0b296fa2e4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-mutable.rs @@ -0,0 +1,17 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::try_transmute_ref; + +fn main() {} + +fn ref_dst_mutable() { + // `try_transmute_ref!` requires that its destination type be an immutable + // reference. 
+ let _: Result<&mut u8, _> = try_transmute_ref!(&0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-mutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-mutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..4da514d2645880759a7c73afa22a3157d9c897c9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-mutable.stderr @@ -0,0 +1,22 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/try_transmute_ref-dst-mutable.rs:16:33 + | +16 | let _: Result<&mut u8, _> = try_transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/try_transmute_ref-dst-mutable.rs:16:33 + | +16 | let _: Result<&mut u8, _> = try_transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | types differ in mutability + | help: try using a variant of the expected enum: `Err(t.try_transmute_ref())` + | + = note: expected enum `Result<&mut u8, _>` + found enum `Result<&_, ValidityError<&u8, _>>` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-not-immutable-tryfrombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-not-immutable-tryfrombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..078369342c5ce876eaba47ec93a85f44165a7c63 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-not-immutable-tryfrombytes.rs @@ -0,0 +1,18 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute_ref; + +fn main() { + // `try_transmute_ref` requires that the source type implements `Immutable` + // and `IntoBytes` + let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-not-immutable-tryfrombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-not-immutable-tryfrombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..cc57c87129d726151e66c45fc00041904193d6a0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-dst-not-immutable-tryfrombytes.stderr @@ -0,0 +1,40 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-msrv/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:59 + | +17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `TryFromBytes` is not implemented for `NotZerocopy` + | + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-msrv/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:59 + | +17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = 
try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Immutable` is not implemented for `NotZerocopy` + | + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-msrv/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:33 + | +17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ the trait `TryFromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-msrv/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:59 + | +17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `TryFromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-src-not-immutable-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-src-not-immutable-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..593e68ffa7d135f5b732538e3c5892bd6c49cd0b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-src-not-immutable-intobytes.rs @@ -0,0 +1,18 @@ +// Copyright 2024 The Fuchsia Authors +// +// 
Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute_ref; + +fn main() { + // `try_transmute_ref` requires that the source type implements `Immutable` + // and `IntoBytes` + let src_not_into_bytes: Result<&AU16, _> = try_transmute_ref!(&NotZerocopy(AU16(0))); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-src-not-immutable-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-src-not-immutable-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..e1d380fe0fa27388f098b2afb263b667bb3d7880 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-msrv/try_transmute_ref-src-not-immutable-intobytes.stderr @@ -0,0 +1,15 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-msrv/try_transmute_ref-src-not-immutable-intobytes.rs:17:48 + | +17 | let src_not_into_bytes: Result<&AU16, _> = try_transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `IntoBytes` is not implemented for `NotZerocopy` + | + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-msrv/try_transmute_ref-src-not-immutable-intobytes.rs:17:48 + | +17 | let src_not_into_bytes: Result<&AU16, _> = try_transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Immutable` is not implemented for `NotZerocopy` + | + = note: this error originates in the macro `try_transmute_ref` (in Nightly 
builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..e8c6d1d5314e2af8cc03122ec9b389c13a87ca4e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-bytes.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::FromBytes; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. 
+ takes_from_bytes::(); +} + +fn takes_from_bytes() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..4d8bbf8c79ee206ff375b9842a004eb7b552d029 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-bytes.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-nightly/diagnostic-not-implemented-from-bytes.rs:16:24 + | +16 | takes_from_bytes::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `takes_from_bytes` + --> tests/ui-nightly/diagnostic-not-implemented-from-bytes.rs:19:24 + | +19 | fn takes_from_bytes() {} + | ^^^^^^^^^ required by this bound in `takes_from_bytes` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-zeros.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-zeros.rs new file mode 100644 index 0000000000000000000000000000000000000000..a84a833c138a89f003cf5e83a2b0df1077586801 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-zeros.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::FromZeros; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_from_zeros::(); +} + +fn takes_from_zeros() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-zeros.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-zeros.stderr new file mode 100644 index 0000000000000000000000000000000000000000..a7e8b97fc0659cd9ef83978f0375e1de06b7afc2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-from-zeros.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: FromZeros` is not satisfied + --> tests/ui-nightly/diagnostic-not-implemented-from-zeros.rs:16:24 + | +16 | takes_from_zeros::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromZeros` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(FromZeros)]` to `NotZerocopy` + = help: the following other types implement trait `FromZeros`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `takes_from_zeros` + --> tests/ui-nightly/diagnostic-not-implemented-from-zeros.rs:19:24 + | +19 | fn 
takes_from_zeros() {} + | ^^^^^^^^^ required by this bound in `takes_from_zeros` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-immutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-immutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..48e9e6580106de2cef30fe5afcc8a4d4ce44f53a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-immutable.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::Immutable; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. 
+ takes_immutable::(); +} + +fn takes_immutable() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-immutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-immutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..df7294ba7c53fa8c3576f441067e13d2a35b0d29 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-immutable.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-nightly/diagnostic-not-implemented-immutable.rs:16:23 + | +16 | takes_immutable::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `Immutable` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `NotZerocopy` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `takes_immutable` + --> tests/ui-nightly/diagnostic-not-implemented-immutable.rs:19:23 + | +19 | fn takes_immutable() {} + | ^^^^^^^^^ required by this bound in `takes_immutable` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-into-bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-into-bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..a348aafdf1981d9abfdbed39fc13306269c87bc1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-into-bytes.rs @@ -0,0 +1,19 @@ +// 
Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::IntoBytes; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_into_bytes::(); +} + +fn takes_into_bytes() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-into-bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-into-bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..dfb29ad454b6a34936dfa9eb6a5f67b57de6f0ec --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-into-bytes.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-nightly/diagnostic-not-implemented-into-bytes.rs:16:24 + | +16 | takes_into_bytes::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `takes_into_bytes` + --> tests/ui-nightly/diagnostic-not-implemented-into-bytes.rs:19:24 + | +19 | fn takes_into_bytes() {} + | ^^^^^^^^^ required by this bound in `takes_into_bytes` diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-issue-1296.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-issue-1296.rs new file mode 100644 index 0000000000000000000000000000000000000000..5b048e758154859c7f6873d98df32db0a608ea1b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-issue-1296.rs @@ -0,0 +1,57 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::{Immutable, IntoBytes}; + +fn main() { + // This is adapted from #1296, which includes the following text: + // + // The compiler errors when a type is missing Immutable are somewhat + // misleading, although I'm not sure there's much zerocopy can do about + // this. 
An example where the compiler recommends adding a reference + // rather than implementing Immutable (some were even more confusing than + // this): + // + // error[E0277]: the trait bound `virtio::wl::CtrlVfdNewDmabuf: zerocopy::Immutable` is not satisfied + // --> devices/src/virtio/wl.rs:317:20 + // | + // 317 | .write_obj(ctrl_vfd_new_dmabuf) + // | --------- ^^^^^^^^^^^^^^^^^^^ the trait `zerocopy::Immutable` is not implemented for `virtio::wl::CtrlVfdNewDmabuf` + // | | + // | required by a bound introduced by this call + // | + // note: required by a bound in `virtio::descriptor_utils::Writer::write_obj` + // --> devices/src/virtio/descriptor_utils.rs:536:25 + // | + // 536 | pub fn write_obj(&mut self, val: T) -> io::Result<()> { + // | ^^^^^^^^^ required by this bound in `Writer::write_obj` + // help: consider borrowing here + // | + // 317 | .write_obj(&ctrl_vfd_new_dmabuf) + // | + + // 317 | .write_obj(&mut ctrl_vfd_new_dmabuf) + // | ++++ + // + // Taking the compiler's suggestion results in a different error with a + // recommendation to remove the reference (back to the original code). + // + // As of this writing, the described problem is still happening thanks to + // https://github.com/rust-lang/rust/issues/130563. We include this test so + // that we can capture the current behavior, but we will update it once that + // Rust issue is fixed. 
+ Foo.write_obj(NotZerocopy(())); +} + +struct Foo; + +impl Foo { + fn write_obj(&mut self, _val: T) {} +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-issue-1296.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-issue-1296.stderr new file mode 100644 index 0000000000000000000000000000000000000000..24453f41b3f1b726485d72d1a0148e588e0eb589 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-issue-1296.stderr @@ -0,0 +1,49 @@ +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-nightly/diagnostic-not-implemented-issue-1296.rs:50:19 + | +50 | Foo.write_obj(NotZerocopy(())); + | --------- ^^^^^^^^^^^^^^^ the trait `Immutable` is not implemented for `NotZerocopy` + | | + | required by a bound introduced by this call + | +note: required by a bound in `Foo::write_obj` + --> tests/ui-nightly/diagnostic-not-implemented-issue-1296.rs:56:21 + | +56 | fn write_obj(&mut self, _val: T) {} + | ^^^^^^^^^ required by this bound in `Foo::write_obj` +help: consider borrowing here + | +50 | Foo.write_obj(&NotZerocopy(())); + | + +50 | Foo.write_obj(&mut NotZerocopy(())); + | ++++ + +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-nightly/diagnostic-not-implemented-issue-1296.rs:50:19 + | +50 | Foo.write_obj(NotZerocopy(())); + | --------- ^^^^^^^^^^^^^^^ unsatisfied trait bound + | | + | required by a bound introduced by this call + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + 
AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `Foo::write_obj` + --> tests/ui-nightly/diagnostic-not-implemented-issue-1296.rs:56:33 + | +56 | fn write_obj(&mut self, _val: T) {} + | ^^^^^^^^^ required by this bound in `Foo::write_obj` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-known-layout.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-known-layout.rs new file mode 100644 index 0000000000000000000000000000000000000000..ded20312bec5c5d7c138d85238cdf3ecc4ecce3d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-known-layout.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::KnownLayout; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. 
+ takes_known_layout::(); +} + +fn takes_known_layout() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-known-layout.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-known-layout.stderr new file mode 100644 index 0000000000000000000000000000000000000000..7fed63a8796f1e4f1d1dca38de154403507fb297 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-known-layout.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: KnownLayout` is not satisfied + --> tests/ui-nightly/diagnostic-not-implemented-known-layout.rs:16:26 + | +16 | takes_known_layout::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `KnownLayout` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(KnownLayout)]` to `NotZerocopy` + = help: the following other types implement trait `KnownLayout`: + &T + &mut T + () + *const T + *mut T + AU16 + AtomicBool + AtomicI16 + and $N others +note: required by a bound in `takes_known_layout` + --> tests/ui-nightly/diagnostic-not-implemented-known-layout.rs:19:26 + | +19 | fn takes_known_layout() {} + | ^^^^^^^^^^^ required by this bound in `takes_known_layout` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-try-from-bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-try-from-bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..18900ecaa639aa301f47f89a99f0543d1d9c6f31 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-try-from-bytes.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::TryFromBytes; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_try_from_bytes::(); +} + +fn takes_try_from_bytes() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-try-from-bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-try-from-bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..b5b36914001a8421808aec7d414ff7fe7857900c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-try-from-bytes.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-nightly/diagnostic-not-implemented-try-from-bytes.rs:16:28 + | +16 | takes_try_from_bytes::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `takes_try_from_bytes` + --> 
tests/ui-nightly/diagnostic-not-implemented-try-from-bytes.rs:19:28 + | +19 | fn takes_try_from_bytes() {} + | ^^^^^^^^^^^^ required by this bound in `takes_try_from_bytes` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-unaligned.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-unaligned.rs new file mode 100644 index 0000000000000000000000000000000000000000..6196f6a75bf8c45d85a539bdd265109844db47a2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-unaligned.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::Unaligned; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. 
+ takes_unaligned::(); +} + +fn takes_unaligned() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-unaligned.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-unaligned.stderr new file mode 100644 index 0000000000000000000000000000000000000000..cbb83d28168f19bc4ce25b81180441e284846488 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/diagnostic-not-implemented-unaligned.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: zerocopy::Unaligned` is not satisfied + --> tests/ui-nightly/diagnostic-not-implemented-unaligned.rs:16:23 + | +16 | takes_unaligned::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `zerocopy::Unaligned` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(Unaligned)]` to `NotZerocopy` + = help: the following other types implement trait `zerocopy::Unaligned`: + () + AtomicBool + AtomicI8 + AtomicU8 + Cell + F32 + F64 + I128 + and $N others +note: required by a bound in `takes_unaligned` + --> tests/ui-nightly/diagnostic-not-implemented-unaligned.rs:19:23 + | +19 | fn takes_unaligned() {} + | ^^^^^^^^^ required by this bound in `takes_unaligned` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_not_from_bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_not_from_bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..e01c4a9b6a26ee1b7ccf7b6c86517f510f65e1ec --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_not_from_bytes.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia 
Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; + +fn main() {} + +// Should fail because `NotZerocopy: !FromBytes`. +const NOT_FROM_BYTES: NotZerocopy = + zerocopy::include_value!("../../testdata/include_value/data"); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_not_from_bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_not_from_bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1d20b883461bf510ef9c5d6ba1a61347f6f1fe30 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_not_from_bytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-nightly/include_value_not_from_bytes.rs:17:5 + | +17 | zerocopy::include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `NOT_FROM_BYTES::transmute` + --> tests/ui-nightly/include_value_not_from_bytes.rs:17:5 + | +17 | zerocopy::include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | 
required by a bound in this function + | required by this bound in `transmute` + = note: this error originates in the macro `$crate::transmute` which comes from the expansion of the macro `zerocopy::include_value` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_wrong_size.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_wrong_size.rs new file mode 100644 index 0000000000000000000000000000000000000000..d0c5fcfc5f5d451a7d11e25cbbd467ac02508840 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_wrong_size.rs @@ -0,0 +1,12 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +// Should fail because the file is 4 bytes long, not 8. 
+const WRONG_SIZE: u64 = zerocopy::include_value!("../../testdata/include_value/data"); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_wrong_size.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_wrong_size.stderr new file mode 100644 index 0000000000000000000000000000000000000000..3a6f98da516dc83aa72ec74c17eca2822d07cf68 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/include_value_wrong_size.stderr @@ -0,0 +1,17 @@ +error[E0080]: transmuting from 4-byte type to 8-byte type: `[u8; 4]` -> `u64` + --> tests/ui-nightly/include_value_wrong_size.rs:12:25 + | +12 | const WRONG_SIZE: u64 = zerocopy::include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ evaluation of `WRONG_SIZE` failed here + | + = note: this error originates in the macro `$crate::transmute` which comes from the expansion of the macro `zerocopy::include_value` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/include_value_wrong_size.rs:12:25 + | +12 | const WRONG_SIZE: u64 = zerocopy::include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 4]` (32 bits) + = note: target type: `u64` (64 bits) + = note: this error originates in the macro `$crate::transmute` which comes from the expansion of the macro `zerocopy::include_value` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/max-align.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/max-align.rs new file mode 100644 index 
0000000000000000000000000000000000000000..53e3eb9b0aa59db355673e1d96e9086e7db4e6bb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/max-align.rs @@ -0,0 +1,99 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[repr(C, align(1))] +struct Align1; + +#[repr(C, align(2))] +struct Align2; + +#[repr(C, align(4))] +struct Align4; + +#[repr(C, align(8))] +struct Align8; + +#[repr(C, align(16))] +struct Align16; + +#[repr(C, align(32))] +struct Align32; + +#[repr(C, align(64))] +struct Align64; + +#[repr(C, align(128))] +struct Align128; + +#[repr(C, align(256))] +struct Align256; + +#[repr(C, align(512))] +struct Align512; + +#[repr(C, align(1024))] +struct Align1024; + +#[repr(C, align(2048))] +struct Align2048; + +#[repr(C, align(4096))] +struct Align4096; + +#[repr(C, align(8192))] +struct Align8192; + +#[repr(C, align(16384))] +struct Align16384; + +#[repr(C, align(32768))] +struct Align32768; + +#[repr(C, align(65536))] +struct Align65536; + +#[repr(C, align(131072))] +struct Align131072; + +#[repr(C, align(262144))] +struct Align262144; + +#[repr(C, align(524288))] +struct Align524288; + +#[repr(C, align(1048576))] +struct Align1048576; + +#[repr(C, align(2097152))] +struct Align2097152; + +#[repr(C, align(4194304))] +struct Align4194304; + +#[repr(C, align(8388608))] +struct Align8388608; + +#[repr(C, align(16777216))] +struct Align16777216; + +#[repr(C, align(33554432))] +struct Align33554432; + +#[repr(C, align(67108864))] +struct Align67108864; + +#[repr(C, align(134217728))] +struct Align13421772; + +#[repr(C, align(268435456))] +struct Align26843545; + +#[repr(C, align(1073741824))] +struct Align1073741824; + +fn main() {} diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/max-align.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/max-align.stderr new file mode 100644 index 0000000000000000000000000000000000000000..c11eed539e5da7b208cd51d9beac497d64eb8f9c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/max-align.stderr @@ -0,0 +1,5 @@ +error[E0589]: invalid `repr(align)` attribute: larger than 2^29 + --> tests/ui-nightly/max-align.rs:96:17 + | +96 | #[repr(C, align(1073741824))] + | ^^^^^^^^^^ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/ptr-is-invariant-over-v.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/ptr-is-invariant-over-v.rs new file mode 100644 index 0000000000000000000000000000000000000000..b9a76948fb03ba1dbde7a0ffbefe687188f1601d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/ptr-is-invariant-over-v.rs @@ -0,0 +1,29 @@ +// Copyright 2025 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::pointer::{ + invariant::{Aligned, Exclusive, Shared, Valid}, + Ptr, +}; + +fn _when_exclusive<'big: 'small, 'small>( + big: Ptr<'small, &'big u32, (Exclusive, Aligned, Valid)>, + mut _small: Ptr<'small, &'small u32, (Exclusive, Aligned, Valid)>, +) { + _small = big; +} + +fn _when_shared<'big: 'small, 'small>( + big: Ptr<'small, &'big u32, (Shared, Aligned, Valid)>, + mut _small: Ptr<'small, &'small u32, (Shared, Aligned, Valid)>, +) { + _small = big; +} + +fn main() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/ptr-is-invariant-over-v.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/ptr-is-invariant-over-v.stderr new file mode 100644 index 0000000000000000000000000000000000000000..aa50070756c3fc8e41b87e52bc07265fc1b9d7d4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/ptr-is-invariant-over-v.stderr @@ -0,0 +1,31 @@ +error: lifetime may not live long enough + --> tests/ui-nightly/ptr-is-invariant-over-v.rs:19:5 + | +15 | fn _when_exclusive<'big: 'small, 'small>( + | ---- ------ lifetime `'small` defined here + | | + | lifetime `'big` defined here +... +19 | _small = big; + | ^^^^^^^^^^^^ assignment requires that `'small` must outlive `'big` + | + = help: consider adding the following bound: `'small: 'big` + = note: requirement occurs because of the type `Ptr<'_, &u32, (zerocopy::invariant::Exclusive, zerocopy::invariant::Aligned, zerocopy::invariant::Valid)>`, which makes the generic argument `&u32` invariant + = note: the struct `Ptr<'a, T, I>` is invariant over the parameter `T` + = help: see for more information about variance + +error: lifetime may not live long enough + --> tests/ui-nightly/ptr-is-invariant-over-v.rs:26:5 + | +22 | fn _when_shared<'big: 'small, 'small>( + | ---- ------ lifetime `'small` defined here + | | + | lifetime `'big` defined here +... 
+26 | _small = big; + | ^^^^^^^^^^^^ assignment requires that `'small` must outlive `'big` + | + = help: consider adding the following bound: `'small: 'big` + = note: requirement occurs because of the type `Ptr<'_, &u32, (zerocopy::invariant::Shared, zerocopy::invariant::Aligned, zerocopy::invariant::Valid)>`, which makes the generic argument `&u32` invariant + = note: the struct `Ptr<'a, T, I>` is invariant over the parameter `T` + = help: see for more information about variance diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-dst-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-dst-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..3daed1c842a35919b805fadb78cb2831ed0671fa --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-dst-not-frombytes.rs @@ -0,0 +1,17 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-dst-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-dst-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..abe4bf0572552b4691a2b8c131572fed18e8d43c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-dst-not-frombytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-nightly/transmute-dst-not-frombytes.rs:17:41 + | +17 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `DST_NOT_FROM_BYTES::transmute` + --> tests/ui-nightly/transmute-dst-not-frombytes.rs:17:41 + | +17 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ + | | + | required by a bound in this function + | required by this bound in `transmute` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-const.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-const.rs new file mode 100644 index 0000000000000000000000000000000000000000..9bee817cf610f55c28a3ccebce546b911e416038 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-const.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use zerocopy::transmute_mut; + +fn main() {} + +const ARRAY_OF_U8S: [u8; 2] = [0u8; 2]; + +// `transmute_mut!` cannot, generally speaking, be used in const contexts. +const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-const.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-const.stderr new file mode 100644 index 0000000000000000000000000000000000000000..5edaea447dbaab4af070c359280b3bebf6e2a538 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-const.stderr @@ -0,0 +1,32 @@ +warning: taking a mutable reference to a `const` item + --> tests/ui-nightly/transmute-mut-const.rs:18:52 + | +18 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^ + | + = note: each usage of a `const` item creates a new temporary + = note: the mutable reference will refer to this temporary, not the original `const` item +note: `const` item defined here + --> tests/ui-nightly/transmute-mut-const.rs:15:1 + | +15 | const ARRAY_OF_U8S: [u8; 2] = 
[0u8; 2]; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(const_item_mutation)]` on by default + +error[E0015]: cannot call non-const method `zerocopy::util::macro_util::Wrap::<&mut [u8; 2], &mut [u8; 2]>::transmute_mut_inference_helper` in constants + --> tests/ui-nightly/transmute-mut-const.rs:18:37 + | +18 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: calls in constants are limited to constant functions, tuple structs and tuple variants + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0015]: cannot call non-const method `zerocopy::util::macro_util::Wrap::<&mut [u8; 2], &mut [u8; 2]>::transmute_mut` in constants + --> tests/ui-nightly/transmute-mut-const.rs:18:37 + | +18 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: calls in constants are limited to constant functions, tuple structs and tuple variants + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..8f2d511cca0e4b558dfe75412754c543c2893f7a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting into a non-reference +// destination type. +const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..652a17de8669f48ddfa77137eb388c3ca29d1884 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-a-reference.stderr @@ -0,0 +1,19 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..504c12ac7e3542270c75fa56a26c56cd2ad290d6 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-frombytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..7f5ddbe90d4608075036a11c79a4b89a0f0d729e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-frombytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-nightly/transmute-mut-dst-not-frombytes.rs:22:38 + | + 22 | const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `Dst` + --> tests/ui-nightly/transmute-mut-dst-not-frombytes.rs:19:1 + | + 19 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Dst` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: 
required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function +... + | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..7c9d60e6e0ec57737324a4c40f24e2ec97d4f22b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-intobytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `IntoBytes` +const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..a33b3b1e9d0fe03a5bccddeb3aa70bc0bffd3720 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-dst-not-intobytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Dst: IntoBytes` is not satisfied + --> tests/ui-nightly/transmute-mut-dst-not-intobytes.rs:22:36 + | + 22 | const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `Dst` + --> tests/ui-nightly/transmute-mut-dst-not-intobytes.rs:19:1 + | + 19 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Dst` + = help: the following other types implement trait `IntoBytes`: + () + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + AtomicU16 + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function +... 
+ | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-illegal-lifetime.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-illegal-lifetime.rs new file mode 100644 index 0000000000000000000000000000000000000000..c31765e4b96b498bbea7d521c8c949a26484ea9f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +fn increase_lifetime() { + let mut x = 0u64; + // It is illegal to increase the lifetime scope. + let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-illegal-lifetime.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-illegal-lifetime.stderr new file mode 100644 index 0000000000000000000000000000000000000000..b826fcc7a9a8ed39ddfe7891da48250dc3bb484d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-illegal-lifetime.stderr @@ -0,0 +1,12 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-nightly/transmute-mut-illegal-lifetime.rs:14:56 + | +12 | let mut x = 0u64; + | ----- binding `x` declared here +13 | // It is illegal to increase the lifetime scope. 
+14 | let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); + | ---------------- ^^^^^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-dst-not-references.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-dst-not-references.rs new file mode 100644 index 0000000000000000000000000000000000000000..d07829de056f5b0ac3266db2e92a6a1530331897 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-dst-not-references.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting between non-reference source +// and destination types. 
+const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-dst-not-references.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-dst-not-references.stderr new file mode 100644 index 0000000000000000000000000000000000000000..ffe1ab4f74d87a8e72704e62086e19ab1f68386d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-dst-not-references.stderr @@ -0,0 +1,15 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-src-dst-not-references.rs:15:59 + | +15 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` +help: consider mutably borrowing here + | +15 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(&mut 0usize); + | ++++ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-immutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-immutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..2d6e84542a805e03ddadb4cf33686a5fee315c2e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-immutable.rs @@ -0,0 +1,16 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +fn main() {} + +fn ref_src_immutable() { + // `transmute_mut!` requires that its source type be a mutable reference. + let _: &mut u8 = transmute_mut!(&0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-immutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-immutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..75924df3c645435d6b35c57086ca228ad3a19211 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-immutable.stderr @@ -0,0 +1,11 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-src-immutable.rs:15:37 + | +15 | let _: &mut u8 = transmute_mut!(&0u8); + | ---------------^^^^- + | | | + | | types differ in mutability + | expected due to this + | + = note: expected mutable reference `&mut _` + found reference `&u8` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..71f6e67586a12c224a33e893c14cc05c5138beaf --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a non-reference source +// type. 
+const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..3cc9126c90201847a5d660176b6cf2aebcb047cc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-a-reference.stderr @@ -0,0 +1,15 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-src-not-a-reference.rs:15:53 + | +15 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` +help: consider mutably borrowing here + | +15 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(&mut 0usize); + | ++++ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..fbf6232cabf7e6236d9fbe35a05b402bc09bfdd6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-frombytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `FromBytes` +const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..8baf521f35d345a4f8dc5cfa287780e6b850aa08 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-frombytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-nightly/transmute-mut-src-not-frombytes.rs:22:38 + | + 22 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `Src` + --> tests/ui-nightly/transmute-mut-src-not-frombytes.rs:15:1 + | + 15 | struct Src; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Src` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function + | where + | Src: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error 
originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..505734ac3d6f1164867a57885953fdf2493071ca --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-intobytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `IntoBytes` +const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..76a4a3de62f2d1efe0ffa7df646ec5391c1e4095 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-not-intobytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Src: IntoBytes` is not satisfied + --> tests/ui-nightly/transmute-mut-src-not-intobytes.rs:22:36 + | + 22 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | 
^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `Src` + --> tests/ui-nightly/transmute-mut-src-not-intobytes.rs:15:1 + | + 15 | struct Src; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Src` + = help: the following other types implement trait `IntoBytes`: + () + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + AtomicU16 + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function + | where + | Src: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-unsized.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-unsized.rs new file mode 100644 index 0000000000000000000000000000000000000000..af2ffd363f88eb2be6362d805ef0e026fcde047a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-unsized.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from an unsized source type to +// a sized destination type. 
+const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-unsized.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-unsized.stderr new file mode 100644 index 0000000000000000000000000000000000000000..9c70729463698578bd7942b255c22043e5bd0101 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-mut-src-unsized.stderr @@ -0,0 +1,16 @@ +error[E0599]: the method `transmute_mut` exists for struct `zerocopy::util::macro_util::Wrap<&mut [u8], &mut [u8; 1]>`, but its trait bounds were not satisfied + --> tests/ui-nightly/transmute-mut-src-unsized.rs:15:35 + | + 15 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + ::: src/util/macro_util.rs + | + | pub struct Wrap(pub Src, pub PhantomData); + | ------------------------- doesn't satisfy `_: TransmuteMutDst<'_>` + | + = note: the following trait bounds were not satisfied: + `[u8]: Sized` + `<[u8; 1] as KnownLayout>::PointerMetadata = usize` + which is required by `zerocopy::util::macro_util::Wrap<&mut [u8], &mut [u8; 1]>: zerocopy::util::macro_util::TransmuteMutDst<'_>` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ptr-to-usize.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ptr-to-usize.rs new file mode 100644 index 0000000000000000000000000000000000000000..27db0bbb9750912a2dff430fefc1208772740bbc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ptr-to-usize.rs @@ -0,0 +1,18 @@ +// Copyright 2022 The Fuchsia Authors +// 
+// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute; + +fn main() {} + +// It is unclear whether we can or should support this transmutation, especially +// in a const context. This test ensures that even if such a transmutation +// becomes valid due to the requisite implementations of `FromBytes` being +// added, that we re-examine whether it should specifically be valid in a const +// context. +const POINTER_VALUE: usize = transmute!(&0usize as *const usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ptr-to-usize.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ptr-to-usize.stderr new file mode 100644 index 0000000000000000000000000000000000000000..64b3c5f9a53695acdaae4b8318da319dc58b5f0e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ptr-to-usize.stderr @@ -0,0 +1,29 @@ +error[E0277]: the trait bound `*const usize: IntoBytes` is not satisfied + --> tests/ui-nightly/transmute-ptr-to-usize.rs:18:30 + | +18 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `IntoBytes` is not implemented for `*const usize` + | required by a bound introduced by this call + | + = note: Consider adding `#[derive(IntoBytes)]` to `*const usize` +help: the trait `IntoBytes` is implemented for `usize` + --> src/util/macros.rs + | + | unsafe impl $trait for $ty { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + ::: src/impls.rs + | + | unsafe_impl!(usize: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + | ----------------------------------------------------------------------------- in this macro invocation +note: required by a bound in 
`POINTER_VALUE::transmute` + --> tests/ui-nightly/transmute-ptr-to-usize.rs:18:30 + | +18 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | required by a bound in this function + | required by this bound in `transmute` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-mutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-mutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..75f837acb0652fe7ea5b33a65fed56e332969c27 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-mutable.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +fn ref_dst_mutable() { + // `transmute_ref!` requires that its destination type be an immutable + // reference. 
+ let _: &mut u8 = transmute_ref!(&0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-mutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-mutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1fff03460c462d952b6f5d614d16b4792c9c4e78 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-mutable.stderr @@ -0,0 +1,39 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference 
`&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..f6dbd00599515ae0a2944190a634d3fdc4e95e04 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting into a non-reference +// destination type. 
+const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..8a47ed870c4a3e77f257c1090bad954bf016fc86 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-a-reference.stderr @@ -0,0 +1,39 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | 
^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..969e8877b6833402acab765cb6738dd6f64594a9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-frombytes.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::Immutable)] +#[repr(transparent)] +struct Dst(AU16); + +// `transmute_ref` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &Dst = transmute_ref!(&AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..4cdac53c238aba5c4b7bb2ed667a9de01acd4f07 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-frombytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-nightly/transmute-ref-dst-not-frombytes.rs:21:34 + | +21 | const DST_NOT_FROM_BYTES: &Dst = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | unsatisfied trait bound + | required by a bound introduced by this call + | +help: the trait `FromBytes` is not implemented for `Dst` + --> tests/ui-nightly/transmute-ref-dst-not-frombytes.rs:18:1 + | +18 | struct Dst(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Dst` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `AssertDstIsFromBytes` + --> tests/ui-nightly/transmute-ref-dst-not-frombytes.rs:21:34 + | +21 | const DST_NOT_FROM_BYTES: &Dst = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsFromBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-nocell.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-nocell.rs new file mode 100644 index 0000000000000000000000000000000000000000..4f21b7273312dd5ebc288947f52e6f5d10c72bde --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-nocell.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::FromBytes)] +#[repr(transparent)] +struct Dst(AU16); + +// `transmute_ref` requires that the destination type implements `Immutable` +const DST_NOT_IMMUTABLE: &Dst = transmute_ref!(&AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-nocell.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-nocell.stderr new file mode 100644 index 0000000000000000000000000000000000000000..ca4b8eb77b8f3799d420946101d070fe4394098b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-dst-not-nocell.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Dst: Immutable` is not satisfied + --> tests/ui-nightly/transmute-ref-dst-not-nocell.rs:21:33 + | +21 | const DST_NOT_IMMUTABLE: &Dst = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | unsatisfied trait bound + | required by a bound introduced by this call + | +help: the trait `Immutable` is not implemented for `Dst` + --> 
tests/ui-nightly/transmute-ref-dst-not-nocell.rs:18:1 + | +18 | struct Dst(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `Dst` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `AssertDstIsImmutable` + --> tests/ui-nightly/transmute-ref-dst-not-nocell.rs:21:33 + | +21 | const DST_NOT_IMMUTABLE: &Dst = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsImmutable` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-illegal-lifetime.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-illegal-lifetime.rs new file mode 100644 index 0000000000000000000000000000000000000000..8dd191e6f4038619262984d9723e12fce7d4d210 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +fn increase_lifetime() { + let x = 0u64; + // It is illegal to increase the lifetime scope. 
+ let _: &'static u64 = zerocopy::transmute_ref!(&x); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-illegal-lifetime.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-illegal-lifetime.stderr new file mode 100644 index 0000000000000000000000000000000000000000..e16a557611f40e3c03234aa8fa05b74b75be4767 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-illegal-lifetime.stderr @@ -0,0 +1,12 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-nightly/transmute-ref-illegal-lifetime.rs:14:52 + | +12 | let x = 0u64; + | - binding `x` declared here +13 | // It is illegal to increase the lifetime scope. +14 | let _: &'static u64 = zerocopy::transmute_ref!(&x); + | ------------ ^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-dst-not-references.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-dst-not-references.rs new file mode 100644 index 0000000000000000000000000000000000000000..c65bd24a93b72b7db351d8b418f6524984370c62 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-dst-not-references.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting between non-reference source +// and destination types. +const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-dst-not-references.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-dst-not-references.stderr new file mode 100644 index 0000000000000000000000000000000000000000..ed1cf2388f0c10e2beecd34c6b650ff0f0a66026 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-dst-not-references.stderr @@ -0,0 +1,55 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:15:54 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&_`, found `usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` +help: consider borrowing here + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(&0usize); + | + + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run 
with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..7bbba06e1565749ce9f4c5e523c615dd1050d2b2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a non-reference source +// type. 
+const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..bd66af374dfaa9c95b9fe2df1cf7b042e1fc2930 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-a-reference.stderr @@ -0,0 +1,15 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-src-not-a-reference.rs:15:49 + | +15 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&_`, found `usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` +help: consider borrowing here + | +15 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(&0usize); + | + diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..8f357c92aa9b4dcfbc615642cce67e6bd929a7f2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-intobytes.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::Immutable)] +#[repr(transparent)] +struct Src(AU16); + +// `transmute_ref` requires that the source type implements `IntoBytes` +const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..99c816d8038951df38df56c588e1ae283b3010f2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-intobytes.stderr @@ -0,0 +1,60 @@ +error[E0277]: the trait bound `Src: IntoBytes` is not satisfied + --> tests/ui-nightly/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | unsatisfied trait bound + | required by a bound introduced by this call + | +help: the trait `IntoBytes` is not implemented for `Src` + --> tests/ui-nightly/transmute-ref-src-not-intobytes.rs:18:1 + | +18 | struct Src(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Src` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `AssertSrcIsIntoBytes` + --> tests/ui-nightly/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsIntoBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Src: 
IntoBytes` is not satisfied + --> tests/ui-nightly/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `Src` + --> tests/ui-nightly/transmute-ref-src-not-intobytes.rs:18:1 + | +18 | struct Src(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Src` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `AssertSrcIsIntoBytes` + --> tests/ui-nightly/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsIntoBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-nocell.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-nocell.rs new file mode 100644 index 0000000000000000000000000000000000000000..862951b888cb3d124492cc7c12327ffc80793e17 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-nocell.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::IntoBytes)] +#[repr(transparent)] +struct Src(AU16); + +// `transmute_ref` requires that the source type implements `Immutable` +const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-nocell.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-nocell.stderr new file mode 100644 index 0000000000000000000000000000000000000000..338ca7e46a5873bc60b38aaa75e471f4a33d6f2f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-not-nocell.stderr @@ -0,0 +1,60 @@ +error[E0277]: the trait bound `Src: Immutable` is not satisfied + --> tests/ui-nightly/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | unsatisfied trait bound + | required by a bound introduced by this call + | +help: the trait `Immutable` is not implemented for `Src` + --> tests/ui-nightly/transmute-ref-src-not-nocell.rs:18:1 + | +18 | struct Src(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `Src` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `AssertSrcIsImmutable` + --> tests/ui-nightly/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsImmutable` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Src: Immutable` is 
not satisfied + --> tests/ui-nightly/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `Immutable` is not implemented for `Src` + --> tests/ui-nightly/transmute-ref-src-not-nocell.rs:18:1 + | +18 | struct Src(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `Src` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `AssertSrcIsImmutable` + --> tests/ui-nightly/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsImmutable` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-unsized.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-unsized.rs new file mode 100644 index 0000000000000000000000000000000000000000..262395bd7361ca4705b70a1b177af53948a2be13 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-unsized.rs @@ -0,0 +1,14 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from an unsized source type. 
+const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-unsized.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-unsized.stderr new file mode 100644 index 0000000000000000000000000000000000000000..46cbaf2d305a6e459ea91eaa3ee57e812c9040e9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-ref-src-unsized.stderr @@ -0,0 +1,16 @@ +error[E0599]: the method `transmute_ref` exists for struct `zerocopy::util::macro_util::Wrap<&[u8], &[u8; 1]>`, but its trait bounds were not satisfied + --> tests/ui-nightly/transmute-ref-src-unsized.rs:14:31 + | + 14 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + ::: src/util/macro_util.rs + | + | pub struct Wrap(pub Src, pub PhantomData); + | ------------------------- doesn't satisfy `_: TransmuteRefDst<'_>` + | + = note: the following trait bounds were not satisfied: + `[u8]: Sized` + `<[u8; 1] as KnownLayout>::PointerMetadata = usize` + which is required by `zerocopy::util::macro_util::Wrap<&[u8], &[u8; 1]>: zerocopy::util::macro_util::TransmuteRefDst<'_>` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-decrease.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-decrease.rs new file mode 100644 index 0000000000000000000000000000000000000000..98d00e1950eec4e9686f791dc9d074fac7d26a23 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-decrease.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style 
license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute; + +fn main() {} + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. +const DECREASE_SIZE: u8 = transmute!(AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-decrease.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-decrease.stderr new file mode 100644 index 0000000000000000000000000000000000000000..aca5fa9be55a6394a06a08f618f6b811dfe5eda0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-decrease.stderr @@ -0,0 +1,17 @@ +error[E0080]: transmuting from 2-byte type to 1-byte type: `AU16` -> `u8` + --> tests/ui-nightly/transmute-size-decrease.rs:18:27 + | +18 | const DECREASE_SIZE: u8 = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ evaluation of `DECREASE_SIZE` failed here + | + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-size-decrease.rs:18:27 + | +18 | const DECREASE_SIZE: u8 = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AU16` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase-allow-shrink.rs 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase-allow-shrink.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a0569aa34c115d431c66193fbb69431d6e7d312 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase-allow-shrink.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute; + +fn main() {} + +// `transmute!` does not support transmuting from a smaller type to a larger +// one. +const INCREASE_SIZE: AU16 = transmute!(#![allow(shrink)] 0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase-allow-shrink.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase-allow-shrink.stderr new file mode 100644 index 0000000000000000000000000000000000000000..3fde7cc324e2ce7cc9c4b2c5d1df48f683344877 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase-allow-shrink.stderr @@ -0,0 +1,17 @@ +error[E0080]: transmuting from 1-byte type to 2-byte type: `u8` -> `Transmute` + --> tests/ui-nightly/transmute-size-increase-allow-shrink.rs:18:29 + | +18 | const INCREASE_SIZE: AU16 = transmute!(#![allow(shrink)] 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ evaluation of `INCREASE_SIZE` failed here + | + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> 
tests/ui-nightly/transmute-size-increase-allow-shrink.rs:18:29 + | +18 | const INCREASE_SIZE: AU16 = transmute!(#![allow(shrink)] 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `Transmute` (16 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase.rs new file mode 100644 index 0000000000000000000000000000000000000000..06e1990bda59a6d02b2986798342ac954614fbfe --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute; + +fn main() {} + +// `transmute!` does not support transmuting from a smaller type to a larger +// one. 
+const INCREASE_SIZE: AU16 = transmute!(0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase.stderr new file mode 100644 index 0000000000000000000000000000000000000000..599dc541b9f75a0674402785878d717c29fd7452 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-size-increase.stderr @@ -0,0 +1,17 @@ +error[E0080]: transmuting from 1-byte type to 2-byte type: `u8` -> `AU16` + --> tests/ui-nightly/transmute-size-increase.rs:18:29 + | +18 | const INCREASE_SIZE: AU16 = transmute!(0u8); + | ^^^^^^^^^^^^^^^ evaluation of `INCREASE_SIZE` failed here + | + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-size-increase.rs:18:29 + | +18 | const INCREASE_SIZE: AU16 = transmute!(0u8); + | ^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `AU16` (16 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..3c9ac6625a308bac3213a2bea81845581201d821 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-src-not-intobytes.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT 
+// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the source type implements `IntoBytes` +const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..68841b35dc8d5b27f996e574ba8ec625b205ab10 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/transmute-src-not-intobytes.stderr @@ -0,0 +1,34 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-nightly/transmute-src-not-intobytes.rs:17:32 + | +17 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | unsatisfied trait bound + | required by a bound introduced by this call + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `SRC_NOT_AS_BYTES::transmute` + --> tests/ui-nightly/transmute-src-not-intobytes.rs:17:32 + | +17 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | required by a bound in this function + | required by this bound in `transmute` + = note: this error originates in the macro 
`transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-dst-not-tryfrombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-dst-not-tryfrombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a9fdeb2fb77e43634dce6666748e5a42082c483 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-dst-not-tryfrombytes.rs @@ -0,0 +1,16 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute; + +fn main() { + let dst_not_try_from_bytes: Result = try_transmute!(AU16(0)); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-dst-not-tryfrombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-dst-not-tryfrombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..8747341a905ccbc987e777f82912cd64c247f0f1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-dst-not-tryfrombytes.stderr @@ -0,0 +1,88 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-nightly/try_transmute-dst-not-tryfrombytes.rs:15:33 + | + 15 | let dst_not_try_from_bytes: Result = try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub 
T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-nightly/try_transmute-dst-not-tryfrombytes.rs:15:58 + | + 15 | let dst_not_try_from_bytes: Result = try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `zerocopy::util::macro_util::try_transmute` + --> src/util/macro_util.rs + | + | pub fn try_transmute(src: Src) -> Result> + | ------------- required by a bound in this function +... 
+ | Dst: TryFromBytes, + | ^^^^^^^^^^^^ required by this bound in `try_transmute` + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-nightly/try_transmute-dst-not-tryfrombytes.rs:15:58 + | + 15 | let dst_not_try_from_bytes: Result = try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-decrease.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-decrease.rs new file mode 100644 index 0000000000000000000000000000000000000000..74c45cf87fff5c2b1c3ae702772d5f51c96d39d2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-decrease.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::AU16; +use zerocopy::try_transmute; + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. +fn main() { + let _decrease_size: Result = try_transmute!(AU16(0)); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-decrease.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-decrease.stderr new file mode 100644 index 0000000000000000000000000000000000000000..c9faceb34e76f9564ea81cb800920a9cab9f7e5a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/try_transmute-size-decrease.rs:17:41 + | +17 | let _decrease_size: Result = try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AU16` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-increase.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-increase.rs new file mode 100644 index 0000000000000000000000000000000000000000..05dfe05370b603536e642a806edfe8f595884f64 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-increase.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::try_transmute; + +// `try_transmute!` does not support transmuting from a smaller type to a larger +// one. +fn main() { + let _increase_size: Result = try_transmute!(0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-increase.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-increase.stderr new file mode 100644 index 0000000000000000000000000000000000000000..e912d389a9ecd8c08fd9f5b22ce07a2bc0433ae0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/try_transmute-size-increase.rs:17:43 + | +17 | let _increase_size: Result = try_transmute!(0u8); + | ^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `AU16` (16 bits) + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..304fb004b516a36c528dbcf6a7e2c6c777d21f83 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-src-not-intobytes.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute; + +fn main() { + // `try_transmute` requires that the source type implements `IntoBytes` + let src_not_into_bytes: Result = try_transmute!(NotZerocopy(AU16(0))); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..fe2720d0850564f71e24b7ce9ef9d53edd8e6e49 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute-src-not-intobytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-nightly/try_transmute-src-not-intobytes.rs:16:47 + | + 16 | let src_not_into_bytes: Result = try_transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `zerocopy::util::macro_util::try_transmute` + --> src/util/macro_util.rs + | + | pub fn try_transmute(src: Src) -> Result> + | ------------- required by a bound in this function + | where + | Src: IntoBytes, + | ^^^^^^^^^ required by this bound in `try_transmute` + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for 
more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-dst-not-tryfrombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-dst-not-tryfrombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..f8abd132c745d17e4f5adb303a15f3c52d73bb9d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-dst-not-tryfrombytes.rs @@ -0,0 +1,19 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute_mut; + +fn main() { + // `try_transmute_mut` requires that the destination type implements + // `IntoBytes` + let src = &mut AU16(0); + let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-dst-not-tryfrombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-dst-not-tryfrombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..64b8218af284f0edea6102e571c07134284f9d71 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-dst-not-tryfrombytes.stderr @@ -0,0 +1,120 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-nightly/try_transmute_mut-dst-not-tryfrombytes.rs:18:63 + | + 18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait 
`TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::try_transmute_mut` + --> src/util/macro_util.rs + | + | pub fn try_transmute_mut(self) -> Result<&'a mut Dst, ValidityError<&'a mut Src, Dst>> + | ----------------- required by a bound in this associated function +... + | Dst: TryFromBytes + IntoBytes, + | ^^^^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::try_transmute_mut` + = note: this error originates in the macro `try_transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-nightly/try_transmute_mut-dst-not-tryfrombytes.rs:18:63 + | + 18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::try_transmute_mut` + --> src/util/macro_util.rs + | + | pub fn try_transmute_mut(self) -> Result<&'a mut Dst, ValidityError<&'a mut Src, Dst>> + | ----------------- required by a 
bound in this associated function +... + | Dst: TryFromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::try_transmute_mut` + = note: this error originates in the macro `try_transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-nightly/try_transmute_mut-dst-not-tryfrombytes.rs:18:33 + | + 18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-nightly/try_transmute_mut-dst-not-tryfrombytes.rs:18:63 + | + 18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: 
required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + = note: this error originates in the macro `try_transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..d47321e000658b6d2f587fc9cbbdede7c44f7159 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-frombytes.rs @@ -0,0 +1,22 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +#[derive(zerocopy::IntoBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::TryFromBytes)] +#[repr(C)] +struct Dst; + +fn main() { + // `try_transmute_mut` requires that the source type implements `FromBytes` + let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..857ab63a7a89da324812d98818962a24251d608d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-frombytes.stderr @@ -0,0 +1,95 @@ +error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-nightly/try_transmute_mut-src-not-frombytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `Src` + --> tests/ui-nightly/try_transmute_mut-src-not-frombytes.rs:13:1 + | + 13 | struct Src; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Src` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function + | where + | Src: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in 
Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-nightly/try_transmute_mut-src-not-frombytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `Dst` + --> tests/ui-nightly/try_transmute_mut-src-not-frombytes.rs:17:1 + | + 17 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Dst` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function +... 
+ | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Dst: IntoBytes` is not satisfied + --> tests/ui-nightly/try_transmute_mut-src-not-frombytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `Dst` + --> tests/ui-nightly/try_transmute_mut-src-not-frombytes.rs:17:1 + | + 17 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Dst` + = help: the following other types implement trait `IntoBytes`: + () + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + AtomicU16 + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function +... 
+ | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..ff53576693acdc1700a38295b6ac6e927b4fa57d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-intobytes.rs @@ -0,0 +1,22 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +#[derive(zerocopy::FromBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::TryFromBytes)] +#[repr(C)] +struct Dst; + +fn main() { + // `try_transmute_mut` requires that the source type implements `IntoBytes` + let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..72639e0ad75d700ebd42495aa7e481cd978db12a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_mut-src-not-intobytes.stderr @@ -0,0 +1,95 @@ +error[E0277]: the trait bound `Src: IntoBytes` is not satisfied + --> tests/ui-nightly/try_transmute_mut-src-not-intobytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `Src` + --> tests/ui-nightly/try_transmute_mut-src-not-intobytes.rs:13:1 + | + 13 | struct Src; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Src` + = help: the following other types implement trait `IntoBytes`: + () + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + AtomicU16 + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function + | where + | Src: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z 
macro-backtrace for more info) + +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-nightly/try_transmute_mut-src-not-intobytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `Dst` + --> tests/ui-nightly/try_transmute_mut-src-not-intobytes.rs:17:1 + | + 17 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Dst` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function +... + | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Dst: IntoBytes` is not satisfied + --> tests/ui-nightly/try_transmute_mut-src-not-intobytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `Dst` + --> tests/ui-nightly/try_transmute_mut-src-not-intobytes.rs:17:1 + | + 17 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Dst` + = help: the following other types implement trait `IntoBytes`: + () + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + AtomicU16 + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> 
src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function +... + | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-mutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-mutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..2f64893ebf0d40d653aaf8abc9951d0b296fa2e4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-mutable.rs @@ -0,0 +1,17 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::try_transmute_ref; + +fn main() {} + +fn ref_dst_mutable() { + // `try_transmute_ref!` requires that its destination type be an immutable + // reference. 
+ let _: Result<&mut u8, _> = try_transmute_ref!(&0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-mutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-mutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..ec5a6a681896ae1789d15837aa8ad29b9acddf46 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-mutable.stderr @@ -0,0 +1,32 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/try_transmute_ref-dst-mutable.rs:16:33 + | + 16 | let _: Result<&mut u8, _> = try_transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | types differ in mutability + | arguments to this enum variant are incorrect + | + = note: expected mutable reference `&mut u8` + found reference `&_` +help: the type constructed contains `&_` due to the type of the argument passed + --> tests/ui-nightly/try_transmute_ref-dst-mutable.rs:16:33 + | + 16 | let _: Result<&mut u8, _> = try_transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ this argument influences the type of `Ok` +note: tuple variant defined here + --> $RUST/core/src/result.rs + | + | Ok(#[stable(feature = "rust1", since = "1.0.0")] T), + | ^^ + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/try_transmute_ref-dst-mutable.rs:16:33 + | +16 | let _: Result<&mut u8, _> = try_transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected enum `Result<&mut u8, _>` + found enum `Result<&_, ValidityError<&u8, _>>` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-not-immutable-tryfrombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-not-immutable-tryfrombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..078369342c5ce876eaba47ec93a85f44165a7c63 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-not-immutable-tryfrombytes.rs @@ -0,0 +1,18 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute_ref; + +fn main() { + // `try_transmute_ref` requires that the source type implements `Immutable` + // and `IntoBytes` + let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-not-immutable-tryfrombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-not-immutable-tryfrombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..86cdc4dbc10edb488394ddc5b5a259f4fa8a3302 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-dst-not-immutable-tryfrombytes.stderr @@ -0,0 +1,120 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-nightly/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:59 + | + 17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 
unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a Src, &'a Dst>::try_transmute_ref` + --> src/util/macro_util.rs + | + | pub fn try_transmute_ref(self) -> Result<&'a Dst, ValidityError<&'a Src, Dst>> + | ----------------- required by a bound in this associated function +... + | Dst: TryFromBytes + Immutable, + | ^^^^^^^^^^^^ required by this bound in `Wrap::<&Src, &Dst>::try_transmute_ref` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-nightly/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:59 + | + 17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `Immutable` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `NotZerocopy` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a Src, &'a Dst>::try_transmute_ref` + --> src/util/macro_util.rs + | + | pub fn try_transmute_ref(self) -> Result<&'a Dst, ValidityError<&'a Src, Dst>> + | 
----------------- required by a bound in this associated function +... + | Dst: TryFromBytes + Immutable, + | ^^^^^^^^^ required by this bound in `Wrap::<&Src, &Dst>::try_transmute_ref` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-nightly/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:33 + | + 17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-nightly/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:59 + | + 17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, 
C, D, E, F, G, H) + and $N others +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-src-not-immutable-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-src-not-immutable-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..593e68ffa7d135f5b732538e3c5892bd6c49cd0b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-src-not-immutable-intobytes.rs @@ -0,0 +1,18 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute_ref; + +fn main() { + // `try_transmute_ref` requires that the source type implements `Immutable` + // and `IntoBytes` + let src_not_into_bytes: Result<&AU16, _> = try_transmute_ref!(&NotZerocopy(AU16(0))); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-src-not-immutable-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-src-not-immutable-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..09ffdf2b614bbb13b5d67588c5752225601d75f1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-nightly/try_transmute_ref-src-not-immutable-intobytes.stderr @@ -0,0 +1,63 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-nightly/try_transmute_ref-src-not-immutable-intobytes.rs:17:48 + | + 17 | let src_not_into_bytes: Result<&AU16, _> = try_transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a Src, &'a Dst>::try_transmute_ref` + --> src/util/macro_util.rs + | + | pub fn try_transmute_ref(self) -> Result<&'a Dst, ValidityError<&'a Src, Dst>> + | ----------------- required by a bound in this associated function + | where + | Src: IntoBytes + Immutable, + | ^^^^^^^^^ required by this bound in 
`Wrap::<&Src, &Dst>::try_transmute_ref` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-nightly/try_transmute_ref-src-not-immutable-intobytes.rs:17:48 + | + 17 | let src_not_into_bytes: Result<&AU16, _> = try_transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `Immutable` is not implemented for `NotZerocopy` + --> tests/ui-nightly/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `NotZerocopy` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `zerocopy::util::macro_util::Wrap::<&'a Src, &'a Dst>::try_transmute_ref` + --> src/util/macro_util.rs + | + | pub fn try_transmute_ref(self) -> Result<&'a Dst, ValidityError<&'a Src, Dst>> + | ----------------- required by a bound in this associated function + | where + | Src: IntoBytes + Immutable, + | ^^^^^^^^^ required by this bound in `Wrap::<&Src, &Dst>::try_transmute_ref` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..e8c6d1d5314e2af8cc03122ec9b389c13a87ca4e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-bytes.rs @@ -0,0 +1,19 @@ +// 
Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::FromBytes; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_from_bytes::(); +} + +fn takes_from_bytes() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..f28a0c7f0c93694ca219d426a91a52930c008b2e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-bytes.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-stable/diagnostic-not-implemented-from-bytes.rs:16:24 + | +16 | takes_from_bytes::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `takes_from_bytes` + --> tests/ui-stable/diagnostic-not-implemented-from-bytes.rs:19:24 + | +19 | fn takes_from_bytes() {} + | ^^^^^^^^^ required by this bound in `takes_from_bytes` diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-zeros.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-zeros.rs new file mode 100644 index 0000000000000000000000000000000000000000..a84a833c138a89f003cf5e83a2b0df1077586801 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-zeros.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::FromZeros; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_from_zeros::(); +} + +fn takes_from_zeros() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-zeros.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-zeros.stderr new file mode 100644 index 0000000000000000000000000000000000000000..820c3913ed6bd8300ff38a2155fe31f562376502 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-from-zeros.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: FromZeros` is not satisfied + --> tests/ui-stable/diagnostic-not-implemented-from-zeros.rs:16:24 + | +16 | takes_from_zeros::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromZeros` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding 
`#[derive(FromZeros)]` to `NotZerocopy` + = help: the following other types implement trait `FromZeros`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `takes_from_zeros` + --> tests/ui-stable/diagnostic-not-implemented-from-zeros.rs:19:24 + | +19 | fn takes_from_zeros() {} + | ^^^^^^^^^ required by this bound in `takes_from_zeros` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-immutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-immutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..48e9e6580106de2cef30fe5afcc8a4d4ce44f53a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-immutable.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::Immutable; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. 
+ takes_immutable::(); +} + +fn takes_immutable() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-immutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-immutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..f225c0c3a6500da8bf00778cf91db6554ba96534 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-immutable.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-stable/diagnostic-not-implemented-immutable.rs:16:23 + | +16 | takes_immutable::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `Immutable` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `NotZerocopy` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `takes_immutable` + --> tests/ui-stable/diagnostic-not-implemented-immutable.rs:19:23 + | +19 | fn takes_immutable() {} + | ^^^^^^^^^ required by this bound in `takes_immutable` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-into-bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-into-bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..a348aafdf1981d9abfdbed39fc13306269c87bc1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-into-bytes.rs @@ -0,0 +1,19 @@ +// Copyright 
2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::IntoBytes; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_into_bytes::(); +} + +fn takes_into_bytes() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-into-bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-into-bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..e8880baa5638aff6fcb0729478d786eb7b5e684b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-into-bytes.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-stable/diagnostic-not-implemented-into-bytes.rs:16:24 + | +16 | takes_into_bytes::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `takes_into_bytes` + --> tests/ui-stable/diagnostic-not-implemented-into-bytes.rs:19:24 + | +19 | fn takes_into_bytes() {} + | ^^^^^^^^^ required by this bound in `takes_into_bytes` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-issue-1296.rs 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-issue-1296.rs new file mode 100644 index 0000000000000000000000000000000000000000..5b048e758154859c7f6873d98df32db0a608ea1b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-issue-1296.rs @@ -0,0 +1,57 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::{Immutable, IntoBytes}; + +fn main() { + // This is adapted from #1296, which includes the following text: + // + // The compiler errors when a type is missing Immutable are somewhat + // misleading, although I'm not sure there's much zerocopy can do about + // this. An example where the compiler recommends adding a reference + // rather than implementing Immutable (some were even more confusing than + // this): + // + // error[E0277]: the trait bound `virtio::wl::CtrlVfdNewDmabuf: zerocopy::Immutable` is not satisfied + // --> devices/src/virtio/wl.rs:317:20 + // | + // 317 | .write_obj(ctrl_vfd_new_dmabuf) + // | --------- ^^^^^^^^^^^^^^^^^^^ the trait `zerocopy::Immutable` is not implemented for `virtio::wl::CtrlVfdNewDmabuf` + // | | + // | required by a bound introduced by this call + // | + // note: required by a bound in `virtio::descriptor_utils::Writer::write_obj` + // --> devices/src/virtio/descriptor_utils.rs:536:25 + // | + // 536 | pub fn write_obj(&mut self, val: T) -> io::Result<()> { + // | ^^^^^^^^^ required by this bound in `Writer::write_obj` + // help: consider borrowing here + // | + // 317 | .write_obj(&ctrl_vfd_new_dmabuf) + // | + + // 317 | .write_obj(&mut ctrl_vfd_new_dmabuf) + // | ++++ + // + // Taking the compiler's 
suggestion results in a different error with a + // recommendation to remove the reference (back to the original code). + // + // As of this writing, the described problem is still happening thanks to + // https://github.com/rust-lang/rust/issues/130563. We include this test so + // that we can capture the current behavior, but we will update it once that + // Rust issue is fixed. + Foo.write_obj(NotZerocopy(())); +} + +struct Foo; + +impl Foo { + fn write_obj(&mut self, _val: T) {} +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-issue-1296.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-issue-1296.stderr new file mode 100644 index 0000000000000000000000000000000000000000..8f17f04479fa0ed5cb45bc2367d24669c4622dd9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-issue-1296.stderr @@ -0,0 +1,49 @@ +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-stable/diagnostic-not-implemented-issue-1296.rs:50:19 + | +50 | Foo.write_obj(NotZerocopy(())); + | --------- ^^^^^^^^^^^^^^^ the trait `Immutable` is not implemented for `NotZerocopy` + | | + | required by a bound introduced by this call + | +note: required by a bound in `Foo::write_obj` + --> tests/ui-stable/diagnostic-not-implemented-issue-1296.rs:56:21 + | +56 | fn write_obj(&mut self, _val: T) {} + | ^^^^^^^^^ required by this bound in `Foo::write_obj` +help: consider borrowing here + | +50 | Foo.write_obj(&NotZerocopy(())); + | + +50 | Foo.write_obj(&mut NotZerocopy(())); + | ++++ + +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-stable/diagnostic-not-implemented-issue-1296.rs:50:19 + | +50 | Foo.write_obj(NotZerocopy(())); + | --------- ^^^^^^^^^^^^^^^ unsatisfied trait bound + | | + | required by a bound 
introduced by this call + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `Foo::write_obj` + --> tests/ui-stable/diagnostic-not-implemented-issue-1296.rs:56:33 + | +56 | fn write_obj(&mut self, _val: T) {} + | ^^^^^^^^^ required by this bound in `Foo::write_obj` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-known-layout.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-known-layout.rs new file mode 100644 index 0000000000000000000000000000000000000000..ded20312bec5c5d7c138d85238cdf3ecc4ecce3d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-known-layout.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::KnownLayout; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. 
+ takes_known_layout::(); +} + +fn takes_known_layout() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-known-layout.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-known-layout.stderr new file mode 100644 index 0000000000000000000000000000000000000000..acbb9209d63be697b2c7889a477c92431d5d2aa5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-known-layout.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: KnownLayout` is not satisfied + --> tests/ui-stable/diagnostic-not-implemented-known-layout.rs:16:26 + | +16 | takes_known_layout::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `KnownLayout` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(KnownLayout)]` to `NotZerocopy` + = help: the following other types implement trait `KnownLayout`: + &T + &mut T + () + *const T + *mut T + AU16 + AtomicBool + AtomicI16 + and $N others +note: required by a bound in `takes_known_layout` + --> tests/ui-stable/diagnostic-not-implemented-known-layout.rs:19:26 + | +19 | fn takes_known_layout() {} + | ^^^^^^^^^^^ required by this bound in `takes_known_layout` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-try-from-bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-try-from-bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..18900ecaa639aa301f47f89a99f0543d1d9c6f31 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-try-from-bytes.rs 
@@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::TryFromBytes; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_try_from_bytes::(); +} + +fn takes_try_from_bytes() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-try-from-bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-try-from-bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..2ec88bb602d53183e29b8c5da1b8f991d0f14336 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-try-from-bytes.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-stable/diagnostic-not-implemented-try-from-bytes.rs:16:28 + | +16 | takes_try_from_bytes::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `takes_try_from_bytes` + --> tests/ui-stable/diagnostic-not-implemented-try-from-bytes.rs:19:28 + | +19 | fn takes_try_from_bytes() {} + | ^^^^^^^^^^^^ required by this bound in `takes_try_from_bytes` 
diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-unaligned.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-unaligned.rs new file mode 100644 index 0000000000000000000000000000000000000000..6196f6a75bf8c45d85a539bdd265109844db47a2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-unaligned.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; +use zerocopy::Unaligned; + +fn main() { + // We expect the proper diagnostic to be emitted on Rust 1.78.0 and later. + takes_unaligned::(); +} + +fn takes_unaligned() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-unaligned.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-unaligned.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1203f96044f604a71f259bbc6b2f9dd84fc51473 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/diagnostic-not-implemented-unaligned.stderr @@ -0,0 +1,27 @@ +error[E0277]: the trait bound `NotZerocopy: zerocopy::Unaligned` is not satisfied + --> tests/ui-stable/diagnostic-not-implemented-unaligned.rs:16:23 + | +16 | takes_unaligned::(); + | ^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `zerocopy::Unaligned` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider 
adding `#[derive(Unaligned)]` to `NotZerocopy` + = help: the following other types implement trait `zerocopy::Unaligned`: + () + AtomicBool + AtomicI8 + AtomicU8 + Cell + F32 + F64 + I128 + and $N others +note: required by a bound in `takes_unaligned` + --> tests/ui-stable/diagnostic-not-implemented-unaligned.rs:19:23 + | +19 | fn takes_unaligned() {} + | ^^^^^^^^^ required by this bound in `takes_unaligned` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_not_from_bytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_not_from_bytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..e01c4a9b6a26ee1b7ccf7b6c86517f510f65e1ec --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_not_from_bytes.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::NotZerocopy; + +fn main() {} + +// Should fail because `NotZerocopy: !FromBytes`. 
+const NOT_FROM_BYTES: NotZerocopy = + zerocopy::include_value!("../../testdata/include_value/data"); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_not_from_bytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_not_from_bytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..32ebe7269ac3df1b2be5aabe137ec36c52caad84 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_not_from_bytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-stable/include_value_not_from_bytes.rs:17:5 + | +17 | zerocopy::include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `NOT_FROM_BYTES::transmute` + --> tests/ui-stable/include_value_not_from_bytes.rs:17:5 + | +17 | zerocopy::include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | required by a bound in this function + | required by this bound in `transmute` + = note: this error originates in the macro `$crate::transmute` which comes from the expansion of the macro `zerocopy::include_value` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_wrong_size.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_wrong_size.rs new file mode 100644 index 0000000000000000000000000000000000000000..d0c5fcfc5f5d451a7d11e25cbbd467ac02508840 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_wrong_size.rs @@ -0,0 +1,12 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +// Should fail because the file is 4 bytes long, not 8. +const WRONG_SIZE: u64 = zerocopy::include_value!("../../testdata/include_value/data"); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_wrong_size.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_wrong_size.stderr new file mode 100644 index 0000000000000000000000000000000000000000..3e5d0f52b676bcde10609c1d4e5fbeaccd3ff242 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/include_value_wrong_size.stderr @@ -0,0 +1,17 @@ +error[E0080]: transmuting from 4-byte type to 8-byte type: `[u8; 4]` -> `u64` + --> tests/ui-stable/include_value_wrong_size.rs:12:25 + | +12 | const WRONG_SIZE: u64 = zerocopy::include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ evaluation of `WRONG_SIZE` failed here + | + = note: this error originates in the macro `$crate::transmute` which comes from the expansion of the macro `zerocopy::include_value` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute 
between types of different sizes, or dependently-sized types + --> tests/ui-stable/include_value_wrong_size.rs:12:25 + | +12 | const WRONG_SIZE: u64 = zerocopy::include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 4]` (32 bits) + = note: target type: `u64` (64 bits) + = note: this error originates in the macro `$crate::transmute` which comes from the expansion of the macro `zerocopy::include_value` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/max-align.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/max-align.rs new file mode 100644 index 0000000000000000000000000000000000000000..53e3eb9b0aa59db355673e1d96e9086e7db4e6bb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/max-align.rs @@ -0,0 +1,99 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +#[repr(C, align(1))] +struct Align1; + +#[repr(C, align(2))] +struct Align2; + +#[repr(C, align(4))] +struct Align4; + +#[repr(C, align(8))] +struct Align8; + +#[repr(C, align(16))] +struct Align16; + +#[repr(C, align(32))] +struct Align32; + +#[repr(C, align(64))] +struct Align64; + +#[repr(C, align(128))] +struct Align128; + +#[repr(C, align(256))] +struct Align256; + +#[repr(C, align(512))] +struct Align512; + +#[repr(C, align(1024))] +struct Align1024; + +#[repr(C, align(2048))] +struct Align2048; + +#[repr(C, align(4096))] +struct Align4096; + +#[repr(C, align(8192))] +struct Align8192; + +#[repr(C, align(16384))] +struct Align16384; + +#[repr(C, align(32768))] +struct Align32768; + +#[repr(C, align(65536))] +struct Align65536; + +#[repr(C, align(131072))] +struct Align131072; + +#[repr(C, align(262144))] +struct Align262144; + +#[repr(C, align(524288))] +struct Align524288; + +#[repr(C, align(1048576))] +struct Align1048576; + +#[repr(C, align(2097152))] +struct Align2097152; + +#[repr(C, align(4194304))] +struct Align4194304; + +#[repr(C, align(8388608))] +struct Align8388608; + +#[repr(C, align(16777216))] +struct Align16777216; + +#[repr(C, align(33554432))] +struct Align33554432; + +#[repr(C, align(67108864))] +struct Align67108864; + +#[repr(C, align(134217728))] +struct Align13421772; + +#[repr(C, align(268435456))] +struct Align26843545; + +#[repr(C, align(1073741824))] +struct Align1073741824; + +fn main() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/max-align.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/max-align.stderr new file mode 100644 index 0000000000000000000000000000000000000000..7e83b2f5ac30a7aa6c8d535b41f9c5da8ede0da0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/max-align.stderr @@ -0,0 +1,5 @@ +error[E0589]: invalid `repr(align)` attribute: larger than 2^29 + --> 
tests/ui-stable/max-align.rs:96:17 + | +96 | #[repr(C, align(1073741824))] + | ^^^^^^^^^^ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/ptr-is-invariant-over-v.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/ptr-is-invariant-over-v.rs new file mode 100644 index 0000000000000000000000000000000000000000..b9a76948fb03ba1dbde7a0ffbefe687188f1601d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/ptr-is-invariant-over-v.rs @@ -0,0 +1,29 @@ +// Copyright 2025 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::pointer::{ + invariant::{Aligned, Exclusive, Shared, Valid}, + Ptr, +}; + +fn _when_exclusive<'big: 'small, 'small>( + big: Ptr<'small, &'big u32, (Exclusive, Aligned, Valid)>, + mut _small: Ptr<'small, &'small u32, (Exclusive, Aligned, Valid)>, +) { + _small = big; +} + +fn _when_shared<'big: 'small, 'small>( + big: Ptr<'small, &'big u32, (Shared, Aligned, Valid)>, + mut _small: Ptr<'small, &'small u32, (Shared, Aligned, Valid)>, +) { + _small = big; +} + +fn main() {} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/ptr-is-invariant-over-v.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/ptr-is-invariant-over-v.stderr new file mode 100644 index 0000000000000000000000000000000000000000..ddb8c740327d3cd22d2c17669ee6bcfaae241274 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/ptr-is-invariant-over-v.stderr @@ -0,0 +1,31 @@ +error: lifetime may not live long enough + --> tests/ui-stable/ptr-is-invariant-over-v.rs:19:5 + | +15 | fn _when_exclusive<'big: 'small, 'small>( + 
| ---- ------ lifetime `'small` defined here + | | + | lifetime `'big` defined here +... +19 | _small = big; + | ^^^^^^^^^^^^ assignment requires that `'small` must outlive `'big` + | + = help: consider adding the following bound: `'small: 'big` + = note: requirement occurs because of the type `Ptr<'_, &u32, (zerocopy::invariant::Exclusive, Aligned, zerocopy::invariant::Valid)>`, which makes the generic argument `&u32` invariant + = note: the struct `Ptr<'a, T, I>` is invariant over the parameter `T` + = help: see for more information about variance + +error: lifetime may not live long enough + --> tests/ui-stable/ptr-is-invariant-over-v.rs:26:5 + | +22 | fn _when_shared<'big: 'small, 'small>( + | ---- ------ lifetime `'small` defined here + | | + | lifetime `'big` defined here +... +26 | _small = big; + | ^^^^^^^^^^^^ assignment requires that `'small` must outlive `'big` + | + = help: consider adding the following bound: `'small: 'big` + = note: requirement occurs because of the type `Ptr<'_, &u32, (Shared, Aligned, zerocopy::invariant::Valid)>`, which makes the generic argument `&u32` invariant + = note: the struct `Ptr<'a, T, I>` is invariant over the parameter `T` + = help: see for more information about variance diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-dst-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-dst-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..3daed1c842a35919b805fadb78cb2831ed0671fa --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-dst-not-frombytes.rs @@ -0,0 +1,17 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-dst-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-dst-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..f8e2b4d872ab712f792c6334eb5c08573c8d01d4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-dst-not-frombytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-stable/transmute-dst-not-frombytes.rs:17:41 + | +17 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `DST_NOT_FROM_BYTES::transmute` + --> tests/ui-stable/transmute-dst-not-frombytes.rs:17:41 + | +17 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ + | | + | required by a bound in this function + | required by this bound in `transmute` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff 
--git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-const.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-const.rs new file mode 100644 index 0000000000000000000000000000000000000000..9bee817cf610f55c28a3ccebce546b911e416038 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-const.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use zerocopy::transmute_mut; + +fn main() {} + +const ARRAY_OF_U8S: [u8; 2] = [0u8; 2]; + +// `transmute_mut!` cannot, generally speaking, be used in const contexts. +const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-const.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-const.stderr new file mode 100644 index 0000000000000000000000000000000000000000..423a42b9712987fa1e14b6b0dc2431962a69074a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-const.stderr @@ -0,0 +1,32 @@ +warning: taking a mutable reference to a `const` item + --> tests/ui-stable/transmute-mut-const.rs:18:52 + | +18 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^ + | + = note: each usage of a `const` item creates a new temporary + = note: the mutable reference will refer to this temporary, not the original `const` item +note: `const` item defined here + --> tests/ui-stable/transmute-mut-const.rs:15:1 + | +15 | const ARRAY_OF_U8S: [u8; 2] = 
[0u8; 2]; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(const_item_mutation)]` on by default + +error[E0015]: cannot call non-const method `Wrap::<&mut [u8; 2], &mut [u8; 2]>::transmute_mut_inference_helper` in constants + --> tests/ui-stable/transmute-mut-const.rs:18:37 + | +18 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: calls in constants are limited to constant functions, tuple structs and tuple variants + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0015]: cannot call non-const method `Wrap::<&mut [u8; 2], &mut [u8; 2]>::transmute_mut` in constants + --> tests/ui-stable/transmute-mut-const.rs:18:37 + | +18 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: calls in constants are limited to constant functions, tuple structs and tuple variants + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..8f2d511cca0e4b558dfe75412754c543c2893f7a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting into a non-reference +// destination type. +const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..39f9ceb93baed6ef6194a489d0d19c107638867a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-a-reference.stderr @@ -0,0 +1,19 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..504c12ac7e3542270c75fa56a26c56cd2ad290d6 --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-frombytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..d93ea8aa0800f9db47adfd0a9aaf522fe5f5b60c --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-frombytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-stable/transmute-mut-dst-not-frombytes.rs:22:38 + | + 22 | const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `Dst` + --> tests/ui-stable/transmute-mut-dst-not-frombytes.rs:19:1 + | + 19 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Dst` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: 
required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function +... + | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..7c9d60e6e0ec57737324a4c40f24e2ec97d4f22b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-intobytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `IntoBytes` +const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..768e8bbb1b7a62dd2b201459dde78ebdcefd295f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-dst-not-intobytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Dst: IntoBytes` is not satisfied + --> tests/ui-stable/transmute-mut-dst-not-intobytes.rs:22:36 + | + 22 | const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `Dst` + --> tests/ui-stable/transmute-mut-dst-not-intobytes.rs:19:1 + | + 19 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Dst` + = help: the following other types implement trait `IntoBytes`: + () + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + AtomicU16 + and $N others +note: required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function +... 
+ | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-illegal-lifetime.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-illegal-lifetime.rs new file mode 100644 index 0000000000000000000000000000000000000000..c31765e4b96b498bbea7d521c8c949a26484ea9f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +fn increase_lifetime() { + let mut x = 0u64; + // It is illegal to increase the lifetime scope. + let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-illegal-lifetime.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-illegal-lifetime.stderr new file mode 100644 index 0000000000000000000000000000000000000000..7f128138f343786f99c77f483af55054cbc6ab2a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-illegal-lifetime.stderr @@ -0,0 +1,12 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-stable/transmute-mut-illegal-lifetime.rs:14:56 + | +12 | let mut x = 0u64; + | ----- binding `x` declared here +13 | // It is illegal to increase the lifetime scope. 
+14 | let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); + | ---------------- ^^^^^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-dst-not-references.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-dst-not-references.rs new file mode 100644 index 0000000000000000000000000000000000000000..d07829de056f5b0ac3266db2e92a6a1530331897 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-dst-not-references.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting between non-reference source +// and destination types. 
+const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-dst-not-references.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-dst-not-references.stderr new file mode 100644 index 0000000000000000000000000000000000000000..758ed12debc7fdacc3da78d1b3cfeffc4742bbed --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-dst-not-references.stderr @@ -0,0 +1,15 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-src-dst-not-references.rs:15:59 + | +15 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` +help: consider mutably borrowing here + | +15 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(&mut 0usize); + | ++++ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-immutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-immutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..2d6e84542a805e03ddadb4cf33686a5fee315c2e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-immutable.rs @@ -0,0 +1,16 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +fn main() {} + +fn ref_src_immutable() { + // `transmute_mut!` requires that its source type be a mutable reference. + let _: &mut u8 = transmute_mut!(&0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-immutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-immutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..4476fb134cb813673d682f25570a48a160485d39 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-immutable.stderr @@ -0,0 +1,11 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-src-immutable.rs:15:37 + | +15 | let _: &mut u8 = transmute_mut!(&0u8); + | ---------------^^^^- + | | | + | | types differ in mutability + | expected due to this + | + = note: expected mutable reference `&mut _` + found reference `&u8` diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..71f6e67586a12c224a33e893c14cc05c5138beaf --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a non-reference source +// type. 
+const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..51dfa520710077e0489b7fa93ab4b0fc5011492b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-a-reference.stderr @@ -0,0 +1,15 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-src-not-a-reference.rs:15:53 + | +15 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` +help: consider mutably borrowing here + | +15 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(&mut 0usize); + | ++++ diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..fbf6232cabf7e6236d9fbe35a05b402bc09bfdd6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-frombytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `FromBytes` +const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..87e38e305b626e76d8f98beaa95f22647b39a96e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-frombytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-stable/transmute-mut-src-not-frombytes.rs:22:38 + | + 22 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `Src` + --> tests/ui-stable/transmute-mut-src-not-frombytes.rs:15:1 + | + 15 | struct Src; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Src` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function + | where + | Src: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro 
`transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..505734ac3d6f1164867a57885953fdf2493071ca --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-intobytes.rs @@ -0,0 +1,22 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromBytes, zerocopy::Immutable)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromBytes, zerocopy::IntoBytes, zerocopy::Immutable)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `IntoBytes` +const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..007edbf8f9d01abe0fa78e7b9dfd316c190c8f25 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-not-intobytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Src: IntoBytes` is not satisfied + --> tests/ui-stable/transmute-mut-src-not-intobytes.rs:22:36 + | + 22 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied 
trait bound + | +help: the trait `IntoBytes` is not implemented for `Src` + --> tests/ui-stable/transmute-mut-src-not-intobytes.rs:15:1 + | + 15 | struct Src; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Src` + = help: the following other types implement trait `IntoBytes`: + () + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + AtomicU16 + and $N others +note: required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function + | where + | Src: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-unsized.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-unsized.rs new file mode 100644 index 0000000000000000000000000000000000000000..af2ffd363f88eb2be6362d805ef0e026fcde047a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-unsized.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from an unsized source type to +// a sized destination type. 
+const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-unsized.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-unsized.stderr new file mode 100644 index 0000000000000000000000000000000000000000..cd3409e89321c1fefb977762c02ffbcfe33ddbb0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-mut-src-unsized.stderr @@ -0,0 +1,16 @@ +error[E0599]: the method `transmute_mut` exists for struct `Wrap<&mut [u8], &mut [u8; 1]>`, but its trait bounds were not satisfied + --> tests/ui-stable/transmute-mut-src-unsized.rs:15:35 + | + 15 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + ::: src/util/macro_util.rs + | + | pub struct Wrap(pub Src, pub PhantomData); + | ------------------------- doesn't satisfy `Wrap<&mut [u8], &mut [u8; 1]>: TransmuteMutDst<'_>` + | + = note: the following trait bounds were not satisfied: + `[u8]: Sized` + `<[u8; 1] as KnownLayout>::PointerMetadata = usize` + which is required by `Wrap<&mut [u8], &mut [u8; 1]>: TransmuteMutDst<'_>` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ptr-to-usize.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ptr-to-usize.rs new file mode 100644 index 0000000000000000000000000000000000000000..27db0bbb9750912a2dff430fefc1208772740bbc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ptr-to-usize.rs @@ -0,0 +1,18 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, 
Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute; + +fn main() {} + +// It is unclear whether we can or should support this transmutation, especially +// in a const context. This test ensures that even if such a transmutation +// becomes valid due to the requisite implementations of `FromBytes` being +// added, that we re-examine whether it should specifically be valid in a const +// context. +const POINTER_VALUE: usize = transmute!(&0usize as *const usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ptr-to-usize.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ptr-to-usize.stderr new file mode 100644 index 0000000000000000000000000000000000000000..0e40d34baf79f228fce770864128bf111cbeddbf --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ptr-to-usize.stderr @@ -0,0 +1,24 @@ +error[E0277]: the trait bound `*const usize: IntoBytes` is not satisfied + --> tests/ui-stable/transmute-ptr-to-usize.rs:18:30 + | +18 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `IntoBytes` is not implemented for `*const usize` + | required by a bound introduced by this call + | + = note: Consider adding `#[derive(IntoBytes)]` to `*const usize` +help: the trait `IntoBytes` is implemented for `usize` + --> src/impls.rs + | + | unsafe_impl!(usize: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +note: required by a bound in `POINTER_VALUE::transmute` + --> tests/ui-stable/transmute-ptr-to-usize.rs:18:30 + | +18 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | required by a bound in this function + | required by this bound in `transmute` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-mutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-mutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..75f837acb0652fe7ea5b33a65fed56e332969c27 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-mutable.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +fn ref_dst_mutable() { + // `transmute_ref!` requires that its destination type be an immutable + // reference. 
+ let _: &mut u8 = transmute_ref!(&0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-mutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-mutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..5823f327cd4257a5ff78d8147bdbecda62938af7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-mutable.stderr @@ -0,0 +1,39 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-mutable.rs:16:22 + | +16 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = 
note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..f6dbd00599515ae0a2944190a634d3fdc4e95e04 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting into a non-reference +// destination type. 
+const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..008a398c753e7a6f51939002c174b346fb15704a --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-a-reference.stderr @@ -0,0 +1,39 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-not-a-reference.rs:15:36 + | +15 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ 
expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..969e8877b6833402acab765cb6738dd6f64594a9 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-frombytes.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::Immutable)] +#[repr(transparent)] +struct Dst(AU16); + +// `transmute_ref` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &Dst = transmute_ref!(&AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..aecc775520725794586c7442ff5a3dc37e812068 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-frombytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-stable/transmute-ref-dst-not-frombytes.rs:21:34 + | +21 | const DST_NOT_FROM_BYTES: 
&Dst = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | unsatisfied trait bound + | required by a bound introduced by this call + | +help: the trait `FromBytes` is not implemented for `Dst` + --> tests/ui-stable/transmute-ref-dst-not-frombytes.rs:18:1 + | +18 | struct Dst(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Dst` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `AssertDstIsFromBytes` + --> tests/ui-stable/transmute-ref-dst-not-frombytes.rs:21:34 + | +21 | const DST_NOT_FROM_BYTES: &Dst = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsFromBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-nocell.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-nocell.rs new file mode 100644 index 0000000000000000000000000000000000000000..4f21b7273312dd5ebc288947f52e6f5d10c72bde --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-nocell.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::FromBytes)] +#[repr(transparent)] +struct Dst(AU16); + +// `transmute_ref` requires that the destination type implements `Immutable` +const DST_NOT_IMMUTABLE: &Dst = transmute_ref!(&AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-nocell.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-nocell.stderr new file mode 100644 index 0000000000000000000000000000000000000000..b0e20e0ed19ed5e36b25d77ff6f23a08b5249297 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-dst-not-nocell.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `Dst: Immutable` is not satisfied + --> tests/ui-stable/transmute-ref-dst-not-nocell.rs:21:33 + | +21 | const DST_NOT_IMMUTABLE: &Dst = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | unsatisfied trait bound + | required by a bound introduced by this call + | +help: the trait `Immutable` is not implemented for `Dst` + --> tests/ui-stable/transmute-ref-dst-not-nocell.rs:18:1 + | +18 | struct Dst(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `Dst` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `AssertDstIsImmutable` + --> tests/ui-stable/transmute-ref-dst-not-nocell.rs:21:33 + | +21 | const DST_NOT_IMMUTABLE: &Dst = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsImmutable` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-illegal-lifetime.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-illegal-lifetime.rs new file mode 100644 index 0000000000000000000000000000000000000000..8dd191e6f4038619262984d9723e12fce7d4d210 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +fn increase_lifetime() { + let x = 0u64; + // It is illegal to increase the lifetime scope. + let _: &'static u64 = zerocopy::transmute_ref!(&x); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-illegal-lifetime.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-illegal-lifetime.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1ef34feb7f07366452893b6460685de24fe67965 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-illegal-lifetime.stderr @@ -0,0 +1,12 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-stable/transmute-ref-illegal-lifetime.rs:14:52 + | +12 | let x = 0u64; + | - binding `x` declared here +13 | // It is illegal to increase the lifetime scope. 
+14 | let _: &'static u64 = zerocopy::transmute_ref!(&x); + | ------------ ^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-dst-not-references.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-dst-not-references.rs new file mode 100644 index 0000000000000000000000000000000000000000..c65bd24a93b72b7db351d8b418f6524984370c62 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-dst-not-references.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting between non-reference source +// and destination types. 
+const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-dst-not-references.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-dst-not-references.stderr new file mode 100644 index 0000000000000000000000000000000000000000..dc636fc7bc86bee2143f9f74b88460934ac441a8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-dst-not-references.stderr @@ -0,0 +1,55 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-src-dst-not-references.rs:15:54 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&_`, found `usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` +help: consider borrowing here + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(&0usize); + | + + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const 
SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-src-dst-not-references.rs:15:39 + | +15 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-a-reference.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-a-reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..7bbba06e1565749ce9f4c5e523c615dd1050d2b2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-a-reference.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a non-reference source +// type. 
+const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-a-reference.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-a-reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..3f86d392310a3f69fc85f0d9670700ff5d862035 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-a-reference.stderr @@ -0,0 +1,15 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-src-not-a-reference.rs:15:49 + | +15 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&_`, found `usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` +help: consider borrowing here + | +15 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(&0usize); + | + diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..8f357c92aa9b4dcfbc615642cce67e6bd929a7f2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-intobytes.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::Immutable)] +#[repr(transparent)] +struct Src(AU16); + +// `transmute_ref` requires that the source type implements `IntoBytes` +const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..5eb5496a3818c3aa348e5a255772b9dd3b6352f7 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-intobytes.stderr @@ -0,0 +1,60 @@ +error[E0277]: the trait bound `Src: IntoBytes` is not satisfied + --> tests/ui-stable/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | unsatisfied trait bound + | required by a bound introduced by this call + | +help: the trait `IntoBytes` is not implemented for `Src` + --> tests/ui-stable/transmute-ref-src-not-intobytes.rs:18:1 + | +18 | struct Src(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Src` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `AssertSrcIsIntoBytes` + --> tests/ui-stable/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsIntoBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Src: IntoBytes` 
is not satisfied + --> tests/ui-stable/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `Src` + --> tests/ui-stable/transmute-ref-src-not-intobytes.rs:18:1 + | +18 | struct Src(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Src` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `AssertSrcIsIntoBytes` + --> tests/ui-stable/transmute-ref-src-not-intobytes.rs:21:33 + | +21 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsIntoBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-nocell.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-nocell.rs new file mode 100644 index 0000000000000000000000000000000000000000..862951b888cb3d124492cc7c12327ffc80793e17 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-nocell.rs @@ -0,0 +1,21 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute_ref; + +fn main() {} + +#[derive(zerocopy::IntoBytes)] +#[repr(transparent)] +struct Src(AU16); + +// `transmute_ref` requires that the source type implements `Immutable` +const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-nocell.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-nocell.stderr new file mode 100644 index 0000000000000000000000000000000000000000..ca32a513c44a3a36171bb6e068a00374a65789f0 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-not-nocell.stderr @@ -0,0 +1,60 @@ +error[E0277]: the trait bound `Src: Immutable` is not satisfied + --> tests/ui-stable/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | unsatisfied trait bound + | required by a bound introduced by this call + | +help: the trait `Immutable` is not implemented for `Src` + --> tests/ui-stable/transmute-ref-src-not-nocell.rs:18:1 + | +18 | struct Src(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `Src` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `AssertSrcIsImmutable` + --> tests/ui-stable/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsImmutable` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Src: Immutable` is not 
satisfied + --> tests/ui-stable/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `Immutable` is not implemented for `Src` + --> tests/ui-stable/transmute-ref-src-not-nocell.rs:18:1 + | +18 | struct Src(AU16); + | ^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `Src` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `AssertSrcIsImmutable` + --> tests/ui-stable/transmute-ref-src-not-nocell.rs:21:34 + | +21 | const SRC_NOT_IMMUTABLE: &AU16 = transmute_ref!(&Src(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsImmutable` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-unsized.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-unsized.rs new file mode 100644 index 0000000000000000000000000000000000000000..262395bd7361ca4705b70a1b177af53948a2be13 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-unsized.rs @@ -0,0 +1,14 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from an unsized source type. 
+const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-unsized.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-unsized.stderr new file mode 100644 index 0000000000000000000000000000000000000000..490fc8a4fbf9e7054e9436d15707714deff4ff9e --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-ref-src-unsized.stderr @@ -0,0 +1,16 @@ +error[E0599]: the method `transmute_ref` exists for struct `Wrap<&[u8], &[u8; 1]>`, but its trait bounds were not satisfied + --> tests/ui-stable/transmute-ref-src-unsized.rs:14:31 + | + 14 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + ::: src/util/macro_util.rs + | + | pub struct Wrap(pub Src, pub PhantomData); + | ------------------------- doesn't satisfy `Wrap<&[u8], &[u8; 1]>: TransmuteRefDst<'_>` + | + = note: the following trait bounds were not satisfied: + `[u8]: Sized` + `<[u8; 1] as KnownLayout>::PointerMetadata = usize` + which is required by `Wrap<&[u8], &[u8; 1]>: TransmuteRefDst<'_>` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-decrease.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-decrease.rs new file mode 100644 index 0000000000000000000000000000000000000000..98d00e1950eec4e9686f791dc9d074fac7d26a23 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-decrease.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at 
your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute; + +fn main() {} + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. +const DECREASE_SIZE: u8 = transmute!(AU16(0)); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-decrease.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-decrease.stderr new file mode 100644 index 0000000000000000000000000000000000000000..da96da33ce11b49d5230443c5e8d6d75dd583e69 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-decrease.stderr @@ -0,0 +1,17 @@ +error[E0080]: transmuting from 2-byte type to 1-byte type: `AU16` -> `u8` + --> tests/ui-stable/transmute-size-decrease.rs:18:27 + | +18 | const DECREASE_SIZE: u8 = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ evaluation of `DECREASE_SIZE` failed here + | + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-size-decrease.rs:18:27 + | +18 | const DECREASE_SIZE: u8 = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AU16` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase-allow-shrink.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase-allow-shrink.rs new 
file mode 100644 index 0000000000000000000000000000000000000000..1a0569aa34c115d431c66193fbb69431d6e7d312 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase-allow-shrink.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute; + +fn main() {} + +// `transmute!` does not support transmuting from a smaller type to a larger +// one. +const INCREASE_SIZE: AU16 = transmute!(#![allow(shrink)] 0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase-allow-shrink.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase-allow-shrink.stderr new file mode 100644 index 0000000000000000000000000000000000000000..6e765eb8226dfa5424d4f36fa45cc48d5a4ae2f2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase-allow-shrink.stderr @@ -0,0 +1,17 @@ +error[E0080]: transmuting from 1-byte type to 2-byte type: `u8` -> `Transmute` + --> tests/ui-stable/transmute-size-increase-allow-shrink.rs:18:29 + | +18 | const INCREASE_SIZE: AU16 = transmute!(#![allow(shrink)] 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ evaluation of `INCREASE_SIZE` failed here + | + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-size-increase-allow-shrink.rs:18:29 + | +18 | const INCREASE_SIZE: AU16 = transmute!(#![allow(shrink)] 0u8); + | 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `Transmute` (16 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase.rs new file mode 100644 index 0000000000000000000000000000000000000000..06e1990bda59a6d02b2986798342ac954614fbfe --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::transmute; + +fn main() {} + +// `transmute!` does not support transmuting from a smaller type to a larger +// one. 
+const INCREASE_SIZE: AU16 = transmute!(0u8); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase.stderr new file mode 100644 index 0000000000000000000000000000000000000000..e2bb71f059ac68137fbf185fff68b2c6d47d2827 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-size-increase.stderr @@ -0,0 +1,17 @@ +error[E0080]: transmuting from 1-byte type to 2-byte type: `u8` -> `AU16` + --> tests/ui-stable/transmute-size-increase.rs:18:29 + | +18 | const INCREASE_SIZE: AU16 = transmute!(0u8); + | ^^^^^^^^^^^^^^^ evaluation of `INCREASE_SIZE` failed here + | + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-size-increase.rs:18:29 + | +18 | const INCREASE_SIZE: AU16 = transmute!(0u8); + | ^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `AU16` (16 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..3c9ac6625a308bac3213a2bea81845581201d821 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-src-not-intobytes.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// 
license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the source type implements `IntoBytes` +const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..5771406132d07aa52e2ba2fd328ddb494ab96de8 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/transmute-src-not-intobytes.stderr @@ -0,0 +1,34 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-stable/transmute-src-not-intobytes.rs:17:32 + | +17 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | unsatisfied trait bound + | required by a bound introduced by this call + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `SRC_NOT_AS_BYTES::transmute` + --> tests/ui-stable/transmute-src-not-intobytes.rs:17:32 + | +17 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | required by a bound in this function + | required by this bound in `transmute` + = note: this error originates in the macro 
`transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-dst-not-tryfrombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-dst-not-tryfrombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a9fdeb2fb77e43634dce6666748e5a42082c483 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-dst-not-tryfrombytes.rs @@ -0,0 +1,16 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute; + +fn main() { + let dst_not_try_from_bytes: Result = try_transmute!(AU16(0)); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-dst-not-tryfrombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-dst-not-tryfrombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..bf18912c71e37e67633709b7bc965792dc348ab1 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-dst-not-tryfrombytes.stderr @@ -0,0 +1,88 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-stable/try_transmute-dst-not-tryfrombytes.rs:15:33 + | + 15 | let dst_not_try_from_bytes: Result = try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-stable/try_transmute-dst-not-tryfrombytes.rs:15:58 + | + 15 | let dst_not_try_from_bytes: Result = try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `try_transmute` + --> src/util/macro_util.rs + | + | pub fn try_transmute(src: Src) -> Result> + | ------------- required by a bound in this function +... 
+ | Dst: TryFromBytes, + | ^^^^^^^^^^^^ required by this bound in `try_transmute` + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-stable/try_transmute-dst-not-tryfrombytes.rs:15:58 + | + 15 | let dst_not_try_from_bytes: Result = try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-decrease.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-decrease.rs new file mode 100644 index 0000000000000000000000000000000000000000..74c45cf87fff5c2b1c3ae702772d5f51c96d39d2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-decrease.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::AU16; +use zerocopy::try_transmute; + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. +fn main() { + let _decrease_size: Result = try_transmute!(AU16(0)); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-decrease.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-decrease.stderr new file mode 100644 index 0000000000000000000000000000000000000000..03ccf50ec09fd73b4d66a47e5d041f4ac3577752 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/try_transmute-size-decrease.rs:17:41 + | +17 | let _decrease_size: Result = try_transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AU16` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-increase.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-increase.rs new file mode 100644 index 0000000000000000000000000000000000000000..05dfe05370b603536e642a806edfe8f595884f64 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-increase.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::AU16; +use zerocopy::try_transmute; + +// `try_transmute!` does not support transmuting from a smaller type to a larger +// one. +fn main() { + let _increase_size: Result = try_transmute!(0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-increase.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-increase.stderr new file mode 100644 index 0000000000000000000000000000000000000000..90cd927ff99b19c7d8d02a8617ad95d391fc57cb --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/try_transmute-size-increase.rs:17:43 + | +17 | let _increase_size: Result = try_transmute!(0u8); + | ^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `AU16` (16 bits) + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..304fb004b516a36c528dbcf6a7e2c6c777d21f83 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-src-not-intobytes.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute; + +fn main() { + // `try_transmute` requires that the source type implements `IntoBytes` + let src_not_into_bytes: Result = try_transmute!(NotZerocopy(AU16(0))); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..2af9510ed9671b03a0c37aa1e9e7e7ceb583b776 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute-src-not-intobytes.stderr @@ -0,0 +1,31 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-stable/try_transmute-src-not-intobytes.rs:16:47 + | + 16 | let src_not_into_bytes: Result = try_transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `try_transmute` + --> src/util/macro_util.rs + | + | pub fn try_transmute(src: Src) -> Result> + | ------------- required by a bound in this function + | where + | Src: IntoBytes, + | ^^^^^^^^^ required by this bound in `try_transmute` + = note: this error originates in the macro `try_transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-dst-not-tryfrombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-dst-not-tryfrombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..f8abd132c745d17e4f5adb303a15f3c52d73bb9d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-dst-not-tryfrombytes.rs @@ -0,0 +1,19 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute_mut; + +fn main() { + // `try_transmute_mut` requires that the destination type implements + // `IntoBytes` + let src = &mut AU16(0); + let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-dst-not-tryfrombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-dst-not-tryfrombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..29376d344b67dfd943094dea12938bea438e6648 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-dst-not-tryfrombytes.stderr @@ -0,0 +1,120 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-stable/try_transmute_mut-dst-not-tryfrombytes.rs:18:63 + | + 18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not 
implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::try_transmute_mut` + --> src/util/macro_util.rs + | + | pub fn try_transmute_mut(self) -> Result<&'a mut Dst, ValidityError<&'a mut Src, Dst>> + | ----------------- required by a bound in this associated function +... + | Dst: TryFromBytes + IntoBytes, + | ^^^^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::try_transmute_mut` + = note: this error originates in the macro `try_transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-stable/try_transmute_mut-dst-not-tryfrombytes.rs:18:63 + | + 18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::try_transmute_mut` + --> src/util/macro_util.rs + | + | pub fn try_transmute_mut(self) -> Result<&'a mut Dst, ValidityError<&'a mut Src, Dst>> + | ----------------- required by a bound in this associated function +... 
+ | Dst: TryFromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::try_transmute_mut` + = note: this error originates in the macro `try_transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-stable/try_transmute_mut-dst-not-tryfrombytes.rs:18:33 + | + 18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-stable/try_transmute_mut-dst-not-tryfrombytes.rs:18:63 + | + 18 | let dst_not_try_from_bytes: Result<&mut NotZerocopy, _> = try_transmute_mut!(src); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `ValidityError` + --> 
src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + = note: this error originates in the macro `try_transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-frombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-frombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..d47321e000658b6d2f587fc9cbbdede7c44f7159 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-frombytes.rs @@ -0,0 +1,22 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +#[derive(zerocopy::IntoBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::TryFromBytes)] +#[repr(C)] +struct Dst; + +fn main() { + // `try_transmute_mut` requires that the source type implements `FromBytes` + let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-frombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-frombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..fd5624dada7a34f357cf9ba74605674950a0de38 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-frombytes.stderr @@ -0,0 +1,95 @@ +error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-stable/try_transmute_mut-src-not-frombytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `Src` + --> tests/ui-stable/try_transmute_mut-src-not-frombytes.rs:13:1 + | + 13 | struct Src; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Src` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function + | where + | Src: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z 
macro-backtrace for more info) + +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-stable/try_transmute_mut-src-not-frombytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `Dst` + --> tests/ui-stable/try_transmute_mut-src-not-frombytes.rs:17:1 + | + 17 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Dst` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function +... + | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Dst: IntoBytes` is not satisfied + --> tests/ui-stable/try_transmute_mut-src-not-frombytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `Dst` + --> tests/ui-stable/try_transmute_mut-src-not-frombytes.rs:17:1 + | + 17 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Dst` + = help: the following other types implement trait `IntoBytes`: + () + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + AtomicU16 + and $N others +note: required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> 
&'a mut Dst + | ------------- required by a bound in this associated function +... + | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..ff53576693acdc1700a38295b6ac6e927b4fa57d --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-intobytes.rs @@ -0,0 +1,22 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use zerocopy::transmute_mut; + +#[derive(zerocopy::FromBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::TryFromBytes)] +#[repr(C)] +struct Dst; + +fn main() { + // `try_transmute_mut` requires that the source type implements `IntoBytes` + let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..88deb96c9ba3561129b0815990d78b99dc5eaa74 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_mut-src-not-intobytes.stderr @@ -0,0 +1,95 @@ +error[E0277]: the trait bound `Src: IntoBytes` is not satisfied + --> tests/ui-stable/try_transmute_mut-src-not-intobytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `Src` + --> tests/ui-stable/try_transmute_mut-src-not-intobytes.rs:13:1 + | + 13 | struct Src; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Src` + = help: the following other types implement trait `IntoBytes`: + () + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + AtomicU16 + and $N others +note: required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function + | where + | Src: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + 
+error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-stable/try_transmute_mut-src-not-intobytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `FromBytes` is not implemented for `Dst` + --> tests/ui-stable/try_transmute_mut-src-not-intobytes.rs:17:1 + | + 17 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(FromBytes)]` to `Dst` + = help: the following other types implement trait `FromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- required by a bound in this associated function +... + | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Dst: IntoBytes` is not satisfied + --> tests/ui-stable/try_transmute_mut-src-not-intobytes.rs:21:40 + | + 21 | let src_not_from_bytes: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `Dst` + --> tests/ui-stable/try_transmute_mut-src-not-intobytes.rs:17:1 + | + 17 | struct Dst; + | ^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `Dst` + = help: the following other types implement trait `IntoBytes`: + () + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + AtomicU16 + and $N others +note: required by a bound in `Wrap::<&'a mut Src, &'a mut Dst>::transmute_mut` + --> src/util/macro_util.rs + | + | pub fn transmute_mut(self) -> &'a mut Dst + | ------------- 
required by a bound in this associated function +... + | Dst: FromBytes + IntoBytes, + | ^^^^^^^^^ required by this bound in `Wrap::<&mut Src, &mut Dst>::transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-mutable.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-mutable.rs new file mode 100644 index 0000000000000000000000000000000000000000..2f64893ebf0d40d653aaf8abc9951d0b296fa2e4 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-mutable.rs @@ -0,0 +1,17 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::try_transmute_ref; + +fn main() {} + +fn ref_dst_mutable() { + // `try_transmute_ref!` requires that its destination type be an immutable + // reference. 
+ let _: Result<&mut u8, _> = try_transmute_ref!(&0u8); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-mutable.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-mutable.stderr new file mode 100644 index 0000000000000000000000000000000000000000..deb4b458453de164c0e30afdd45c90f6e6ba0ff2 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-mutable.stderr @@ -0,0 +1,32 @@ +error[E0308]: mismatched types + --> tests/ui-stable/try_transmute_ref-dst-mutable.rs:16:33 + | + 16 | let _: Result<&mut u8, _> = try_transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | types differ in mutability + | arguments to this enum variant are incorrect + | + = note: expected mutable reference `&mut u8` + found reference `&_` +help: the type constructed contains `&_` due to the type of the argument passed + --> tests/ui-stable/try_transmute_ref-dst-mutable.rs:16:33 + | + 16 | let _: Result<&mut u8, _> = try_transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ this argument influences the type of `Ok` +note: tuple variant defined here + --> $RUST/core/src/result.rs + | + | Ok(#[stable(feature = "rust1", since = "1.0.0")] T), + | ^^ + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/try_transmute_ref-dst-mutable.rs:16:33 + | +16 | let _: Result<&mut u8, _> = try_transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected enum `Result<&mut u8, _>` + found enum `Result<&_, ValidityError<&u8, _>>` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-not-immutable-tryfrombytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-not-immutable-tryfrombytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..078369342c5ce876eaba47ec93a85f44165a7c63 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-not-immutable-tryfrombytes.rs @@ -0,0 +1,18 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute_ref; + +fn main() { + // `try_transmute_ref` requires that the source type implements `Immutable` + // and `IntoBytes` + let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-not-immutable-tryfrombytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-not-immutable-tryfrombytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..d3bb0aa08c957da14e9c4a094265d2f749e1dedf --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-dst-not-immutable-tryfrombytes.stderr @@ -0,0 +1,120 @@ +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-stable/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:59 + | + 17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied 
trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `Wrap::<&'a Src, &'a Dst>::try_transmute_ref` + --> src/util/macro_util.rs + | + | pub fn try_transmute_ref(self) -> Result<&'a Dst, ValidityError<&'a Src, Dst>> + | ----------------- required by a bound in this associated function +... + | Dst: TryFromBytes + Immutable, + | ^^^^^^^^^^^^ required by this bound in `Wrap::<&Src, &Dst>::try_transmute_ref` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-stable/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:59 + | + 17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `Immutable` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `NotZerocopy` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `Wrap::<&'a Src, &'a Dst>::try_transmute_ref` + --> src/util/macro_util.rs + | + | pub fn try_transmute_ref(self) -> Result<&'a Dst, ValidityError<&'a Src, Dst>> + | ----------------- required by a bound in this associated function +... 
+ | Dst: TryFromBytes + Immutable, + | ^^^^^^^^^ required by this bound in `Wrap::<&Src, &Dst>::try_transmute_ref` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-stable/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:33 + | + 17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in `ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + +error[E0277]: the trait bound `NotZerocopy: TryFromBytes` is not satisfied + --> tests/ui-stable/try_transmute_ref-dst-not-immutable-tryfrombytes.rs:17:59 + | + 17 | let dst_not_try_from_bytes: Result<&NotZerocopy, _> = try_transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `TryFromBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(TryFromBytes)]` to `NotZerocopy` + = help: the following other types implement trait `TryFromBytes`: + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + (A, B, C, D, E, F, G) + (A, B, C, D, E, F, G, H) + and $N others +note: required by a bound in 
`ValidityError` + --> src/error.rs + | + | pub struct ValidityError { + | ^^^^^^^^^^^^ required by this bound in `ValidityError` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-src-not-immutable-intobytes.rs b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-src-not-immutable-intobytes.rs new file mode 100644 index 0000000000000000000000000000000000000000..593e68ffa7d135f5b732538e3c5892bd6c49cd0b --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-src-not-immutable-intobytes.rs @@ -0,0 +1,18 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../include.rs"); + +use util::{NotZerocopy, AU16}; +use zerocopy::try_transmute_ref; + +fn main() { + // `try_transmute_ref` requires that the source type implements `Immutable` + // and `IntoBytes` + let src_not_into_bytes: Result<&AU16, _> = try_transmute_ref!(&NotZerocopy(AU16(0))); +} diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-src-not-immutable-intobytes.stderr b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-src-not-immutable-intobytes.stderr new file mode 100644 index 0000000000000000000000000000000000000000..812ce13503c922966a61a24ff2f8c115b7d51cdc --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/tests/ui-stable/try_transmute_ref-src-not-immutable-intobytes.stderr @@ -0,0 +1,63 @@ +error[E0277]: the trait bound `NotZerocopy: IntoBytes` is not satisfied + --> tests/ui-stable/try_transmute_ref-src-not-immutable-intobytes.rs:17:48 + | + 17 | let src_not_into_bytes: Result<&AU16, _> = try_transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `IntoBytes` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(IntoBytes)]` to `NotZerocopy` + = help: the following other types implement trait `IntoBytes`: + () + AU16 + AtomicBool + AtomicI16 + AtomicI32 + AtomicI64 + AtomicI8 + AtomicIsize + and $N others +note: required by a bound in `Wrap::<&'a Src, &'a Dst>::try_transmute_ref` + --> src/util/macro_util.rs + | + | pub fn try_transmute_ref(self) -> Result<&'a Dst, ValidityError<&'a Src, Dst>> + | ----------------- required by a bound in this associated function + | where + | Src: IntoBytes + Immutable, + | ^^^^^^^^^ required by this bound in `Wrap::<&Src, &Dst>::try_transmute_ref` + = 
note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: Immutable` is not satisfied + --> tests/ui-stable/try_transmute_ref-src-not-immutable-intobytes.rs:17:48 + | + 17 | let src_not_into_bytes: Result<&AU16, _> = try_transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound + | +help: the trait `Immutable` is not implemented for `NotZerocopy` + --> tests/ui-stable/../include.rs + | + 15 | pub struct NotZerocopy(pub T); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: Consider adding `#[derive(Immutable)]` to `NotZerocopy` + = help: the following other types implement trait `Immutable`: + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) + and $N others +note: required by a bound in `Wrap::<&'a Src, &'a Dst>::try_transmute_ref` + --> src/util/macro_util.rs + | + | pub fn try_transmute_ref(self) -> Result<&'a Dst, ValidityError<&'a Src, Dst>> + | ----------------- required by a bound in this associated function + | where + | Src: IntoBytes + Immutable, + | ^^^^^^^^^ required by this bound in `Wrap::<&Src, &Dst>::try_transmute_ref` + = note: this error originates in the macro `try_transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/win-cargo.bat b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/win-cargo.bat new file mode 100644 index 0000000000000000000000000000000000000000..c0e3b044713598a6518732f7e6ad0bb2edc94fb5 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerocopy-0.8.39/win-cargo.bat @@ -0,0 +1,16 @@ +@rem Copyright 2024 The Fuchsia Authors + +@rem Licensed under a BSD-style license , Apache License, Version 2.0 +@rem , or the MIT +@rem license , at your option. 
+@rem This file may not be copied, modified, or distributed except according to +@rem those terms. + +@rem Build `cargo-zerocopy` without any RUSTFLAGS set in the environment +@set TEMP_RUSTFLAGS=%RUSTFLAGS% +@set RUSTFLAGS= +@cargo +stable build --manifest-path tools/Cargo.toml -p cargo-zerocopy -q +@set RUSTFLAGS=%TEMP_RUSTFLAGS% +@set TEMP_RUSTFLAGS= +@rem Thin wrapper around the `cargo-zerocopy` binary in `tools/cargo-zerocopy` +@tools\target\debug\cargo-zerocopy %* diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/.cargo-ok b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/.cargo-ok new file mode 100644 index 0000000000000000000000000000000000000000..5f8b795830acbab5961c1a28c76a7916c9631493 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/.cargo-ok @@ -0,0 +1 @@ +{"v":1} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/.cargo_vcs_info.json b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/.cargo_vcs_info.json new file mode 100644 index 0000000000000000000000000000000000000000..e54078422df704662aadd9efc75c907c44046a11 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/.cargo_vcs_info.json @@ -0,0 +1,7 @@ +{ + "git": { + "sha1": "29dfe2790b6cfdab94ca6a6b69f58ce54802dbf7", + "dirty": true + }, + "path_in_vcs": "utils/zerovec" +} \ No newline at end of file diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/Cargo.lock b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..34c0dccb6c54173cd30750a66692652ef1bdeddd --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/Cargo.lock @@ -0,0 +1,152 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "databake" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6ee9e2d2afb173bcdeee45934c89ec341ab26f91c9933774fc15c2b58f83ef" +dependencies = [ + "databake-derive", + "proc-macro2", + "quote", +] + +[[package]] +name = "databake-derive" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6834770958c7b84223607e49758ec0dde273c4df915e734aad50f62968a4c134" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "syn" +version = "2.0.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "twox-hash" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ea3136b675547379c4bd395ca6b938e5ad3c3d20fad76e7fe85f9e0d011419c" + +[[package]] +name = "unicode-ident" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" + +[[package]] +name = "zerovec" +version = "0.11.5" +dependencies = [ + "databake", + "serde", + "twox-hash", + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/Cargo.toml 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..f06ba3a917c39ae1e3b2ad1df63739c1615a48a6 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/Cargo.toml @@ -0,0 +1,145 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.82" +name = "zerovec" +version = "0.11.5" +authors = ["The ICU4X Project Developers"] +build = false +include = [ + "data/**/*", + "src/**/*", + "examples/**/*", + "benches/**/*", + "tests/**/*", + "Cargo.toml", + "LICENSE", + "README.md", + "build.rs", +] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Zero-copy vector backed by a byte array" +readme = "README.md" +keywords = [ + "zerocopy", + "serialization", + "zero-copy", + "serde", +] +categories = [ + "rust-patterns", + "memory-management", + "caching", + "no-std", + "data-structures", +] +license = "Unicode-3.0" +repository = "https://github.com/unicode-org/icu4x" + +[package.metadata.workspaces] +independent = true + +[package.metadata.docs.rs] +all-features = true + +[package.metadata.cargo-all-features] +max_combination_size = 3 + +[features] +alloc = ["serde?/alloc"] +databake = ["dep:databake"] +derive = ["dep:zerovec-derive"] +hashmap = [ + "dep:twox-hash", + "alloc", +] +serde = ["dep:serde"] +std = [] +yoke = ["dep:yoke"] + +[lib] +name = "zerovec" +path = "src/lib.rs" +bench = false + +[[example]] +name = "zv_serde" 
+path = "examples/zv_serde.rs" +required-features = ["serde"] + +[[bench]] +name = "vzv" +path = "benches/vzv.rs" +harness = false + +[[bench]] +name = "zeromap" +path = "benches/zeromap.rs" +harness = false +required-features = [ + "serde", + "hashmap", + "derive", +] + +[[bench]] +name = "zerovec" +path = "benches/zerovec.rs" +harness = false + +[[bench]] +name = "zerovec_iai" +path = "benches/zerovec_iai.rs" +harness = false + +[[bench]] +name = "zerovec_serde" +path = "benches/zerovec_serde.rs" +harness = false +required-features = ["serde"] + +[dependencies.databake] +version = "0.2.0" +features = ["derive"] +optional = true +default-features = false + +[dependencies.serde] +version = "1.0.220" +features = ["derive"] +optional = true +default-features = false + +[dependencies.twox-hash] +version = "2.0.0" +features = ["xxhash64"] +optional = true +default-features = false + +[dependencies.yoke] +version = "0.8.0" +optional = true +default-features = false + +[dependencies.zerofrom] +version = "0.1.3" +default-features = false + +[dependencies.zerovec-derive] +version = "0.11.1" +optional = true +default-features = false diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/Cargo.toml.orig b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/Cargo.toml.orig new file mode 100644 index 0000000000000000000000000000000000000000..6ed6a235fe109b260eeced639cff054e574efd1f --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/Cargo.toml.orig @@ -0,0 +1,77 @@ +# This file is part of ICU4X. For terms of use, please see the file +# called LICENSE at the top level of the ICU4X source tree +# (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ). 
+ +[package] +name = "zerovec" +description = "Zero-copy vector backed by a byte array" +version = "0.11.5" +categories = ["rust-patterns", "memory-management", "caching", "no-std", "data-structures"] +keywords = ["zerocopy", "serialization", "zero-copy", "serde"] + +authors.workspace = true +edition.workspace = true +include.workspace = true +license.workspace = true +repository.workspace = true +rust-version = "1.82" + +[package.metadata.workspaces] +independent = true + +[package.metadata.docs.rs] +all-features = true + +[dependencies] +zerofrom = { workspace = true } + +zerovec-derive = { workspace = true, optional = true} + +databake = { workspace = true, features = ["derive"], optional = true } +serde = { workspace = true, features = ["derive"], optional = true } + +yoke = { workspace = true, optional = true } +twox-hash = { workspace = true, optional = true } + +[features] +derive = ["dep:zerovec-derive"] +hashmap = ["dep:twox-hash", "alloc"] +yoke = ["dep:yoke"] +serde = ["dep:serde"] +databake = ["dep:databake"] +alloc = ["serde?/alloc"] +# No longer does anything +std = [] + +[package.metadata.cargo-all-features] +# We have tons of features here, limit the amount of tests we run +max_combination_size = 3 + +[lib] +bench = false # This option is required for Benchmark CI + +[[bench]] +name = "zerovec" +harness = false + +[[bench]] +name = "zerovec_serde" +harness = false +required-features = ["serde"] + +[[bench]] +name = "vzv" +harness = false + +[[bench]] +name = "zerovec_iai" +harness = false + +[[bench]] +name = "zeromap" +harness = false +required-features = ["serde", "hashmap", "derive"] + +[[example]] +name = "zv_serde" +required-features = ["serde"] diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/LICENSE b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..c9be6012c53792604f970ffa54350c31fcbd4b3f --- /dev/null +++ 
b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/LICENSE @@ -0,0 +1,46 @@ +UNICODE LICENSE V3 + +COPYRIGHT AND PERMISSION NOTICE + +Copyright © 2020-2024 Unicode, Inc. + +NOTICE TO USER: Carefully read the following legal agreement. BY +DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING DATA FILES, AND/OR +SOFTWARE, YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE +TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT +DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of data files and any associated documentation (the "Data Files") or +software and any associated documentation (the "Software") to deal in the +Data Files or Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, and/or sell +copies of the Data Files or Software, and to permit persons to whom the +Data Files or Software are furnished to do so, provided that either (a) +this copyright and permission notice appear with all copies of the Data +Files or Software, or (b) this copyright and permission notice appear in +associated Documentation. + +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF +THIRD PARTY RIGHTS. + +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE +BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, +OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE DATA +FILES OR SOFTWARE. 
+ +Except as contained in this notice, the name of a copyright holder shall +not be used in advertising or otherwise to promote the sale, use or other +dealings in these Data Files or Software without prior written +authorization of the copyright holder. + +SPDX-License-Identifier: Unicode-3.0 + +— + +Portions of ICU4X may have been adapted from ICU4C and/or ICU4J. +ICU 1.8.1 to ICU 57.1 © 1995-2016 International Business Machines Corporation and others. diff --git a/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/README.md b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a708b085c4473ad1064b4e9b87c3903ff229d671 --- /dev/null +++ b/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/zerovec-0.11.5/README.md @@ -0,0 +1,197 @@ +# zerovec [![crates.io](https://img.shields.io/crates/v/zerovec)](https://crates.io/crates/zerovec) + + + +Zero-copy vector abstractions for arbitrary types, backed by byte slices. + +`zerovec` enables a far wider range of types — beyond just `&[u8]` and `&str` — to participate in +zero-copy deserialization from byte slices. It is `serde` compatible and comes equipped with +proc macros + +Clients upgrading to `zerovec` benefit from zero heap allocations when deserializing +read-only data. + +This crate has four main types: + +- [`ZeroVec<'a, T>`] (and [`ZeroSlice`](ZeroSlice)) for fixed-width types like `u32` +- [`VarZeroVec<'a, T>`] (and [`VarZeroSlice`](ZeroSlice)) for variable-width types like `str` +- [`ZeroMap<'a, K, V>`] to map from `K` to `V` +- [`ZeroMap2d<'a, K0, K1, V>`] to map from the pair `(K0, K1)` to `V` + +The first two are intended as close-to-drop-in replacements for `Vec` in Serde structs. The third and fourth are +intended as a replacement for `HashMap` or [`LiteMap`](https://docs.rs/litemap). 
When used with Serde derives, **be sure to apply +`#[serde(borrow)]` to these types**, same as one would for [`Cow<'a, T>`]. + +[`ZeroVec<'a, T>`], [`VarZeroVec<'a, T>`], [`ZeroMap<'a, K, V>`], and [`ZeroMap2d<'a, K0, K1, V>`] all behave like +[`Cow<'a, T>`] in that they abstract over either borrowed or owned data. When performing deserialization +from human-readable formats (like `json` and `xml`), typically these types will allocate and fully own their data, whereas if deserializing +from binary formats like `bincode` and `postcard`, these types will borrow data directly from the buffer being deserialized from, +avoiding allocations and only performing validity checks. As such, this crate can be pretty fast (see [below](#Performance) for more information) +on deserialization. + +See [the design doc](https://github.com/unicode-org/icu4x/blob/main/utils/zerovec/design_doc.md) for details on how this crate +works under the hood. + +## Cargo features + +This crate has several optional Cargo features: + - `serde`: Allows serializing and deserializing `zerovec`'s abstractions via [`serde`](https://docs.rs/serde) + - `yoke`: Enables implementations of `Yokeable` from the [`yoke`](https://docs.rs/yoke/) crate, which is also useful + in situations involving a lot of zero-copy deserialization. + - `derive`: Makes it easier to use custom types in these collections by providing the `#[make_ule]` and + `#[make_varule]` proc macros, which generate appropriate [`ULE`](https://docs.rs/zerovec/latest/zerovec/ule/trait.ULE.html) and + [`VarULE`](https://docs.rs/zerovec/latest/zerovec/ule/trait.VarULE.html)-conformant types for a given "normal" type. + - `std`: Enabled `std::Error` implementations for error types. This crate is by default `no_std` with a dependency on `alloc`. 
+ +[`ZeroVec<'a, T>`]: ZeroVec +[`VarZeroVec<'a, T>`]: VarZeroVec +[`ZeroMap<'a, K, V>`]: ZeroMap +[`ZeroMap2d<'a, K0, K1, V>`]: ZeroMap2d +[`Cow<'a, T>`]: alloc::borrow::Cow + +## Examples + +Serialize and deserialize a struct with ZeroVec and VarZeroVec with Bincode: + +```rust +use zerovec::{VarZeroVec, ZeroVec}; + +// This example requires the "serde" feature +#[derive(serde::Serialize, serde::Deserialize)] +pub struct DataStruct<'data> { + #[serde(borrow)] + nums: ZeroVec<'data, u32>, + #[serde(borrow)] + chars: ZeroVec<'data, char>, + #[serde(borrow)] + strs: VarZeroVec<'data, str>, +} + +let data = DataStruct { + nums: ZeroVec::from_slice_or_alloc(&[211, 281, 421, 461]), + chars: ZeroVec::alloc_from_slice(&['ö', '冇', 'म']), + strs: VarZeroVec::from(&["hello", "world"]), +}; +let bincode_bytes = + bincode::serialize(&data).expect("Serialization should be successful"); +assert_eq!(bincode_bytes.len(), 63); + +let deserialized: DataStruct = bincode::deserialize(&bincode_bytes) + .expect("Deserialization should be successful"); +assert_eq!(deserialized.nums.first(), Some(211)); +assert_eq!(deserialized.chars.get(1), Some('冇')); +assert_eq!(deserialized.strs.get(1), Some("world")); +// The deserialization will not have allocated anything +assert!(!deserialized.nums.is_owned()); +``` + +Use custom types inside of ZeroVec: + +```rust +use zerovec::{ZeroVec, VarZeroVec, ZeroMap}; +use std::borrow::Cow; +use zerovec::ule::encode_varule_to_box; + +// custom fixed-size ULE type for ZeroVec +#[zerovec::make_ule(DateULE)] +#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd, serde::Serialize, serde::Deserialize)] +struct Date { + y: u64, + m: u8, + d: u8 +} + +// custom variable sized VarULE type for VarZeroVec +#[zerovec::make_varule(PersonULE)] +#[zerovec::derive(Serialize, Deserialize)] // add Serde impls to PersonULE +#[derive(Clone, PartialEq, Eq, Ord, PartialOrd, serde::Serialize, serde::Deserialize)] +struct Person<'a> { + birthday: Date, + favorite_character: 
char, + #[serde(borrow)] + name: Cow<'a, str>, +} + +#[derive(serde::Serialize, serde::Deserialize)] +struct Data<'a> { + #[serde(borrow)] + important_dates: ZeroVec<'a, Date>, + // note: VarZeroVec always must reference the ULE type directly + #[serde(borrow)] + important_people: VarZeroVec<'a, PersonULE>, + #[serde(borrow)] + birthdays_to_people: ZeroMap<'a, Date, PersonULE> +} + + +let person1 = Person { + birthday: Date { y: 1990, m: 9, d: 7}, + favorite_character: 'π', + name: Cow::from("Kate") +}; +let person2 = Person { + birthday: Date { y: 1960, m: 5, d: 25}, + favorite_character: '冇', + name: Cow::from("Jesse") +}; + +let important_dates = ZeroVec::alloc_from_slice(&[Date { y: 1943, m: 3, d: 20}, Date { y: 1976, m: 8, d: 2}, Date { y: 1998, m: 2, d: 15}]); +let important_people = VarZeroVec::from(&[&person1, &person2]); +let mut birthdays_to_people: ZeroMap = ZeroMap::new(); +// `.insert_var_v()` is slightly more convenient over `.insert()` for custom ULE types +birthdays_to_people.insert_var_v(&person1.birthday, &person1); +birthdays_to_people.insert_var_v(&person2.birthday, &person2); + +let data = Data { important_dates, important_people, birthdays_to_people }; + +let bincode_bytes = bincode::serialize(&data) + .expect("Serialization should be successful"); +assert_eq!(bincode_bytes.len(), 160); + +let deserialized: Data = bincode::deserialize(&bincode_bytes) + .expect("Deserialization should be successful"); + +assert_eq!(deserialized.important_dates.get(0).unwrap().y, 1943); +assert_eq!(&deserialized.important_people.get(1).unwrap().name, "Jesse"); +assert_eq!(&deserialized.important_people.get(0).unwrap().name, "Kate"); +assert_eq!(&deserialized.birthdays_to_people.get(&person1.birthday).unwrap().name, "Kate"); + +} // feature = serde and derive +``` + +## Performance + +`zerovec` is designed for fast deserialization from byte buffers with zero memory allocations +while minimizing performance regressions for common vector operations. 
+ +Benchmark results on x86_64: + +| Operation | `Vec` | `zerovec` | +|---|---|---| +| Deserialize vec of 100 `u32` | 233.18 ns | 14.120 ns | +| Compute sum of vec of 100 `u32` (read every element) | 8.7472 ns | 10.775 ns | +| Binary search vec of 1000 `u32` 50 times | 442.80 ns | 472.51 ns | +| Deserialize vec of 100 strings | 7.3740 μs\* | 1.4495 μs | +| Count chars in vec of 100 strings (read every element) | 747.50 ns | 955.28 ns | +| Binary search vec of 500 strings 10 times | 466.09 ns | 790.33 ns | + +\* *This result is reported for `Vec`. However, Serde also supports deserializing to the partially-zero-copy `Vec<&str>`; this gives 1.8420 μs, much faster than `Vec` but a bit slower than `zerovec`.* + +| Operation | `HashMap` | `LiteMap` | `ZeroMap` | +|---|---|---|---| +| Deserialize a small map | 2.72 μs | 1.28 μs | 480 ns | +| Deserialize a large map | 50.5 ms | 18.3 ms | 3.74 ms | +| Look up from a small deserialized map | 49 ns | 42 ns | 54 ns | +| Look up from a large deserialized map | 51 ns | 155 ns | 213 ns | + +Small = 16 elements, large = 131,072 elements. Maps contain ``. + +The benches used to generate the above table can be found in the `benches` directory in the project repository. +`zeromap` benches are named by convention, e.g. `zeromap/deserialize/small`, `zeromap/lookup/large`. The type +is appended for baseline comparisons, e.g. `zeromap/lookup/small/hashmap`. + + + +## More Information + +For more information on development, authorship, contributing etc. please visit [`ICU4X home page`](https://github.com/unicode-org/icu4x).